diff --git a/devcon-api/data/vectors/devcon-7.json b/devcon-api/data/vectors/devcon-7.json index a12cc5437..c90df3f83 100644 --- a/devcon-api/data/vectors/devcon-7.json +++ b/devcon-api/data/vectors/devcon-7.json @@ -37,7 +37,7 @@ "slot_end": 1731411000000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/12NbVEiYl32qL-QEUHOpKoymfYvpnypnNwC2bLEg0kzI", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1J0cKsHtFb1PEL9UqySbC9gQWhHnzkVjn/view", "speakers": [ "qi-su" ] @@ -791,9 +791,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 6, 6, @@ -1355,8 +1352,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -1403,7 +1398,7 @@ "slot_end": 1731582000000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/1Qb2TXCOf_qhcjYsh7BEEeZW5-zo9GB2HTcUih2-gmLw", - "resources_slides": null, + "resources_slides": "", "speakers": [ "vincent-weisser" ] @@ -2719,11 +2714,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -2773,8 +2763,8 @@ ], "duration": 1444, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "0019ee23c274c12938b4b57420084f9e98b99dd465a66b57890f423e9724efa7", + "sources_youtubeId": "lQzB6W6ZvEo", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736fff51b0f83434d9a1780", @@ -2785,7 +2775,7 @@ "slot_end": 1731657600000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/15pGWoL5JVQjTyDhG1Gd2UgLELOwq6HZtyeZZ9rAGAZ8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1Ipu-THwhr0V5AjwUVlsq8-NaEl2zCyhD/view", "speakers": [ "barnabe-monnot" ] @@ -3544,9 +3534,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 6, 6, @@ -4103,8 +4090,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -4163,7 +4148,7 @@ "slot_end": 1731576600000, "slot_roomId": "classroom-b", "resources_presentation": "https://docs.google.com/presentation/d/1Jn6onllMeGwArE5qb8v5g4TAqMZfdWeU4wZW8fFGUhw", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1h5s7YD2c7wvIIYsl6IyuWbgxXSMD-sv7/view", "speakers": [ "ahmad", "andrew-twyman" @@ -4929,9 +4914,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 6, 6, @@ -5482,8 +5464,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -5532,8 +5512,8 @@ ], "duration": 1458, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "d5a8607677858333b0aae7830955e04f92c041b7b3b2107cb7392939b2eb0ae8", + "sources_youtubeId": "p7JPRTELnJc", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736ce9e9dbb7a90e1f57ba7", @@ -5544,7 +5524,7 @@ "slot_end": 1731579000000, "slot_roomId": "stage-1", "resources_presentation": "https://docs.google.com/presentation/d/1eztv0xy8RI4T_eMei061J--yX-7gDRGN4ZnQYsasWbU", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1cez0UtMmY4mbonX-QvJz3ZahZVi_MhPE/view", "speakers": [ "luca-zanolini", "roberto-saltini" @@ -6304,9 +6284,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -6863,8 +6840,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -6929,7 +6904,7 @@ "slot_end": 1731469200000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1IlPTrpB8SK9bkKystbWnIBPtxOnZNnAAkKebg5A6o9U", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1DDyRwxk4wDscM9cBtOC9vnFsYW3UpaX5/view", "speakers": [ "hsyodyssey" ] @@ -7702,9 +7677,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 2, @@ -8247,8 +8219,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -8304,7 +8274,8 @@ "slot_start": 
1731398400000, "slot_end": 1731400200000, "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1dJRUdeeSAm3IMKjIUvUMfYutPBeDhcB5ai34k_UGutY" + "resources_presentation": "https://docs.google.com/presentation/d/1dJRUdeeSAm3IMKjIUvUMfYutPBeDhcB5ai34k_UGutY", + "resources_slides": "https://drive.google.com/file/d/1S6u5VfaRmN5SbE5lfAYzEqUB_oWZPMFs/view" }, "vector": [ 0, @@ -9081,9 +9052,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 2, @@ -9621,8 +9589,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -9667,6 +9633,13 @@ "Security" ], "language": "en", + "sources_swarmHash": "82a6c0d37546dd4c2219242187c8d128050ef2534ab04d1ffb07bd5ecb7633aa", + "sources_youtubeId": "88hS4MsUwR4", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "palina-tolmach" ], @@ -9674,7 +9647,8 @@ "slot_start": 1731407400000, "slot_end": 1731408000000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1w5ssCjwzeMzULOUFJzm_OVAD4K7Y8ivlYMu9NQds-Gw" + "resources_presentation": "https://docs.google.com/presentation/d/1w5ssCjwzeMzULOUFJzm_OVAD4K7Y8ivlYMu9NQds-Gw", + "resources_slides": "https://drive.google.com/file/d/1XJH4bqZ4aunIU75rBUJ95WoUXxH-0qtr/view" }, "vector": [ 6, @@ -10425,9 +10399,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -10991,8 +10962,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -11039,8 +11008,8 @@ ], "duration": 6725, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "647dac106bf5a731dbf576fdf876b6ca959feb907dddca93ad2c9b2489c5ea2f", + "sources_youtubeId": "N3xal0a1-Q8", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736d0069dbb7a90e121ba96", @@ -11051,7 +11020,7 @@ "slot_end": 1731407400000, "slot_roomId": "classroom-e", "resources_presentation": "https://docs.google.com/presentation/d/1CNiy8pyXPgFrGk4YOJEIIwWPxSTEpivis_7GsXcdpdw", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1b-21Mv2xFk1qbcVoliQNps5i2zQDai45/view", "speakers": [ "clement-lesaege" ] @@ -11838,9 +11807,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 2, @@ -12371,8 +12337,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -12430,7 +12394,7 @@ "slot_end": 1731648600000, "slot_roomId": "classroom-a", "resources_presentation": "https://docs.google.com/presentation/d/10kKWaC4imyMLa4e4mR8BSiSrX6pmWjzQCOwVryP2ff8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1C_oze48ywGg4cDMuBMHzVdoWnBshnSBP/view", "speakers": [ "michael-moser" ] @@ -13222,9 +13186,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 2, @@ -13748,8 +13709,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -13797,8 +13756,8 @@ ], "duration": 1495, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "b63d6fa56a15994cca2cdb01389e1a10844f54ba6e58a9f5e6b70b907db057af", + "sources_youtubeId": "JFCfnhFL9Mc", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673407ec3a168eb5356f00b8", @@ -13809,7 +13768,7 @@ "slot_end": 1731403800000, "slot_roomId": "stage-5", "resources_presentation": "https://docs.google.com/presentation/d/1RyOkM2nzZPnG9r_QZgUBb98ZHMt2VtBlxR0avT7eWHA", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1QCcwQBW0aLVnC2JVdxUdjx7Bpe55d-XC/view", "speakers": [ "conor-mcmenamin" ] @@ -14605,9 +14564,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 2, @@ -15129,8 +15085,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -15176,6 
+15130,13 @@ "Tooling" ], "language": "en", + "sources_swarmHash": "28de20a4a6013f71cac622046aa24c6d4d7b3e298993fbdeddfac4fd84ca0ca7", + "sources_youtubeId": "utTJs7G11WY", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "yiannis-psaras", "dennis-trautwein" @@ -15184,7 +15145,8 @@ "slot_start": 1731477000000, "slot_end": 1731478800000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1l0VK-WM6V4AzPauAgjULwr3ct22celTYmuy97iN9sPw" + "resources_presentation": "https://docs.google.com/presentation/d/1l0VK-WM6V4AzPauAgjULwr3ct22celTYmuy97iN9sPw", + "resources_slides": "https://drive.google.com/file/d/1QgFNMwJs-VdRx5UQSg_8J9nkMdF7fsze/view" }, "vector": [ 0, @@ -15956,9 +15918,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -16502,8 +16461,6 @@ 0, 0, 0, - 0, - 0, 2, 2, 0, @@ -16565,11 +16522,11 @@ "slot_end": 1731567600000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1Ghf7GAlxhcuQaT9CB6h-vcw2b23gLxI2nnAAzr6YSHs", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1b4MZv_590A-J_Aq8bX_oVSzrqL5lD0-O/view", "speakers": [ "hira-siddiqui", - "ley", - "mujtaba-idrees" + "mujtaba-idrees", + "ley" ] }, "vector": [ @@ -17337,9 +17294,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -17887,8 +17841,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -17947,7 +17899,7 @@ "slot_end": 1731555600000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1doK-azcslBW5RIq_sNRQH7NJp6c4MBm2nfjksEAeGgw", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1a6bHriAXYwl5hU7BeJcSIx17Nu9W72sL/view", "speakers": [ "mislav-javor" ] @@ -18752,9 +18704,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -19265,8 +19214,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -19327,7 +19274,7 @@ "slot_end": 1731571200000, "slot_roomId": "stage-5", "resources_presentation": "https://docs.google.com/presentation/d/1a_SQ9DL-TDG33A44nSJ53matpwEz98lEvSg24o-XKA8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1CjYAVbxmdEI9QKF9HTFP-ml-NOT-SEiQ/view", "speakers": [ "max-segall" ] @@ -20132,9 +20079,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -20645,8 +20589,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -20713,8 +20655,8 @@ ], "duration": 360, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "d3aee961f55b3f8d283f2e8a6e9853fa958efba19de0e45aec10006d06eba55e", + "sources_youtubeId": "dvBaZBaMKVE", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735d6519dbb7a90e19f7129", @@ -20725,7 +20667,7 @@ "slot_end": 1731580800000, "slot_roomId": "stage-5", "resources_presentation": "https://docs.google.com/presentation/d/1HPCOiNJTibtU-aBvYZEHb5wvCv6Fuf3ujBxdsuBTFYo", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/13sGxI5-BKIfcVIgylQSKqKUVvQ9wm7bI/view", "speakers": [ "joshua-cheong" ] @@ -21498,9 +21440,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -22043,8 +21982,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -22093,8 +22030,8 @@ ], "duration": 1378, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "85b56b290f00d5e5f5c5a70cd6ea9df49ab5ac76aea6dc76efd6adfb2bb59ec6", + "sources_youtubeId": "PCvewGIWtMA", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673328393a168eb535690c58", @@ 
-22105,7 +22042,7 @@ "slot_end": 1731405600000, "slot_roomId": "stage-5", "resources_presentation": "https://docs.google.com/presentation/d/1C4LP01Njg8d8_7focQ3IHctmO58TbdilXcn-G6_m3sM", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1EwXB1d2zhOExBSKgASQE3XAKPIyYPH0r/view", "speakers": [ "ta-fakedev9999" ] @@ -22927,9 +22864,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 0, @@ -23423,8 +23357,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -23467,7 +23399,8 @@ ], "keywords": [ "Execution Tickets", - "Economic Simulation" + "Economic Simulation", + "" ], "duration": 4895, "language": "en", @@ -23483,7 +23416,7 @@ "slot_end": 1731661200000, "slot_roomId": "classroom-b", "resources_presentation": "https://docs.google.com/presentation/d/1oRvr-urjsOGeAOi88kdT1i0gPr31_3nITefPLKWTynU", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1vXvYPDArAbSzobR7_XyHAyBPC9xwSNsQ/view", "speakers": [ "pascal-stichler" ] @@ -24272,9 +24205,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -24801,8 +24731,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -24857,7 +24785,7 @@ "slot_end": 1731563400000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/1m1KqfSb19Pc7LjXcjrEe4jJpS6Jxx1LleB07azv4ilg", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1y4zGXssVsHqWwaNPbqiWzUhNAzP7rhc3/view", "speakers": [ "david" ] @@ -25618,9 +25546,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -26175,8 +26100,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -26231,7 +26154,7 @@ "slot_end": 1731566400000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1tIcPsq1d4NJY-OmswRcgBrKRXN68UP-xW0lJCxBh2e0", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1FFxauUZVpwTbZ7mr7m-Y4pIbEL-f6uTg/view", "speakers": [ "johannes-pfeffer" ] @@ -27065,9 +26988,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 0, @@ -27551,8 +27471,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -27591,7 +27509,8 @@ "slot_start": 1731497400000, "slot_end": 1731501000000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1mO_TORHBpzzpI0gU0stdNMv8Ew2DbX2vskYfKX4MClo" + "resources_presentation": "https://docs.google.com/presentation/d/1mO_TORHBpzzpI0gU0stdNMv8Ew2DbX2vskYfKX4MClo", + "resources_slides": "" }, "vector": [ 0, @@ -28904,11 +28823,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -28948,7 +28862,8 @@ "slot_start": 1731481200000, "slot_end": 1731484800000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/19hVh-i336QQTm56Yaa0OcRApQQX8RzhtksYA5s12z9M" + "resources_presentation": "https://docs.google.com/presentation/d/19hVh-i336QQTm56Yaa0OcRApQQX8RzhtksYA5s12z9M", + "resources_slides": "" }, "vector": [ 0, @@ -30261,11 +30176,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -30312,7 +30222,7 @@ "duration": 608, "language": "en", "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_youtubeId": "7H2CPeN7VcI", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673470e09dbb7a90e101bd24", @@ -30323,7 +30233,7 @@ "slot_end": 1731488400000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1I12hDyQcy5XvNa2BYACXq82Cc7f1_Kw_-YiA4Yo9Lkw", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1XWhkj_T3ODhFNvr53VnEEVkAp2w0sHUf/view", "speakers": [ "theo-diamandis" ] @@ -31159,9 +31069,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 2, @@ -31644,8 +31551,6 @@ 
0, 0, 0, - 0, - 0, 2, 0, 2, @@ -31690,6 +31595,13 @@ "User Research" ], "language": "en", + "sources_swarmHash": "3a67a3a0254db2b3ae64b53cf0130c3710e040e2e7356dc3f2463e4511a89c12", + "sources_youtubeId": "HIg1bscGqOE", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "migalabs" ], @@ -31697,7 +31609,8 @@ "slot_start": 1731570000000, "slot_end": 1731570600000, "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1DzfSNuVPHDjyHuicrkDNZhpK9g2Wz2R5-L4UIPs00E0" + "resources_presentation": "https://docs.google.com/presentation/d/1DzfSNuVPHDjyHuicrkDNZhpK9g2Wz2R5-L4UIPs00E0", + "resources_slides": "https://drive.google.com/file/d/13AJ_15urkVxkhYDdhhRsWlBbz12CTIzD/view" }, "vector": [ 0, @@ -32512,9 +32425,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -33014,8 +32924,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -33072,7 +32980,7 @@ "slot_end": 1731405600000, "slot_roomId": "stage-3", "resources_presentation": "https://docs.google.com/presentation/d/1IY1pWsDydf5hoQbu9K7EcpHpB9Y8bnVDSIAu6RSXu0A", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/12eI28_lYc9ZglgMbd104kQwafq_Myyz7/view", "speakers": [ "atheartengineer", "ying-tong" @@ -33839,9 +33747,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 6, 0, @@ -34393,8 +34298,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -34449,7 +34352,7 @@ "slot_end": 1731394200000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1BcDHTCrVf5XicbPd1sVJ8RzyhsCOuPE4DuSSGBO6AA8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1osBmPqHTWApFGBOGclj60vOpO_9emG7k/view", "speakers": [ "pierre-daix-moreux" ] @@ -35216,9 +35119,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -35769,8 +35669,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -35819,7 +35717,7 @@ "slot_end": 1731405600000, "slot_roomId": "stage-6", "resources_presentation": "https://docs.google.com/presentation/d/1nL-c7JYqWnQddW0BtHR56cUv9hNYnZFOgm3Ce4DvdEQ", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1KoAHQ0pS1aOhjoMNaeAvdlG_YMY1zxxB/view", "speakers": [ "patricio-worthalter" ] @@ -36668,9 +36566,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -37139,8 +37034,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -37179,6 +37072,13 @@ "Security" ], "language": "en", + "sources_swarmHash": "6e8d1a2743bb7d146ec9094800a9b3229754b2c4bf1c69cc9ba89c4a5bd5d783", + "sources_youtubeId": "jN5sWIv4lX0", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "nixo" ], @@ -37186,7 +37086,8 @@ "slot_start": 1731655800000, "slot_end": 1731656400000, "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1xIUsNzb7q1OKJeUjQFtXCXAFrA6wTAlv2EY1fXN7D_s" + "resources_presentation": "https://docs.google.com/presentation/d/1xIUsNzb7q1OKJeUjQFtXCXAFrA6wTAlv2EY1fXN7D_s", + "resources_slides": "https://drive.google.com/file/d/1AtJnIt0oq9gsbiA7KB7IMwKgllXAE-Xx/view" }, "vector": [ 0, @@ -37937,9 +37838,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -38503,8 +38401,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -38556,7 +38452,7 @@ "slot_end": 1731493800000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1wCMf3indiDC3mcME4zorE86wItPgSN0glxPKvU3HC_U", - "resources_slides": null, + 
"resources_slides": "https://drive.google.com/file/d/1OT6HV0X4le8Xjral_uN644NslSIIgGAH/view", "speakers": [ "kaan-uzdogan", "zoe-p", @@ -39408,9 +39304,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 2, @@ -39878,8 +39771,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -39938,7 +39829,7 @@ "slot_end": 1731661200000, "slot_roomId": "stage-3", "resources_presentation": "https://docs.google.com/presentation/d/1S4syGheCfolTfnj5zU6ru1154BHASk0dMcmtV1Y1Gmk", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1AWN98QhBDsr3w3fvs0zWrDXKK0d76mVF/view", "speakers": [ "hridam-basu" ] @@ -40718,9 +40609,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -41256,8 +41144,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -41313,7 +41199,7 @@ "slot_end": 1731639300000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1Qq5x2EWSZ2rS2muLZB5exp9AEesPxIA-JBqnKej4-LQ", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1t0Gi9kOdr46OiOugY7KzRTISx4GeC-LI/view", "speakers": [ "toni-wahrstatter" ] @@ -42102,9 +41988,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -42633,8 +42516,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -42679,8 +42560,8 @@ ], "duration": 1627, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "7c8f90a74abeecd314b6470f83bb4b75105cd78c5b2749fe25b1e20cc0f63c88", + "sources_youtubeId": "DNHso6qvJ3M", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67346f9d9dbb7a90e1e914b8", @@ -42691,7 +42572,7 @@ "slot_end": 1731488400000, "slot_roomId": "stage-6", "resources_presentation": "https://docs.google.com/presentation/d/1feUrTp0VPOlKMoCAdlFDOUvfhWnTwVbESRqA1FvQsWQ", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1QrWYlNlliTMEwPswI6FqVLjyR0xk1N52/view", "speakers": [ "patrick-mccorry" ] @@ -43541,9 +43422,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -44011,8 +43889,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -44042,7 +43918,7 @@ "expertise": "Intermediate", "audience": "Research", "featured": true, - "doNotRecord": true, + "doNotRecord": false, "tags": [ "Data Availability", "Transaction fees mechanisms" @@ -44063,7 +43939,7 @@ "slot_end": 1731488400000, "slot_roomId": "main-stage", "resources_presentation": "https://docs.google.com/presentation/d/1SAdKiPawHNzsIk6mSX5zU6XFK1nPnDm393UMkF2hq-w", - "resources_slides": null, + "resources_slides": "", "speakers": [ "ansgar-dietrichs", "anthony-sassano", @@ -44860,9 +44736,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -45384,8 +45257,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -45432,8 +45303,8 @@ ], "duration": 1363, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "b5b977302068601225e40569ffc8d69c685132a70133de755774793111af393f", + "sources_youtubeId": "TP6roml9JTA", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6733fcc13a168eb53547f781", @@ -45444,7 +45315,7 @@ "slot_end": 1731409200000, "slot_roomId": "stage-5", "resources_presentation": "https://docs.google.com/presentation/d/1vwJxkmHD52YwdthsPLe4BX6pJ2iLQrw9Qqa8nA5c4cI", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1UoPixl8ypsnAk4CFZFYyPkrX-sKf1YY3/view", "speakers": [ "ren-crypto-fish" ] @@ -46238,9 +46109,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -46762,8 +46630,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -46819,7 +46685,7 @@ "slot_end": 1731559200000, "slot_roomId": "stage-1", "resources_presentation": 
"https://docs.google.com/presentation/d/1ZI-uvSDiXNOMq2oSg_Q_XXuhL57THew5PgeViZ-WwC8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/17dVyvq0xPWrhK3JO0dj8f8VLIkfsa2wY/view", "speakers": [ "maria-milagros-santamaria" ] @@ -47608,9 +47474,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -48137,8 +48000,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -48186,7 +48047,8 @@ "slot_start": 1731402000000, "slot_end": 1731416400000, "slot_roomId": "artists-cohort-pyramid", - "resources_presentation": "https://docs.google.com/presentation/d/1xaK3SW7SLn-t1fQVQV2EcoNXQjJ5XG6AO0udCWpcCng" + "resources_presentation": "https://docs.google.com/presentation/d/1xaK3SW7SLn-t1fQVQV2EcoNXQjJ5XG6AO0udCWpcCng", + "resources_slides": "" }, "vector": [ 0, @@ -49045,9 +48907,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -49503,8 +49362,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -49556,7 +49413,7 @@ "slot_end": 1731405600000, "slot_roomId": "classroom-b", "resources_presentation": "https://docs.google.com/presentation/d/1obbj2bv-axqq0YxZpjvXmMW2LkNzMztiVyIBfdu3Z6Q", - "resources_slides": null, + "resources_slides": "", "speakers": [ "austin-griffith" ] @@ -50331,9 +50188,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -50876,8 +50730,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -50917,6 +50769,13 @@ "Tooling" ], "language": "en", + "sources_swarmHash": "1cbd645a323feea8c19756e32111149addcdcc452b7be4f218b3a6273bb1f0c0", + "sources_youtubeId": "27XdaX2PiHU", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "briefkandle" ], @@ -50924,7 +50783,8 @@ "slot_start": 1731554400000, "slot_end": 1731554700000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1EKpvJeOeAM34stz5C81nwEHi_an7BtgW7b7GpbmcS4w" + "resources_presentation": "https://docs.google.com/presentation/d/1EKpvJeOeAM34stz5C81nwEHi_an7BtgW7b7GpbmcS4w", + "resources_slides": "https://drive.google.com/file/d/1YuyQJ18PX4fBDVf2Uzu4trmnNDoBki2X/view" }, "vector": [ 0, @@ -51696,9 +51556,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -52241,8 +52098,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -52303,7 +52158,7 @@ "slot_end": 1731646800000, "slot_roomId": "classroom-a", "resources_presentation": "https://docs.google.com/presentation/d/1KG1sgWfc3v4CHrdbeyiwz6GNGMfkJySEhfCERsBjwxA", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1Ec3xTKgVAOjj9pL5WIV6II0OSwDm7ucS/view", "speakers": [ "lin-oshitani" ] @@ -53064,9 +52919,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -53621,8 +53473,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -53672,8 +53522,8 @@ ], "duration": 1501, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "06fa912b31c5526d10f554221175ea2db40141a304aa57c4763c1d342f82c693", + "sources_youtubeId": "7PPAIFVVgRU", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673583b29dbb7a90e1c8d87a", @@ -53684,7 +53534,7 @@ "slot_end": 1731560400000, "slot_roomId": "stage-6", "resources_presentation": "https://docs.google.com/presentation/d/1w_5QruyCi8qSHhXVnY97nh_BrNGlcqN8q30ZIT81j4g", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1t3zO4l83pBb8McR6Jzt-i4WsI8g-fTiT/view", "speakers": [ "richard-liu" ] @@ -54461,9 +54311,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -55004,8 +54851,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -55064,10 +54909,10 @@ "slot_end": 1731557700000, "slot_roomId": "classroom-c", 
"resources_presentation": "https://docs.google.com/presentation/d/1yyp2Kb948zJmCqxO8kfDdZZrs259maV0t-DrGiabFXw", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1RfH9E6WKIZDKBZcOA1godhob3qqo29Rv/view", "speakers": [ - "antonio-seveso", - "moritz-boullenger" + "moritz-boullenger", + "antonio-seveso" ] }, "vector": [ @@ -55835,9 +55680,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -56383,8 +56225,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -56445,7 +56285,7 @@ "slot_end": 1731411000000, "slot_roomId": "stage-3", "resources_presentation": "https://docs.google.com/presentation/d/1xTWn8OHn3Uo4DFB83e-yHN6sTgrIz_lfqTY2XGPrx98", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1BD6GcDhQxrmMGz4FbfAtS5ZoJf91IqUK/view", "speakers": [ "azam-soleimanian", "bogdan-ursu" @@ -57200,9 +57040,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -57764,8 +57601,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -57828,7 +57663,7 @@ "slot_end": 1731659400000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1bHlFD0PHf2BChoOQsCRxqawLU6GzB4gl5H7Tgnia2ag", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/18zVGTPMhjS3R6Ci-w7uLJ2MCZbRLwkOD/view", "speakers": [ "maryam-bahrani" ] @@ -58589,9 +58424,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -59149,8 +58981,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -59203,7 +59033,7 @@ "slot_end": 1731646200000, "slot_roomId": "classroom-a", "resources_presentation": "https://docs.google.com/presentation/d/1iFe5uo4vVJtKR4EYxLvteyWIaeQLzKWuZBOgGlIlGLk", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1A7SwgF9gYFDS241aTDN29m29fvkvLXBl/view", "speakers": [ "gideon-kaempfer" ] @@ -60026,9 +59856,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -60521,8 +60348,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -60572,8 +60397,8 @@ ], "duration": 496, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "e19f5fc32d1a5089d8018f712e7d0b670d892704a4a0f3fa9018d3fc5a719c62", + "sources_youtubeId": "Wan90qeRCxA", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67349ada9dbb7a90e11d7cab", @@ -60584,7 +60409,7 @@ "slot_end": 1731493800000, "slot_roomId": "stage-6", "resources_presentation": "https://docs.google.com/presentation/d/1_ySmTRldIj5s-VJNLFK0CcREWHnRA9KeGMc5zS6taAk", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1Hs8eFPigMsB051sylBZWQk4x4a5HPC40/view", "speakers": [ "sasha-shilina" ] @@ -61462,9 +61287,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 2, @@ -61904,8 +61726,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -61962,7 +61782,7 @@ "slot_end": 1731481200000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1YCHEohvVe_Js1rQhdgpyK_NLyxE88glwaYOMJmSs5aQ", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1Dq1wJ6i383jlHxnKH7CRIecj68kKlW7P/view", "speakers": [ "daniel-lehrner" ] @@ -62735,9 +62555,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -63282,8 +63099,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -63335,7 +63150,7 @@ "slot_end": 1731555300000, "slot_roomId": "classroom-a", "resources_presentation": "https://docs.google.com/presentation/d/1DMxoqy08a2IwA26zMJNDXvac90qjFcdm4KPkNLwWrl0", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1N6IpyuXCx9Niw7GYss4y2VXs1126PLuE/view", "speakers": [ "dhrumil-shah", "dhvani-patel" @@ -64200,9 +64015,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 0, @@ -64656,8 +64468,6 @@ 0, 
0, 0, - 0, - 0, 2, 0, 0, @@ -64691,6 +64501,13 @@ "keywords": [], "tags": [], "language": "en", + "sources_swarmHash": "db9c3bf3ac88fc0e1b02fbe543835a1658ae222287be34e7cb273c75741990e2", + "sources_youtubeId": "q3rpu8aDRA8", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "remco-bloemen" ], @@ -64698,7 +64515,8 @@ "slot_start": 1731558000000, "slot_end": 1731558480000, "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/10po4XtVzHK_QqfTE4tTOIt7GHM_mPiDKzpxYrl2-TFc" + "resources_presentation": "https://docs.google.com/presentation/d/10po4XtVzHK_QqfTE4tTOIt7GHM_mPiDKzpxYrl2-TFc", + "resources_slides": "https://drive.google.com/file/d/12YLD9pTQpr5tYwySzhEJ3buGlWUM9Xh9/view" }, "vector": [ 0, @@ -66011,11 +65829,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -66061,6 +65874,13 @@ "Use Cases" ], "language": "en", + "sources_swarmHash": "b6c47db46047deeab07b894d50997db7f6ccbcd1dfd8e4b67dff71704cb29e09", + "sources_youtubeId": "AZ7ctYLxstQ", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "guo-liu" ], @@ -66068,7 +65888,8 @@ "slot_start": 1731571800000, "slot_end": 1731572400000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1OD0-nwsjhb5qQwxWCHD0G1UeVtcNpvNFHNOz8Qb2e8U" + "resources_presentation": "https://docs.google.com/presentation/d/1OD0-nwsjhb5qQwxWCHD0G1UeVtcNpvNFHNOz8Qb2e8U", + "resources_slides": "https://drive.google.com/file/d/126QEmd6RTO8l43d1tr5b4IvA7ib-xjEM/view" }, "vector": [ 0, @@ -66826,9 +66647,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -67385,8 +67203,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -67449,10 +67265,10 @@ "slot_end": 1731660600000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/15oeEm8e0fRuUC_J5uLjNq4nII_1D6WDqnxNLOBX4rRs", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1-CZlKFrRA-Z3KHlENCJ8PQC3xZ0hKSmB/view", "speakers": [ - "arun-maharajan", - "rumee-singh" + "rumee-singh", + "arun-maharajan" ] }, "vector": [ @@ -68283,9 +68099,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -68770,8 +68583,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -68831,7 +68642,7 @@ "slot_end": 1731573600000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1BEfulRwWXyv3ETY4qD5zkkb47m78LVdcVcRHGFpP07o", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1IzPViXIZte38sGYx-IN9vum-BGVO4k0h/view", "speakers": [ "david-casey" ] @@ -69664,9 +69475,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -70151,8 +69959,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -70216,8 +70022,8 @@ ], "duration": 1305, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "697f0bbc602c4f9bf696747238823569347bbb6d18cb835a5826d7e7cb07a1ac", + "sources_youtubeId": "QKpxqrga8hk", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735e3989dbb7a90e163e84c", @@ -70228,7 +70034,7 @@ "slot_end": 1731573000000, "slot_roomId": "stage-5", "resources_presentation": "https://docs.google.com/presentation/d/1Vl4odQ2HutojKK7BY-U7D84ku-UD2fPpjY9T487-56w", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1Oj8xcrNyYsJtC284p7t9_C1-Fqqy7ias/view", 
"speakers": [ "rebecca-kacherginsky" ] @@ -70988,9 +70794,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -71548,8 +71351,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -71593,8 +71394,8 @@ ], "duration": 1374, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "23b4fb2bc9e233d47041e22e5af1b2131bdb3812a6278ffd4484a29aa6bb24bf", + "sources_youtubeId": "GDUrhkD-6-s", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67358ab59dbb7a90e189ee35", @@ -71605,7 +71406,7 @@ "slot_end": 1731562200000, "slot_roomId": "stage-5", "resources_presentation": "https://docs.google.com/presentation/d/1pJOc-CV91BIPcP9d3jUosxSzjtzYhRP3NAd6Cw9UsK0", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/19EuDoZ4nntYpKx_p-9sJ1CI57f9XASM4/view", "speakers": [ "sean-anderson" ] @@ -72361,9 +72162,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -72923,8 +72721,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -72987,7 +72783,7 @@ "slot_end": 1731639900000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1tXKl-KfPkdejsNKMAx-FYPo2FsJi7vaUxz5WyIszR84", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1Zdi5EvYO78kR-P5seRx9f_x6Kfh9RK6a/view", "speakers": [ "max-resnick" ] @@ -73746,9 +73542,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -74305,8 +74098,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -74367,10 +74158,10 @@ "slot_end": 1731409200000, "slot_roomId": "classroom-d", "resources_presentation": "https://docs.google.com/presentation/d/1pzlL4H4Mj25_flDl4GMvEE-meKuh9eh2PLHklBBwEdw", - "resources_slides": null, + "resources_slides": "", "speakers": [ - "florian-glatz", - "marina-markezic" + "marina-markezic", + "florian-glatz" ] }, "vector": [ @@ -75141,9 +74932,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -75689,8 +75477,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -75743,21 +75529,21 @@ "slot_end": 1731398400000, "slot_roomId": "classroom-d", "resources_presentation": "https://docs.google.com/presentation/d/1L-zUhR7NnvpMbCgqVyQBGzA1iXGeJqSDxOxg2F323yI", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1BAjQDnKcTgLH8M8xmRRCCLJxmJvSxfKt/view", "speakers": [ - "abhinav-goel", - "bruno-batavia", - "bruno-moniz", - "camila-rioja", - "daniel-marquez", + "suzana-maranhao-moreno", "hart-montgomery", "italo-borssatto", "luca-cosivi", + "bruno-batavia", + "abhinav-goel", "qin-en", - "shukyee-ma", - "sophia-lopez", - "suzana-maranhao-moreno", + "daniel-marquez", "thiago-rudiger", + "camila-rioja", + "bruno-moniz", + "sophia-lopez", + "shukyee-ma", "weekee-toh" ] }, @@ -76619,9 +76405,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -77076,8 +76859,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -77134,10 +76915,10 @@ "slot_end": 1731582000000, "slot_roomId": "classroom-b", "resources_presentation": "https://docs.google.com/presentation/d/1u3q_4IFfuXzZDfanHnjTsWcClgoN_wAS6_wEi2qSxiU", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1oMumy6nRT27rkf_iIUqr5tDr2wvdMDqt/view", "speakers": [ - "evan-griffiths", "gabriel-fior", + "evan-griffiths", "peter-jung" ] }, @@ -77970,9 +77751,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -78454,8 +78232,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -78514,7 +78290,7 @@ "slot_end": 1731393000000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1y9pSagBtZCT-PX46JT90ABDD2x8ACdZNVrHy2b9NaOQ", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1QZBdeNgRzJqteWLxvECgZQLAJ8Q57tM1/view", 
"speakers": [ "ray-jacobson" ] @@ -79284,9 +79060,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -79832,8 +79605,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -79892,7 +79663,7 @@ "slot_end": 1731393600000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1zCoYHO3MyfIPj-uA_nWYvHwd9lM6-v7ZDScGGdme-Fg", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/15SiSxz7ZQa7RIaT4DeokgTwZ39QsWu6Q/view", "speakers": [ "julien-niset" ] @@ -80662,9 +80433,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -81210,8 +80978,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -81270,7 +81036,7 @@ "slot_end": 1731493200000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/15T911YKp9NooTa41RChSG4jfO2xC3VEveRPwe9SbKcc", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1yjjc__aTeG8PJhisw5J2XfyrdF0tu2OI/view", "speakers": [ "franck-royer" ] @@ -82030,9 +81796,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -82588,8 +82351,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -82636,6 +82397,13 @@ "zk-identity" ], "language": "en", + "sources_swarmHash": "87b7ddcd406040c80df04bc900bb9f3e9d17b5070167f53211df9daff0d65b9d", + "sources_youtubeId": "X3fJw5yM4PE", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "yanis" ], @@ -82643,7 +82411,8 @@ "slot_start": 1731571200000, "slot_end": 1731571800000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1UfdavDRYTsOiq_2t15-4vh9KLrcltw6zpHsdrX2d8DA" + "resources_presentation": "https://docs.google.com/presentation/d/1UfdavDRYTsOiq_2t15-4vh9KLrcltw6zpHsdrX2d8DA", + "resources_slides": "https://drive.google.com/file/d/1NGWtmBmBwrhuVNSzHA2x-9yyImPZxJz_/view" }, "vector": [ 0, @@ -83420,9 +83189,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -83958,8 +83724,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -84008,8 +83772,8 @@ ], "duration": 4074, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "6c7a78a70d240767eb04e0e57d5e8c3f7bac563d03d06bef103a2698e8d2d1f8", + "sources_youtubeId": "aKbsZkxeJNs", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673481849dbb7a90e1d0191d", @@ -84020,7 +83784,7 @@ "slot_end": 1731488400000, "slot_roomId": "classroom-d", "resources_presentation": "https://docs.google.com/presentation/d/13A9C80P0zlw8I1eLl0T6fZOyBk0YNwjGY4iHKV2trZs", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1qgQun5VXrikpxicltaIEq4RBCuqWZ2lt/view", "speakers": [ "joao-ferreira" ] @@ -84790,9 +84554,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -85338,8 +85099,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -85400,7 +85159,7 @@ "slot_end": 1731652200000, "slot_roomId": "stage-3", "resources_presentation": "https://docs.google.com/presentation/d/1gzLhNSH6_1pl5WMDq6uGEDZ0bXnRl6twolEKZ0Cy3Xk", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/10uVHg7R_NlsJY3NivBBbUd4ApBQbTucc/view", "speakers": [ "ernesto-garcia" ] @@ -86167,9 +85926,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -86721,8 +86477,6 @@ 0, 0, 0, - 0, - 0, 2, 2, 0, @@ -86761,7 +86515,7 @@ "duration": 4305, "language": "en", "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_youtubeId": "hSQm26fSz68", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673456529dbb7a90e105884c", @@ -86772,7 +86526,7 @@ "slot_end": 
1731483000000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/1Y7SoCe3qErMg50qHQ7I69ZuGqYs3OYa620D1qj0qI8A", - "resources_slides": null, + "resources_slides": "", "speakers": [ "austin-griffith" ] @@ -87624,9 +87378,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -88092,8 +87843,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -88135,6 +87884,13 @@ "Public good" ], "language": "en", + "sources_swarmHash": "3f98b4b5a33951c995d1bd8ab5ebe10dec689e24b1a89d174020876ced3d25e3", + "sources_youtubeId": "hHVGYhRsEO0", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "carlos", "austin-griffith" @@ -88143,7 +87899,8 @@ "slot_start": 1731483000000, "slot_end": 1731493800000, "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1xr4Ix-QggZF86XTqtv9VesZjbkocLKoff6BZ8MaCOVM" + "resources_presentation": "https://docs.google.com/presentation/d/1xr4Ix-QggZF86XTqtv9VesZjbkocLKoff6BZ8MaCOVM", + "resources_slides": "" }, "vector": [ 0, @@ -88992,9 +88749,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -89458,8 +89212,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -89501,7 +89253,7 @@ "duration": 5272, "language": "en", "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_youtubeId": "lNLskT2LV6Y", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67342bba9dbb7a90e1c0df38", @@ -89512,7 +89264,7 @@ "slot_end": 1731473100000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/1KDpey24PdDXO0vBbx6eyVUR3g478eggW52p-Z2GsI8E", - "resources_slides": null, + "resources_slides": "", "speakers": [ "austin-griffith" ] @@ -90364,9 +90116,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -90832,8 +90581,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -90875,6 +90622,13 @@ "Gaming" ], "language": "en", + "sources_swarmHash": "81a7ae3b112b8f93564896d24819fcb5b67fc17cbf97d567c4025c5b742db75f", + "sources_youtubeId": "qsj79c-D_CQ", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "frolic" ], @@ -90882,7 +90636,8 @@ "slot_start": 1731565500000, "slot_end": 1731567000000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1XNooFuPjBwvsST2bNcBCNK1qdcTRfZstabTCqwGflzw" + "resources_presentation": "https://docs.google.com/presentation/d/1XNooFuPjBwvsST2bNcBCNK1qdcTRfZstabTCqwGflzw", + "resources_slides": "https://drive.google.com/file/d/11rIJxpgyrQrI2xrv8Cd_zAmdFqaYSWJM/view" }, "vector": [ 0, @@ -91738,9 +91493,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -92197,8 +91949,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -92257,7 +92007,7 @@ "slot_end": 1731394800000, "slot_roomId": "classroom-b", "resources_presentation": "https://docs.google.com/presentation/d/1918G58t4sIlc_rPC4YAPTZbQ0BQzE8SIevAi5yPsMaA", - "resources_slides": null, + "resources_slides": "", "speakers": [ "austin-griffith" ] @@ -93032,9 +92782,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -93575,8 +93322,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -93633,7 +93378,7 @@ "slot_end": 1731494400000, "slot_roomId": "classroom-a", "resources_presentation": "https://docs.google.com/presentation/d/1qMDjbZEumhcxlbrMh8-E5iJpvSVc-VXecukqL_lGC4I", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1o_LnE8NiMCuiDYzLw5aDNkoXd09a8DOn/view", 
"speakers": [ "alvarius" ] @@ -94492,9 +94237,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -94953,8 +94695,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -95018,7 +94758,7 @@ "slot_end": 1731571200000, "slot_roomId": "classroom-e", "resources_presentation": "https://docs.google.com/presentation/d/11rThgXehZjvKPRgDBNd_WxtwkApTlRaFelE2r0W73fM", - "resources_slides": null, + "resources_slides": "", "speakers": [ "aayush-gupta" ] @@ -95798,9 +95538,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -96338,8 +96075,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -96396,7 +96131,7 @@ "slot_end": 1731559500000, "slot_roomId": "classroom-b", "resources_presentation": "https://docs.google.com/presentation/d/1V3HDkZ9JwUiU5yIZOaYoLXG69XdykTD5DfEAoqdLSV0", - "resources_slides": null, + "resources_slides": "", "speakers": [ "kevin-jones" ] @@ -97171,9 +96906,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -97716,8 +97448,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -97772,7 +97502,8 @@ "slot_start": 1731643200000, "slot_end": 1731646800000, "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1MVujY9JzCpdqRtOCYp2LMS83iZxI8agQ_40Y6msZg4I" + "resources_presentation": "https://docs.google.com/presentation/d/1MVujY9JzCpdqRtOCYp2LMS83iZxI8agQ_40Y6msZg4I", + "resources_slides": "https://drive.google.com/file/d/1hLKk8KJE7-KWeU2KIbeEpvYFD39Ktbd9/view" }, "vector": [ 0, @@ -98529,9 +98260,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -99089,8 +98817,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -99148,7 +98874,7 @@ "slot_end": 1731655200000, "slot_roomId": "classroom-a", "resources_presentation": "https://docs.google.com/presentation/d/19UJc62RDhdFgd9QX-vci6ymxrQZ-z4fPkWTZ_-T_1JA", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1OoEOWgKSj65usUeuZdPOajOr4KxnS0w_/view", "speakers": [ "shao" ] @@ -99957,9 +99683,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -100468,8 +100191,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -100526,7 +100247,7 @@ "slot_end": 1731401400000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1D7C6aTr2ZEkqegToHCcpPBpSCVDYOFShDiu3eia6D9U", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1HCWFa9gJcLD1Fv5nJ0yX9rYiijt8ie9p/view", "speakers": [ "bianca-buzea" ] @@ -101310,9 +101031,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 0, @@ -101844,8 +101562,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -101905,7 +101621,7 @@ "slot_end": 1731479400000, "slot_roomId": "main-stage", "resources_presentation": "https://docs.google.com/presentation/d/14QG3jNVI1Dkw_-jw6BLW28LVEfOTx51EPqm3Dradf3o", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1Bahj1d2OatLylbD4s0WeQ1xrBmlTSuQe/view", "speakers": [ "oren-katz" ] @@ -102725,9 +102441,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -103223,8 +102936,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -103273,8 +102984,8 @@ ], "duration": 5276, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "c8e7cf0e66e2b6ebf630142f0086fc2793594a167b7034952d9b193355db6732", + "sources_youtubeId": "IQ8J0wTHk98", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6734821d9dbb7a90e1d7f8b2", @@ -103285,7 +102996,7 @@ "slot_end": 1731495600000, "slot_roomId": "classroom-d", "resources_presentation": "https://docs.google.com/presentation/d/1ED-jzkjpVBh8C5Ixuhx0_o1QSI84E5mcPX2UeHxX4D8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1EEwPJeQFOU8QEkIkC5yA3XA1_mTxSX2M/view", "speakers": [ "farhad-asgarov" ] @@ -104039,9 
+103750,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -104603,8 +104311,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -104660,7 +104366,7 @@ "slot_end": 1731495600000, "slot_roomId": "classroom-c", "resources_presentation": "https://docs.google.com/presentation/d/1IMXFflR1DsQZPhVlnc9Ss-Xp6JJcahFgzp1FXWS8ldw", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1fLFFH5GzzYRIMrTxirIFVdltv2GIXTXA/view", "speakers": [ "lukas-rosario", "conner-swenberg" @@ -105466,9 +105172,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -105979,8 +105682,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -106036,15 +105737,15 @@ "slot_end": 1731660300000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/1ALuLCS19rAT1pKeXeFHZHyfPrK4qrNB6_tMJOzCUe-s", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1h2kOEqUgMO5HapaDbt0MlhDo7qxufD-S/view", "speakers": [ + "richard-liu", "austin-griffith", - "florian-dreschner", + "rob-knight", + "veronica-zheng", "forest-fang", "miriam-neubauer", - "richard-liu", - "rob-knight", - "veronica-zheng" + "florian-dreschner" ] }, "vector": [ @@ -106809,9 +106510,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -107360,8 +107058,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -107408,8 +107104,8 @@ ], "duration": 3361, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "6c57769773ac56d933eb3c893cc4df6bd2f5bccb37c41cf2c9ab834e1c790639", + "sources_youtubeId": "UPXYzWS7ZJ4", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735be379dbb7a90e1bce08f", @@ -107420,7 +107116,7 @@ "slot_end": 1731574800000, "slot_roomId": "stage-6", "resources_presentation": "https://docs.google.com/presentation/d/1cnZdODskc3YfoVa93c9fvIVSBrE-RXobQHmu9RtOa-s", - "resources_slides": null, + "resources_slides": "", "speakers": [ "aayush-gupta", "aisling-connolly", @@ -108191,9 +107887,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -108744,8 +108437,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -108800,7 +108491,7 @@ "slot_end": 1731400800000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1jkv8pKTuI6m3JNnCg4Deeyb6gn7FhIcRIC_N_qPtTsY", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1d1m2uDgsdfnZCd9GCNBFmp2eqngHNs5p/view", "speakers": [ "gnana-lakshmi" ] @@ -109585,9 +109276,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -110120,8 +109808,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -110180,7 +109866,7 @@ "slot_end": 1731571200000, "slot_roomId": "classroom-c", "resources_presentation": "https://docs.google.com/presentation/d/1FX62edXoNMzGl2PtKs3vXQ9ynqkr1cEZhkDrsjPT3_s", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1vO3TL6M9bfrJYIcap1-vhoOZyje9N1um/view", "speakers": [ "chuxin-huang", "michael-silberling" @@ -110993,9 +110679,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -111501,8 +111184,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -111545,7 +111226,7 @@ "duration": 527, "language": "en", "sources_swarmHash": "", - "sources_youtubeId": "3g60qYUZoa8", + "sources_youtubeId": "92NTfwByGVQ", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": null, @@ -111554,7 +111235,7 @@ "slot_end": 1731403200000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1V_J6YbuL2ThEQuDEoLf9zCN3vAnn9d3pOrEfEmHUw0E", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1szo5giWjIOl5NmKdsMwCIbWY2lOxPR5y/view", "speakers": [ "yan-ho" ] @@ -112409,9 
+112090,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -112874,8 +112552,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -112933,7 +112609,7 @@ "slot_end": 1731478800000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1IXXA9ruxzg_Nt5FLQ1mscpwiYUMY8eIgfJ5XX04lyi8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1xtNVNWsvQ0sU5LPA3skEVMI9ta-bp0Ui/view", "speakers": [ "georgia-rakusen" ] @@ -113703,9 +113379,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -114253,8 +113926,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -114306,7 +113977,7 @@ "slot_end": 1731657600000, "slot_roomId": "classroom-c", "resources_presentation": "https://docs.google.com/presentation/d/1QTyHl9pwbLCVXdU_szuD2iq8BWfd8ruX_kXFRDtOq_E", - "resources_slides": null, + "resources_slides": "", "speakers": [ "eda-akturk" ] @@ -115090,9 +114761,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -115626,8 +115294,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -115669,6 +115335,13 @@ "Tooling" ], "language": "en", + "sources_swarmHash": "d6df65d55acff6d99267da83f0521b74b5e8c8a06936b8d73bb43b753e8a2b24", + "sources_youtubeId": "WBzfE5KYRTU", + "sources_ipfsHash": "", + "sources_livepeerId": "bLCu7FxPJ-s", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "ludens" ], @@ -115676,7 +115349,8 @@ "slot_start": 1731564000000, "slot_end": 1731565500000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1BpeEbVQlu2yDNQAlW5vuzWhq0v-7ANSaWNSni086zr0" + "resources_presentation": "https://docs.google.com/presentation/d/1BpeEbVQlu2yDNQAlW5vuzWhq0v-7ANSaWNSni086zr0", + "resources_slides": "" }, "vector": [ 0, @@ -116448,9 +116122,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -116991,8 +116662,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -117035,8 +116704,8 @@ "keywords": [], "duration": 1548, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "1c2071de09945f3fbb4accbcf71124fd79f5d1f7db887f434a269bb325cdf40f", + "sources_youtubeId": "r3TycwvFcdQ", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735ae3b4ccb22799e3aa47f", @@ -117047,7 +116716,7 @@ "slot_end": 1731571500000, "slot_roomId": "classroom-a", "resources_presentation": "https://docs.google.com/presentation/d/1nM0xwitXjawugXH5pPFNpq6MvywWnlHN137YbOB6mx8", - "resources_slides": null, + "resources_slides": "", "speakers": [ "small-brain" ] @@ -117862,9 +117531,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -118365,8 +118031,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -118412,7 +118076,7 @@ ], "duration": 3882, "language": "en", - "sources_swarmHash": "", + "sources_swarmHash": "822ddb34e2793469b80389b5b20e3bd12779b9c5cad9df9f473811c19c4a77a0", "sources_youtubeId": "uVuE6XwnOAg", "sources_ipfsHash": "", "sources_livepeerId": "", @@ -118424,12 +118088,12 @@ "slot_end": 1731644100000, "slot_roomId": "classroom-e", "resources_presentation": "https://docs.google.com/presentation/d/1eO1XIDc-q3KrPnErEQR2cd7nQ0SBTvIxhRw2jsJFAFw", - "resources_slides": null, + "resources_slides": "", "speakers": [ - "dc-posch", - "janine-leger", + "timour-kosters", "nicole-sun", - "timour-kosters" + "dc-posch", + "janine-leger" ] }, "vector": [ @@ -119307,9 +118971,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -119747,8 +119408,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -119808,10 +119467,10 @@ "slot_end": 1731651000000, "slot_roomId": "stage-5", "resources_presentation": 
"https://docs.google.com/presentation/d/1mYc9MY0LQHKBFJ4LpR5hSBWXay5VfaXFYGB_2fn3Tk4", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1B6GMhVAOEs4E6b8rFdzodZ4yNtp1iOTh/view", "speakers": [ - "bridget-hearst", "evin-mcmullen", + "bridget-hearst", "olivia-smith", "rachel-onchain" ] @@ -120596,9 +120255,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -121131,8 +120787,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -121183,7 +120837,8 @@ "slot_start": 1731575100000, "slot_end": 1731577800000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/14-gheTUG-tAY311u_ro8WU5QDEn9UD6qoIs10GwD2uk" + "resources_presentation": "https://docs.google.com/presentation/d/14-gheTUG-tAY311u_ro8WU5QDEn9UD6qoIs10GwD2uk", + "resources_slides": "https://drive.google.com/file/d/1-EdITc0JQ02NJa7F2-0T17zJepF74r3P/view" }, "vector": [ 0, @@ -122036,9 +121691,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -122498,8 +122150,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -122547,6 +122197,13 @@ "Protocol Design" ], "language": "en", + "sources_swarmHash": "b69873dbca0293348685caa12fbec0dbf646359f0a4b622f237780c174f9d7f6", + "sources_youtubeId": "ZPVXghNFj1I", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "auryn-macmillan" ], @@ -122554,7 +122211,8 @@ "slot_start": 1731394200000, "slot_end": 1731394800000, "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1o2XT-zv9XnvPAfdhkvLLqa8zJtsbRxJ_E9wOKaoe6WU" + "resources_presentation": "https://docs.google.com/presentation/d/1o2XT-zv9XnvPAfdhkvLLqa8zJtsbRxJ_E9wOKaoe6WU", + "resources_slides": "https://drive.google.com/file/d/1rJQFDAy9EjPIKCbbzRsAyARjAivymAfT/view" }, "vector": [ 0, @@ -123312,9 +122970,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -123871,8 +123526,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -123916,6 +123569,13 @@ "User Experience" ], "language": "en", + "sources_swarmHash": "56c6f3a38e0e38a31344382f0ec51fe0d336a1c9a9fbd66a0b2d27bc50715645", + "sources_youtubeId": "uCcU6qUX8w0", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "niharika" ], @@ -123923,7 +123583,8 @@ "slot_start": 1731465900000, "slot_end": 1731467700000, "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1zpiZg4dw7Yoaj_Sy6FpGr9LUxUTz8UzDFGi_e9GpUrU" + "resources_presentation": "https://docs.google.com/presentation/d/1zpiZg4dw7Yoaj_Sy6FpGr9LUxUTz8UzDFGi_e9GpUrU", + "resources_slides": "https://drive.google.com/file/d/1mVq_DaNVNcgWJ-3owqcQknjlQVhd2ej1/view" }, "vector": [ 0, @@ -124690,9 +124351,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -125238,8 +124896,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -125279,6 +124935,13 @@ "Verkle trees" ], "language": "en", + "sources_swarmHash": "5ee0568ebdea72398644ac8e95091444abbabfbcd722932eb41e1d3f5fba5178", + "sources_youtubeId": "LpL-KJqxUwo", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "richa" ], @@ -125286,7 +124949,8 @@ "slot_start": 1731473100000, "slot_end": 1731474000000, "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/10DqlfeLWBhwxvz53O8852FsvukCw9hOvBC9rvKdKNM8" 
+ "resources_presentation": "https://docs.google.com/presentation/d/10DqlfeLWBhwxvz53O8852FsvukCw9hOvBC9rvKdKNM8", + "resources_slides": "" }, "vector": [ 0, @@ -126050,9 +125714,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -126601,8 +126262,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -126645,6 +126304,13 @@ "Protocol Design" ], "language": "en", + "sources_swarmHash": "e4419e8fd54099cbd7c22435a9391dc9f0acb4ac88e3cd4199213dbda44bfb0d", + "sources_youtubeId": "j-Na9bPZEhI", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "nico-rodriguez" ], @@ -126652,7 +126318,8 @@ "slot_start": 1731555600000, "slot_end": 1731555900000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1PDrU1-lLeAQqLQ4AlBYvxRmDQ_iMUPXlUDleh2GhdJo" + "resources_presentation": "https://docs.google.com/presentation/d/1PDrU1-lLeAQqLQ4AlBYvxRmDQ_iMUPXlUDleh2GhdJo", + "resources_slides": "https://drive.google.com/file/d/17m1CF7Q4z3X-oZAl4nZxIk2BMpvz8Gnc/view" }, "vector": [ 0, @@ -127449,9 +127116,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -127969,8 +127633,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -128019,18 +127681,18 @@ "duration": 1568, "language": "en", "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_youtubeId": "69IZIIsanvM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736f7b91b0f83434d9762eb", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "", + "transcript_vtt": "", + "transcript_text": "", "eventId": "devcon-7", "slot_start": 1731645000000, "slot_end": 1731646800000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1uiCga8JgSVcEJbIHMSRcQYytS1nwlZiz6mLdidoUbr8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1MxUF3ReK7gjSPhfqvCqzh4I5uQ-YXwAr/view", "speakers": [ "jonah-burian" ] @@ -128786,9 +128448,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -129348,8 +129007,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -129396,6 +129053,13 @@ "Layer 1" ], "language": "en", + "sources_swarmHash": "18bdd6c02044f4d2a659c7ff87da1b5b56eeec9a92243f02be56200ba0d9d312", + "sources_youtubeId": "5oqUAf7vy28", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "thanh-hai-tran" ], @@ -129403,7 +129067,8 @@ "slot_start": 1731470400000, "slot_end": 1731471000000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1hG56xfpzqIZ6kxBtsd4z9tj3Nz-JQcrTh2y22jsemLo" + "resources_presentation": "https://docs.google.com/presentation/d/1hG56xfpzqIZ6kxBtsd4z9tj3Nz-JQcrTh2y22jsemLo", + "resources_slides": "https://drive.google.com/file/d/14Zu73Eq_kkRzOOJfJzFvnufist36ZupV/view" }, "vector": [ 6, @@ -130167,9 +129832,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 6, @@ -130718,8 +130380,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -130781,7 +130441,7 @@ "slot_end": 1731642000000, "slot_roomId": "classroom-a", "resources_presentation": "https://docs.google.com/presentation/d/1lN3h53WUwFynoQ5vR7IcSTB74eoYrpCJWu2YVtIjyTw", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1IdbON4m-O_WxcoEdRgsVJHvn1lQ_BKGO/view", "speakers": [ "lisa-akselrod" ] @@ -131547,9 +131207,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -132101,8 
+131758,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -132161,7 +131816,7 @@ "slot_end": 1731554100000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1OMXnYKdkNqOzuJsNTBVLxJNmMzaz6upJphPKMzWFHU8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1v_BxfyvCMCWG-ZIoON9KEFQrbEB1yjkN/view", "speakers": [ "derek-chiang" ] @@ -132931,9 +132586,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -133479,8 +133131,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -133541,7 +133191,7 @@ "slot_end": 1731556800000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1iiL0JiNnH0ChCkoh1IzT9f8BrqYd5kQugPQmJbbcZXo", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1_86XnoNJvrgp0_4z1JXExVEwqPfRfsri/view", "speakers": [ "konrad-urban" ] @@ -134354,9 +134004,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 0, @@ -134859,8 +134506,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -134921,7 +134566,7 @@ "slot_end": 1731556200000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1U-dbNKwiKAFUbasDggGI5sY4MPQrY0WG2flAU08jtEo", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/12hKgy5lNVVdNQLwSglO5iK5PZYhOj6-b/view", "speakers": [ "radina-talanova" ] @@ -135730,9 +135375,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -136239,8 +135881,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -136289,19 +135929,19 @@ ], "duration": 1538, "language": "en", - "sources_swarmHash": "3cfe6542d26a3eaebd761df6e4526de0802a9350d8c30c268d74cb2280c4269c", - "sources_youtubeId": "X4zJze4gY7M", + "sources_swarmHash": "", + "sources_youtubeId": "", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673321023a168eb5354cf6cb", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "", + "transcript_vtt": "", + "transcript_text": "", "eventId": "devcon-7", "slot_start": 1731638700000, "slot_end": 1731640500000, "slot_roomId": "stage-6", "resources_presentation": "https://docs.google.com/presentation/d/1lKUWcEm1t3Bl4RRPCau4E1fatycz3c_Um37TvwammW4", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/11kZAI4amvAcIXsWmOwJICfeW2WEguKAb/view", "speakers": [ "suzana-maranhao-moreno" ] @@ -136396,6 +136036,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -136407,10 +136048,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -137621,8 +137258,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -137679,7 +137314,7 @@ "slot_end": 1731403800000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1qjEbqOegs1ocDdXvx_djLjbOkH762J5kzikQ4VqSqdg", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1uDceJAf9QJwPami2jzrfbEOxGyRKTaL3/view", "speakers": [ "everett-hildenbrandt" ] @@ -138454,9 +138089,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -138999,8 +138631,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -139057,12 +138687,12 @@ "slot_end": 1731573000000, "slot_roomId": "stage-3", "resources_presentation": "https://docs.google.com/presentation/d/1Vhj4BZKZxNH74CAaa0TGW6GQk3bGkBt7tTxzfTfY5O0", - "resources_slides": null, + "resources_slides": "", "speakers": [ + "lightclient", "alex-beregszaszi", "daniel", "eniko-garam", - "lightclient", "mark-tyneway" ] }, @@ -139834,9 +139464,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -140379,8 +140006,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -140429,8 +140054,8 @@ ], "duration": 1459, "language": "en", - "sources_swarmHash": "", - 
"sources_youtubeId": "", + "sources_swarmHash": "0cb6ab2a62dfb516dfe5874f72d5dd2c24314f94c4351a4db19f278ec45af1cf", + "sources_youtubeId": "uG5SEBzh3qg", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673486969dbb7a90e11222dd", @@ -140441,7 +140066,7 @@ "slot_end": 1731495600000, "slot_roomId": "stage-3", "resources_presentation": "https://docs.google.com/presentation/d/1SPNfEMjAph1OpaPequc8om3JZMEGtbmn17fCEmGQhuE", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1h_UlftTJ5OCCWsd8hA0KSPJ27dDQwJsi/view", "speakers": [ "kris-kaczor" ] @@ -141225,9 +140850,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 0, @@ -141759,8 +141381,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -141821,10 +141441,10 @@ "slot_end": 1731565800000, "slot_roomId": "classroom-b", "resources_presentation": "https://docs.google.com/presentation/d/116i9aeE05txm_kkq2IUV5e1zkt4cdfE8v3unnfsygbQ", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1LPieASAyTH3eqZcEzcClwNkXw-G-rHzC/view", "speakers": [ - "florent", "michael-elliot", + "florent", "remi", "theo-madzou" ] @@ -142599,9 +142219,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -143144,8 +142761,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -143184,7 +142799,8 @@ "slot_start": 1731484800000, "slot_end": 1731487500000, "slot_roomId": "decompression-room", - "resources_presentation": "https://docs.google.com/presentation/d/1hrC4BF-BEAqbZu7xLGF7HSCIT40WN7UKvfkGD14lwho" + "resources_presentation": "https://docs.google.com/presentation/d/1hrC4BF-BEAqbZu7xLGF7HSCIT40WN7UKvfkGD14lwho", + "resources_slides": "" }, "vector": [ 0, @@ -144497,11 +144113,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -144549,8 +144160,8 @@ ], "duration": 1341, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "aca6bb10e220e0006d3bfb5794638f15a49f13f070b281e3aa6f13a85f48d15b", + "sources_youtubeId": "7EOjrYnrE8g", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673440649dbb7a90e1ffe27c", @@ -144561,7 +144172,7 @@ "slot_end": 1731477600000, "slot_roomId": "stage-3", "resources_presentation": "https://docs.google.com/presentation/d/1P0tuKkaODBsM7KxkSGtcEnPO4Ol_WaMBik4Mne_Fr0Y", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1WZd4UpPYJ-I4MOlEJNXuEvtytQSsubSs/view", "speakers": [ "daniel", "julian-arnesino" @@ -144733,8 +144344,8 @@ 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -145328,9 +144939,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -145882,8 +145490,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -145924,6 +145530,13 @@ "Solarpunk" ], "language": "en", + "sources_swarmHash": "8852482be3807eba17d5f9debd1cfa66d74aa6a315b42cb50220516bec0a3ffa", + "sources_youtubeId": "u257H6Plt-k", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "koeppelmann" ], @@ -145931,7 +145544,8 @@ "slot_start": 1731558480000, "slot_end": 1731559320000, "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1M-GH6csygqzvZdnn2OpNm0N7Qq6jQ2Z9bbEUeyCxiOY" + "resources_presentation": "https://docs.google.com/presentation/d/1M-GH6csygqzvZdnn2OpNm0N7Qq6jQ2Z9bbEUeyCxiOY", + "resources_slides": "" }, "vector": [ 0, @@ -146791,9 +146405,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -147246,8 +146857,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -147308,7 +146917,7 @@ "slot_end": 1731659400000, "slot_roomId": "stage-3", 
"resources_presentation": "https://docs.google.com/presentation/d/1noOR17_aYCG_ZJUZyMBpdklsW49xHNFwO6ykyh99eko", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/17917tWdCe0JC1fAz0PCBeJaMSL6kdn9p/view", "speakers": [ "albert-rubio" ] @@ -148074,9 +147683,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 6, 0, @@ -148626,8 +148232,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -148685,7 +148289,7 @@ "slot_end": 1731498000000, "slot_roomId": "classroom-a", "resources_presentation": "https://docs.google.com/presentation/d/1HA7lnd6KnPUYw130uBGO9g_dh4nt0Km9EV8l4rT8-rU", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1IBUShmsrSzsHLEqzScR9VhPrTgnw9o6u/view", "speakers": [ "hal-seki" ] @@ -149540,9 +149144,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -150005,8 +149606,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -150054,6 +149653,13 @@ "Security" ], "language": "en", + "sources_swarmHash": "da9e9a072c922d7a4acd547d980a47a675dd1fe56ac995d591a577d698f9fd83", + "sources_youtubeId": "3i6dZgUm7JU", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "julian-sutherland" ], @@ -150061,7 +149667,8 @@ "slot_start": 1731471600000, "slot_end": 1731477000000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1J21ogENKuo-BCOrzHLQIUHr3yr7F7Ki9YBkOmnf6sTU" + "resources_presentation": "https://docs.google.com/presentation/d/1J21ogENKuo-BCOrzHLQIUHr3yr7F7Ki9YBkOmnf6sTU", + "resources_slides": "" }, "vector": [ 6, @@ -150812,9 +150419,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -151379,8 +150983,6 @@ 0, 0, 0, - 0, - 0, 2, 2, 0, @@ -151432,7 +151034,7 @@ "slot_end": 1731660600000, "slot_roomId": "stage-3", "resources_presentation": "https://docs.google.com/presentation/d/1N-AwhGffiR0ykC4WCngFcoVQYru5isOOupvxSu_ZqIc", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1H3oOsVrgF-saUovJjy3g5vYSG6ntTHlk/view", "speakers": [ "wanseob-lim" ] @@ -152199,9 +151801,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -152753,8 +152352,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -152800,13 +152397,13 @@ "slot_end": 1731668400000, "slot_roomId": "main-stage", "resources_presentation": "https://docs.google.com/presentation/d/1zk6NkmsduzkZUpci9G4Ulqy-v_Ab9pktxWiTS7fPEh4", - "resources_slides": null, + "resources_slides": "", "speakers": [ + "skylar-weaver", "aya-miyaguchi", - "gubsheep", "justin-glibert", - "nicholas-paul", - "skylar-weaver" + "gubsheep", + "nicholas-paul" ] }, "vector": [ @@ -154120,11 +153717,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -154176,7 +153768,7 @@ "slot_end": 1731655800000, "slot_roomId": "breakout-1", "resources_presentation": "https://docs.google.com/presentation/d/1yeTJ8P67T5QYFuo5u1uIU8PtyMBoM_1mpCtwWM27BQc", - "resources_slides": null, + "resources_slides": "", "speakers": [ "alex-stokes" ] @@ -154932,9 +154524,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -155494,8 +155083,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -155551,7 +155138,7 @@ "slot_end": 1731661200000, "slot_roomId": "breakout-1", "resources_presentation": "https://docs.google.com/presentation/d/1yfoTOM-8vuH0ZUXAvbG6H3K4-yhQUtdboeWI5L5uJ7M", - "resources_slides": null, + "resources_slides": "", "speakers": [ "tim-beiko" ] @@ -156307,9 +155894,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -156871,8 +156455,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -156923,7 +156505,7 @@ "slot_end": 1731650400000, "slot_roomId": "breakout-1", 
"resources_presentation": "https://docs.google.com/presentation/d/1Ovum9wCpn-eOO_GaydQ7myGTVXFB4g6lDX0Btv4ApMI", - "resources_slides": null, + "resources_slides": "", "speakers": [ "lightclient" ] @@ -157093,9 +156675,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -158244,8 +157823,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -158297,13 +157874,14 @@ "slot_end": 1731644100000, "slot_roomId": "breakout-1", "resources_presentation": "https://docs.google.com/presentation/d/1O8Er1O0dSSedqAxQY9z1pjTRU-Hr46AyiOlBS6Dnvq0", - "resources_slides": null, + "resources_slides": "", "speakers": [ - "cody-crozier", + "pedro-gomes", + "tom-teman", + "niharika", "derek-chiang", "mark-smargon", - "pedro-gomes", - "tom-teman" + "cody-crozier" ] }, "vector": [ @@ -158461,7 +158039,7 @@ 0, 0, 0, - 0, + 6, 0, 0, 0, @@ -159106,9 +158684,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -159619,8 +159194,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -159664,7 +159237,8 @@ "slot_start": 1731394800000, "slot_end": 1731409200000, "slot_roomId": "breakout-2", - "resources_presentation": "https://docs.google.com/presentation/d/1KG701RsIoq1QyT_uxs6WdIDJVZLJz0WL2Zcdl1b-gzg" + "resources_presentation": "https://docs.google.com/presentation/d/1KG701RsIoq1QyT_uxs6WdIDJVZLJz0WL2Zcdl1b-gzg", + "resources_slides": "" }, "vector": [ 0, @@ -160635,9 +160209,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 0, @@ -160979,8 +160550,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -161029,12 +160598,12 @@ "slot_end": 1731646800000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/1KpnGjqycfpLNFKUjuTryELdVgZfiVhV0qOcH-f6orS0", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1FPkrcYulC9u3mlG4Q7QMg_VSPOZazDBZ/view", "speakers": [ - "albert-ni", - "barry", - "gubsheep", "justin-glibert", + "gubsheep", + "barry", + "albert-ni", "vitalik-buterin" ] }, @@ -161220,6 +160789,7 @@ 0, 0, 0, + 0, 6, 6, 0, @@ -161229,7 +160799,6 @@ 0, 0, 0, - 0, 6, 6, 6, @@ -162349,11 +161918,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -162408,7 +161972,7 @@ "slot_end": 1731409800000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1a63MEhn5HWzDRARwTVIqwlXGcTuEurRAij2MM5-ACMI", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/150sqO_98C8rtvEMVq5crZTtA6kSsccax/view", "speakers": [ "puja-ohlhaver" ] @@ -163196,9 +162760,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -163726,8 +163287,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -163782,7 +163341,7 @@ "slot_end": 1731562200000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/10k5sMswiuZ6sjCWh6_3DzYLI8Ix836tP-o0-fhgeUCI", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1usTYYfGYtQzCbyaP1WI8rEU2lpud8xLG/view", "speakers": [ "jay-baxter" ] @@ -164570,9 +164129,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -165100,8 +164656,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -165158,7 +164712,7 @@ "slot_end": 1731497400000, "slot_roomId": "stage-1", "resources_presentation": "https://docs.google.com/presentation/d/1U1W0kONT5CqQY5olh7ieFlQWhiN9s7HXZjQqM1AuBzg", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1VypQo2Sue78xs-FzT2lTVNhVS2IGabwD/view", "speakers": [ "eric-alston" ] @@ -165920,9 +165474,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -166476,8 +166027,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -166525,8 +166074,8 @@ ], "duration": 1513, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": 
"aee6df134fb5a9461bd2b460f55ad9332c8b4c900f495e9be865a132e73b1c79", + "sources_youtubeId": "rO1amHaKH2U", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736f9bf1b0f83434ddf1bf6", @@ -166537,7 +166086,7 @@ "slot_end": 1731650400000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1jXv2kbSefJjmF8zhC0ibw1x1paaVWwuDjrd37N_Nzj8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1MBGkwol6aPlJeQrf3L5YVddZYWwvWy0r/view", "speakers": [ "christoph-schlegel", "sxysun" @@ -167299,9 +166848,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 6, 0, @@ -167856,8 +167402,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -167920,7 +167464,7 @@ "slot_end": 1731567000000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1LhUMD8kPbukRuIeQXcyC9Nzn52Zdr6JH80byPktkErk", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1ch3UFbhUC5HFtrpH6VXITHr5AZmyBJTI/view", "speakers": [ "dr-john-fletcher-phd", "kilian-hikaru-scheutwinkel" @@ -168710,9 +168254,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -169239,8 +168780,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -169299,7 +168838,7 @@ "slot_end": 1731472200000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1zHvG3U1k7Ixpod7JNDZSJechFPRUfpGmYtaC0t0ufJA", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/103zrCHIutXZO8_mtK-68QpFlfxTOrWVv/view", "speakers": [ "elia-anzuoni" ] @@ -170072,9 +169611,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -170617,8 +170153,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -170673,7 +170207,7 @@ "slot_end": 1731583800000, "slot_roomId": "main-stage", "resources_presentation": "https://docs.google.com/presentation/d/1YTeRCkNqi_tWXXuL2gLaihLcpRslx1hjlAncemiP4bU", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1fZ6at-FEnXjAyX8qfE3znM8YwU3fIzpr/view", "speakers": [ "jarrad-hope" ] @@ -171453,9 +170987,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -171993,8 +171524,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -172042,8 +171571,8 @@ ], "duration": 1467, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "357fe5c92b73f63d2f7a80622e435d3f2259295c1227433add5efdfa0ad8bac8", + "sources_youtubeId": "ioCdBWLmuI8", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735c5159dbb7a90e10e8725", @@ -172054,7 +171583,7 @@ "slot_end": 1731576600000, "slot_roomId": "stage-5", "resources_presentation": "https://docs.google.com/presentation/d/19NEsXDqwrsMeZ3hvkerJHBNN4pTD7oRWn6fSazU8JWU", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1A4_Pa2cbDHHe6_L6gm2ktHCnOzWEdQW_/view", "speakers": [ "dc-posch", "nalin-b" @@ -172204,9 +171733,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -172860,9 +172389,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -173373,8 +172899,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -173429,7 +172953,7 @@ "slot_end": 1731582600000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/19vJbc3_xafMXjoRH6SLUAgIZjg0J4oTsoiFzXzdq3Ao", - "resources_slides": null, + "resources_slides": "", "speakers": [ "rev-miller" ] @@ -174292,9 +173816,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -174749,8 +174270,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -174807,7 +174326,7 @@ "slot_end": 1731390600000, "slot_roomId": "stage-2", "resources_presentation": 
"https://docs.google.com/presentation/d/1I8L_RL8n3RI4PQDkpmQfboZc2IVdS6GLh-psdPM4k5s", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1LzVukBA7im392myDzTOJpZUffVG5z5Nl/view", "speakers": [ "josef-je" ] @@ -175596,9 +175115,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -176125,8 +175641,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -176183,7 +175697,7 @@ "slot_end": 1731400200000, "slot_roomId": "stage-5", "resources_presentation": "https://docs.google.com/presentation/d/1Zq5DAdb9ha3cFF-gOzk6L82ORlY9uvzFl7T5sV1W2mg", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1NV0mr4e5x0vJ-BznUx5j8Xvb3iqQvidK/view", "speakers": [ "kelvin-fichter" ] @@ -176979,9 +176493,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -177503,8 +177014,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -177563,7 +177072,7 @@ "slot_end": 1731640500000, "slot_roomId": "stage-3", "resources_presentation": "https://docs.google.com/presentation/d/1abSiS9Ilz8g4Nc9doFglzH8ruOPatELbzUm3z4IqpRE", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1h5eEqI2PO5I3vi-Oy7ZPE5InVRBtFOuA/view", "speakers": [ "minh-ho" ] @@ -178317,9 +177826,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 6, 0, @@ -178881,8 +178387,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -178933,10 +178437,10 @@ "slot_end": 1731562200000, "slot_roomId": "classroom-e", "resources_presentation": "https://docs.google.com/presentation/d/1MEHwnn1XVg3IxqYq8U8Z80rO7dw8-zksCQ9QTwsL6X8", - "resources_slides": null, + "resources_slides": "", "speakers": [ - "aya-miyaguchi", - "simona-pop" + "simona-pop", + "aya-miyaguchi" ] }, "vector": [ @@ -179120,8 +178624,8 @@ 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -180251,11 +179755,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -180311,7 +179810,7 @@ "slot_end": 1731402000000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/15uqb6bZbGBerAG0KgTCVf2KHzFimQ1D5YSJ8jUna96c", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1xK1Ig499KHKUofOKpVgT2W_xD1biJh61/view", "speakers": [ "songyi-lee" ] @@ -181166,9 +180665,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -181631,8 +181127,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -181691,7 +181185,7 @@ "slot_end": 1731583800000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/12xOmjbuWiCGoJo_Bx-KMT5zB8_88W6kmYHfhx1CzVcA", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1vSfQ-yqKGV-B-5P-B-kc9FYQU0rulYcC/view", "speakers": [ "chris", "davide-rezzoli" @@ -182448,9 +181942,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -183012,8 +182503,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -183069,7 +182558,7 @@ "slot_end": 1731494400000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1zKy1Wacd_g6VIy9gBPNTLczV1UoUIzGVToCNnN39u1c", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1qp0D9X40TlsiZ36J7E1P3YhccrliNn6u/view", "speakers": [ "evin-mcmullen" ] @@ -183222,7 +182711,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -183824,8 +183312,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -184389,8 +183875,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -184433,7 +183917,7 @@ "duration": 467, "language": "en", "sources_swarmHash": "81e87623a13ffdd562e5c7608e9481ec24aa6405629b9a8fedfa843deab3d387", - "sources_youtubeId": "ngz-Zug_LS8", + "sources_youtubeId": "zIqVFYQqPk0", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67349e5c9dbb7a90e13140a5", @@ -184442,7 +183926,7 @@ "slot_end": 
1731493800000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1oHb6j9HUcr5SBg9cKc9eUxdiZdwushIlE08dKhrQ1zE", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1qJHjV9G5v7I6s3DSDZb7gyTx-QOO-Nql/view", "speakers": [ "sebastian-buergel" ] @@ -185344,9 +184828,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -185760,8 +185241,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -185808,7 +185287,7 @@ "slot_end": 1731585000000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/15jv-W1ReL9GrekRSr8kZvYKEsNZleXRAf2BtcLW2I5s", - "resources_slides": null, + "resources_slides": "", "speakers": [ "vitalik-buterin", "robin-hanson", @@ -187126,11 +186605,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -187178,8 +186652,8 @@ ], "duration": 527, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "76bd2634e5d453bfa2824fcabc6fbef5805676a8b463305dcfc383fc83a61af1", + "sources_youtubeId": "5DY0rpnu3pg", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67374a741b0f83434d940cae", @@ -187190,7 +186664,7 @@ "slot_end": 1731658800000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1fuSrN9JQHv91E6bFCwDtFCGMP2T4Sg6pk3Mhh_y-ZYg", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1do1auA7GAx-rD64irTWSWsiQF4uXsu4D/view", "speakers": [ "han-tuzun" ] @@ -188070,9 +187544,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -188510,8 +187981,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -188565,7 +188034,7 @@ "slot_end": 1731580800000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/1f6N0D4m-xFEHIkJO7OzkhfJlUtBWAno5sr5jRV3Q5kk", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1pkPZzUe3L6PI12hbSwDs78WfmRbGxhq4/view", "speakers": [ "isla-munro", "una-wang" @@ -189355,9 +188824,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -189884,8 +189350,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -189943,7 +189407,7 @@ "slot_end": 1731403800000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1Pq-UfROf_3nVsy2VhJLpxOcTmyPsVPQsHMIH4SZRIfY", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1ofAkN0PJc0taGsGZCR4HRJ_e8ZfEA2A7/view", "speakers": [ "gabriel-shapiro" ] @@ -190793,9 +190257,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -191261,8 +190722,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -191320,7 +190779,7 @@ "slot_end": 1731394800000, "slot_roomId": "stage-3", "resources_presentation": "https://docs.google.com/presentation/d/1m4BLa2dYtnZhDK4AVI7x-ufBecnidvE3pGBZchBa82k", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1mS2lnCqkBjQit39J_YAyDojxS_RJF09M/view", "speakers": [ "lefteris-karapetsas" ] @@ -192170,9 +191629,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -192640,8 +192096,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -192686,8 +192140,8 @@ ], "duration": 549, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "e303a2a1ebd6ba053493f2c8a2e7ee2fa9e9782406379462d20600be1549f1f9", + "sources_youtubeId": "w3dZMvSOYzg", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736d6cf74749a4b89309278", @@ -192698,7 +192152,7 @@ "slot_end": 1731642600000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1YmHz7J7_ErPzoGv9lX-paIBhxZrCFEk_NqqiRA-wNk8", - 
"resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1DgoUV3fxjTFzkGdsm845p2f8Fpae5jTb/view", "speakers": [ "jerome-de-tychey" ] @@ -193483,9 +192937,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -194018,8 +193469,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -194076,7 +193525,7 @@ "slot_end": 1731407400000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1t0x6tAJgffLnu_2grB_zh1mp_RzOCOpsI9MX1oYUMlY", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1nSPoVGUa-X0K_WHRAeIRS-Ee5YovlOzb/view", "speakers": [ "sarah-allen" ] @@ -194931,9 +194380,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -195394,8 +194840,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -195450,7 +194894,7 @@ "slot_end": 1731569700000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/16BGl6R_AIgh1Fz7_yHfLOgt8VeqXZacUFlcwQz9qvOU", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1pE7XPZg4ez44F7Lv-fdNYZId8e9illYJ/view", "speakers": [ "raina-macintyre" ] @@ -196204,9 +195648,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -196770,8 +196211,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -196817,8 +196256,8 @@ ], "duration": 1209, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "35935a9b46ad253cf227167671ae96624ef153e85cf54b1f0fd1a02dbfb710a5", + "sources_youtubeId": "0xEaNNSEk8A", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673477539dbb7a90e13b9365", @@ -196829,7 +196268,7 @@ "slot_end": 1731491400000, "slot_roomId": "stage-6", "resources_presentation": "https://docs.google.com/presentation/d/1p6CtJjA99UENn3f3VpXSbI_lYWQj6O34OdWgb8FUKiE", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/15yF5An8NMTymdQGfyw5QLfKyqHfcfqPT/view", "speakers": [ "amir-taaki" ] @@ -197609,9 +197048,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -198149,8 +197585,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -198192,6 +197626,13 @@ "User Research" ], "language": "en", + "sources_swarmHash": "fd5d936b101d87ae0481fc107d0991452d257551ca224e89a95c302747151d84", + "sources_youtubeId": "J7rIcyjJc3o", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "lisa-jy-tan" ], @@ -198199,7 +197640,8 @@ "slot_start": 1731484200000, "slot_end": 1731484800000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1a7L3Uq6GwbOc7abUc_joXIpX0LM57pFSIKpUmrjx4rU" + "resources_presentation": "https://docs.google.com/presentation/d/1a7L3Uq6GwbOc7abUc_joXIpX0LM57pFSIKpUmrjx4rU", + "resources_slides": "" }, "vector": [ 0, @@ -198994,9 +198436,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -199516,8 +198955,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -199565,8 +199002,8 @@ ], "duration": 424, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "6e1deb4588302899e6c3ac679a05cba40b4c5f0e69f113b648f1fa16caae67f2", + "sources_youtubeId": "7kzWsR_RSkQ", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673748ae1b0f83434d688ac5", @@ -199577,7 +199014,7 @@ "slot_end": 1731657000000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1XRh2Y67-uqHjSpr6HxoT0Q9rUneXHLaUjz_9YbFd3SM", - "resources_slides": null, + "resources_slides": 
"https://drive.google.com/file/d/1dFE1_Xj4U2mtDTW878sJlLy9XIatpYUY/view", "speakers": [ "aellison-cassimiro" ] @@ -200331,9 +199768,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -200895,8 +200329,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -200953,7 +200385,7 @@ "slot_end": 1731490200000, "slot_roomId": "classroom-c", "resources_presentation": "https://docs.google.com/presentation/d/1hKCNu1k-EbMC3GsA0i_-SO8vLwgPTyED9D91FSwTjoU", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1nV0Io6JsOXdUOHC0-wbyVmhdMxYXhdSZ/view", "speakers": [ "g-nick-gnidan" ] @@ -201728,9 +201160,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -202274,8 +201703,6 @@ 0, 0, 0, - 0, - 0, 2, 2, 0, @@ -202330,12 +201757,12 @@ "slot_end": 1731650400000, "slot_roomId": "stage-6", "resources_presentation": "https://docs.google.com/presentation/d/1SpHoMINj55MzEUWqqO7ToaiDbowMedsSM9tKnMWMaSY", - "resources_slides": null, + "resources_slides": "", "speakers": [ - "harith-kamarul", - "loi-luu", "matthew-tan", - "tn-lee" + "harith-kamarul", + "tn-lee", + "loi-luu" ] }, "vector": [ @@ -203333,9 +202760,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 2, 2, @@ -203653,8 +203077,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -203693,7 +203115,8 @@ "slot_start": 1731474000000, "slot_end": 1731475800000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1oXwhbXtA9_IkZiM1hj6x3YDfhq54XQGQeoHUbTHVK4I" + "resources_presentation": "https://docs.google.com/presentation/d/1oXwhbXtA9_IkZiM1hj6x3YDfhq54XQGQeoHUbTHVK4I", + "resources_slides": "" }, "vector": [ 0, @@ -205006,11 +204429,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -205056,6 +204474,13 @@ "Zk Rollups" ], "language": "en", + "sources_swarmHash": "c9a23e3b6a9906ea44962ac58a6e09b68f3d360182917fcd270688efa206aee9", + "sources_youtubeId": "UmvxGOLEMLc", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "joe-andrews" ], @@ -205063,7 +204488,8 @@ "slot_start": 1731405600000, "slot_end": 1731407400000, "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1faHlm1Vau0v1_f53rf67KFbBYY4FT3pugB04UolFn_M" + "resources_presentation": "https://docs.google.com/presentation/d/1faHlm1Vau0v1_f53rf67KFbBYY4FT3pugB04UolFn_M", + "resources_slides": "https://drive.google.com/file/d/1NlDWwn2TMsryPhQyAL1cVqaq01pZV7Z_/view" }, "vector": [ 0, @@ -205883,9 +205309,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -206378,8 +205801,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -206436,7 +205857,7 @@ "slot_end": 1731467700000, "slot_roomId": "stage-1", "resources_presentation": "https://docs.google.com/presentation/d/1Wcw6Mzk0DP95udiY_4VYK0pAVZ2Ac5fQgZmO7yWbJSg", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1oFesUG7pL5nwDig2XojA00nIAvxDejEn/view", "speakers": [ "devansh-mehta" ] @@ -207316,9 +206737,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -207756,8 +207174,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -207810,7 +207226,7 @@ "slot_end": 1731472200000, "slot_roomId": "stage-2", "resources_presentation": "https://docs.google.com/presentation/d/1B7KXH5uVHB04jWwnsYtQMYYbRlXaYPjx6HTM5n2vYhk", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/13CziBz3ONvnzss5-5qWPwTiItbR43bvZ/view", "speakers": [ "kolby-moroz-liebl" ] @@ -208584,9 +208000,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -209130,8 +208543,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -209169,6 +208580,13 @@ 
"Network State" ], "language": "en", + "sources_swarmHash": "8b4b3cf0da0672c8c54310cdfa69005eb4d812e2213e0c328ef76b2c5f1e32bf", + "sources_youtubeId": "47dUdOvhKtM", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "joe-lubin" ], @@ -209176,7 +208594,8 @@ "slot_start": 1731580200000, "slot_end": 1731582000000, "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/11eX1oRXoI4urF046XwUWj6LWwTjgZKotWz4aBKhGwB4" + "resources_presentation": "https://docs.google.com/presentation/d/11eX1oRXoI4urF046XwUWj6LWwTjgZKotWz4aBKhGwB4", + "resources_slides": "https://drive.google.com/file/d/1nGgfMy90j1SVWRdXrnRQLEcAWLuedDL1/view" }, "vector": [ 0, @@ -209974,9 +209393,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -210491,8 +209907,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -210541,17 +209955,19 @@ ], "duration": 1644, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "d5621e2ea0a49b23844b7151c58ef13724516abf76cbd7ebc6f88e1234e217aa", + "sources_youtubeId": "VduhOSI-CxQ", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6733f6d03a168eb535426732", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6733f6d03a168eb535426732.vtt", + "transcript_text": " Hello everyone. Hi everyone. Stick around because that was really the perfect introduction for our talk. In a wider framing for how decentralized currencies are actually being put to work in different emerging economies. I'm really excited to talk to you about how we're using the free permissionless distribution of money to really impact people on the ground in last mile communities that are currently underserved by traditional financial systems. I am here as the executive director of a project called GoodDollar. GoodDollar is a universal basic income protocol whose mission is actually to enable anyone with access to a smartphone, with a cell phone, to be able to access decentralized currencies and actually onboard into crypto. We do this through distributing a digital universal basic income every day, and that's helped us onboard nearly a million people. I am thrilled to be joined here today with Jambi Jorge, who is the director of grassroots economics, which is, I would say, probably one of the leading projects, if not the leading project, that's actually innovating with decentralized currencies and really breaking the monopoly on money in terms of helping communities to create more credit, more commerce bottom-up through using decentralized tooling. And it's a great example of Ethereum in the real world. I will say that I've been building in this space for a long time, and it makes me very excited to see the talk that came before, if you will, that really shows that this progress is happening at scale. And Jambi is, I'd say, the zoom in, the double click in, as to the framing that we just saw. So some of you guys might have seen, crypto is useless. Some of you guys might have seen this tuk-tuk yesterday or seen some people walking around with a crypto is useless hat. And the idea here is actually a very clever campaign out of Celo, which is that crypto is useless unless you've had a real need to use it. And the meta-framing for all of this is that there are still 1.7 billion people around the world who are living in countries where inflation outpages wages. 
If the past few years have been any indication, that's going to continue to increase. There are 1.4 billion adults around the world that remain unbanked. That means people who have no access to any of the financial services that Joseph just mentioned in terms of savings, payments, credit, etc. Most of these 1.4 billion people have access to a very magic computer that they carry around in their pockets, which is a smartphone. And that's what enables them to access the same Ethereum Virtual Machine that we all use. And so the 6.17 million crypto owners today represent a big gap to close in terms of where crypto still has its biggest potential to make a real impact on the financial lives of billions around the globe. We're here to talk about decentralized economic opportunities. So what are some of the principles that we're looking at? The first is permissionless access. So how are we actually removing gatekeepers? Crypto is the most obvious example of that. Anyone can sign up for a Web3 wallet. Anyone can send or receive currency. It is this permissionlessness that actually enables local empowerment to happen bottom-up, where communities are able to grow in their own super cycle, if you will. And ultimately, this is designed to drive resiliency and adaptability for economies, where it doesn't really make sense that the global macroeconomic shocks of, say, the U.S. presidential election have such a big effect on last-mile farmers or communities all around the world. And so this is designed to actually increase resilience so that there's less of a single point of failure for individuals and communities. The reality is that still today, it's in places where capital is needed the most that capital is the most expensive. This comes as no surprise to anyone here from the real-world Ethereum track. But in Nigeria, just for context, for a $100 to $1,000 loan, you're looking at an annualized interest rate of anywhere from 30% to 50%, right? So if you're actually looking to start a small business, it means that money is more expensive for you than it is for many of us from the United States or from Europe. And the user experience has also kept crypto adoption from happening for a number of reasons, right? So up until very, very recently, it's been very hard to access Web3 in terms of the sign-up process for a wallet. Certainly, if you don't have access to a bank account, it's not that simple to go online and buy $100 of Bitcoin on Binance. All of this results in there being a very high cost of experimentation for individuals. And this is what creates this narrative around crypto being useless, right? It's that with all these challenges facing the space, it's hard to build products that seem like they're of real use. However, in 2024, we're facing a new reality where not all of our problems have been solved, but certainly the potential exists to make a massive change. We see that there are a number of simple, mobile-native solutions. There are over 100 million stablecoin holders around the world. This has probably been seen as one of the key killer use cases of crypto today. And this creates the preconditions for us to really begin to build for real communities and real needs. All of this is critical because everything happening on-chain, everything happening transparently, actually improves our odds for financial access, because it enables individuals and communities to really build credit history in a transparent way.
As we just discussed, it's actually lower-middle-income countries that are driving crypto adoption today. So India, Nigeria, Indonesia are the fastest growing crypto markets, and 15 of the 20 fastest growing markets come from low-income countries. And unsurprisingly, it's decentralized systems and currencies that are leading the way. This is most likely because of leapfrog innovation, which was referenced before, where, when there's no pre-existing technology to overcome, people just jump directly to the next generation. And so Grassroots Economics, which has been building in this space for a long time, I consider one of the flagship leapfrog innovations in the space, because they're innovating both on the payment rails and fintech level, but most importantly on the currency and the monetary level, in terms of really breaking the monopoly on money and how communities can create their own value, create their own creditworthiness, and actually expand the amount of economic activity that they're doing within the community. This is where crypto has its biggest potential, and it's my true pleasure to introduce Damaris Njoroge, who has been building this and implementing this on the ground for actually nearly 100 communities. And she's flown here today from Kenya to tell us about her work and some of the impact that it's had on their communities. Thank you, Anna. Yeah, my name is Damaris Njoroge. I have been working for Grassroots Economics Foundation for the last six years, and I'm really happy to be here. I am an advocate for community inclusion currencies for poverty eradication and sustainable development. So how do we heal a post-colonial legacy of torn communities and ecosystems? How do we enable communities to create and scale credit? How does this enable wealth creation to stay within the communities? This: commitment pooling. We've been working on commitment pooling, which is an economic protocol inspired by indigenous wisdom. So when I talk about indigenous wisdom, I mean rotation of labor associations. I come from the Kikuyu community in Kenya, and we call it Mwethia. We have so many communities in Kenya with very different names for this indigenous practice. And it's not just in Kenya, it's even global. What you're looking at, the pictures here, the first one is a type of a rotation of labor, an educational kind. That is a service: women grinding maize for flour. That is an infrastructural kind, and there are so many. So, this is the difference between what was and what is now. So, what was the shared economic commons? You know, we had synergetic commitments, harmonious flows, accountable systems, you know, ecosystem regeneration, democratic stewards, unlike what we have now, you know, the fiat dominance, open markets, etc. Now, in communities right now, they have these six assets. For them to understand they have the six assets, and to understand at what level these assets are, is very important, and that is what we're doing with communities now. And this image here is them realizing that they have it. Like, I have what you don't have, and we can benefit from one another. Creating that web is really visualizing what they can and what they have to offer one another. Now, I was talking about rotation of labor associations. So this is all about making commitments, fulfilling commitments, and accepting commitments.
So just to explain rotation of labor in a bit, this is family one, family two, and family three. All the families have needs. They need a house built. They need a farm fixed. So what they do, they create commitments, and then they say, okay, fine, I'm committing, I will come and help build a house for family one. I'm also committing that I will help family two build a house. And eventually, they pool their commitments together, okay? So those are pooled commitments. So that means when I make commitments, I will accept commitments and I will also fulfill commitments. So when they all fulfill commitments, they are allowing accountability and exchange, and they have clear and commonly agreed-upon rules. So eventually, when they do a cycle of helping one another, giving commitments, accepting commitments and fulfilling commitments, there is a range of physical assets grown, social assets grown, human, spiritual, political and natural assets, you know, they change. Now, Grassroots Economics Foundation has been helping formalize these commitments by adding technology, and now it becomes easier for me to value what I'm offering in a community. It's easier because they have a unique signature, there is an expiration date, and you can decide to have a demurrage or not, but a demurrage is very important to help with the circulation of the vouchers. And then it also means that whoever accepts that voucher has the right to redeem it from the issuer. Okay? Now, making commitments and fulfilling commitments and accepting commitments is paramount, and this is what used to happen, and it's now what we're advocating for, but trust is very important. This is how trust was grown then. This is a nice illustration. We do a lot of games and practicals when we're training communities. And this is how they understand the current reality, by understanding the six assets and at what level is which asset. And when they do this, they're able to come up with a vision and know how each person, each action step, will be worked on according to the rotation of labor associations. Now, this is production financing. This is also a very important protocol that we've borrowed from our forefathers. It only means that when the community members create vouchers, they put them into the pool. And when a financier wants to get some of the products that are backed by these vouchers, they will need to put money into the pool and pull out vouchers. I'm going to give a good example of a financier who pre-ordered coconuts from farmers in Kilifi. So what they did, they put money into the pool and took the vouchers backed by the coconuts. Coconuts take five to seven years to grow. So that means the financier had the vouchers up front, and the community members got the money up front. So that means they were able to sort out issues: fees, better farming methods, better tools. So we're just basically bringing a nice socioeconomic process that is about bringing together a nice blend of, you know, changing the traditional demand and supply models and, you know, tokenization. Now, this is what the communities in Kenya are doing. They're using USSD, and there's also the web interface. The web interface is mostly for those who do not have smartphones, so they can create their own paper wallet, and they can use one phone.
Now, we have so far seen over 65,000 households using vouchers in Kenya today. We have $4 million plus worth of transactions in USD, and a 5x average multiplier effect. So, pooled commitments enable us to scale into the future to our beautiful shared heritage. Thank you. Thank you. And so, this came up in the talk right before, but it's actually about how we use these systems not to create new trust, but to unlock trust that actually already exists on the ground. And this is what I've admired about the work that Grassroots and Damaris have been doing for so many years now: it's actually looking bottom-up from the community infrastructure and developing tooling that actually enables those communities to unlock and assess value in new ways. Excitingly, this is one example of, I believe, thousands, if not hundreds of thousands, that are happening around the world, in terms of communities that are beginning to build circular economies using decentralized currencies that actually enable people to create their own value. This is a circular economy image from a community in Brazil that has hundreds of women that actually operate using the GoodDollar token as a community store to buy and sell services. We also have great examples of different entrepreneurs around the world who are using decentralized currencies bottom-up to create airtime shops, enabling people to use crypto to swap into mobile minutes, into airtime value, which actually is a basic need for anyone living in the contemporary world. And I think, really critically, the reason why this is relevant, and relevant now, is because now we start to compose the full stack that actually enables this to become practical and possible, and it involves more than just technology, right? And so what we see is that, building the stack bottom-up from the technical layer, what makes it actually useful for real people, for the hundreds of people that Damaris and Grassroots have touched, is actually the social element, the community and education element, that actually brings people along and customizes these technologies for their needs. So to bring it back, crypto is useful, but it's most useful in an ecosystem that has all of the different components of the stack that make it useful. And so this suit that I'm wearing is not just for fun. It is fun, though. It's to represent the idea of an ecosystem and the value of an ecosystem, and that actually being the critical enabling linchpin to making the real-world Ethereum useful. And I think everyone at Devcon who's made such an effort to come here to actually contribute to this ecosystem understands that. And the more localized ecosystems that we develop, the more we will see this come true. So yes, this is crypto today. Real-world Ethereum is decentralizing economic opportunity. And I think we need to stay passionate and stay focused, because this is still the biggest upgrade to money since it went digital. We see decentralized economies actually upgrading the money itself, and it enables money to flow back to the parts of the economy that are otherwise deemed unprofitable or non-profitable. And this is what Grassroots and other projects are doing: it actually enables the real wealth to come back into the community, because in, you know, many communities around the world, the people are there, the resources are there, the desire to collaborate is there. But what's actually missing is money.
And so if we can actually innovate on the money itself through using the right tooling, then we are able to support the decentralization of economic opportunity that we want to see happen. Thank you very much for listening. And thank you to the Ethereum Foundation for making this possible. All right. Awesome, guys. So again, Q&A session. We have the QR code on the screen. Scan it and send the question to the screen so we can see what burning questions you have here. All right, guys. Let's see, I think people just wait a little bit. People are still scanning. I think we have a few minutes while we're waiting for them to send the question. Anything you want to say, add on a little bit? There's something else I wanted to add. For the production financing page, when you put in money, community members can now withdraw that or ramp that via Valora, MiniPay, through Kotani, OneRamp, PhoneBank into Kenyan shillings. So it's really been amazing. And I would also want to thank the Ethereum Foundation for making sure that I got here. Thanks. All right. You have a question? All right. Do you mind passing the mic? One second. Oh, here's the question. But let's hear from the ladies first. Okay. Okay, go ahead. Thanks for calling me lady. I'm not sure I identify that way, but sure. I'm sorry. It's okay. So, Damaris, this is a question for you. What are the biggest barriers that are still faced? Because you've obviously had to overcome quite a few to get the technology in place to support these existing traditional systems. What are the barriers that still exist, if any, in utilizing technology in this way? Yeah, so one of the major barriers that we've had to deal with, you know, that we still experience right now, is the network connectivity in some of the rural areas. So when they are trying to, sort of, exchange in the swap, yeah, so network connectivity in some areas really is a challenge, and we normally sometimes have to force them to go to the nearest cyber cafe or the nearest school, because we're assuming that is where they would get network. So movement challenges, I would say that is, like, one of the major ones, and really smartphones, phones in some rural areas. So we've had to, like, even give a group, like, one phone where they need to, like, log out, log in using their paper wallets. Sure. We have three questions on the screen. Anything you want to answer first? So, the first one, how do we quantify impact? So now, when I was talking about production financing, so what we do, we borrow this from the past, okay? So in communities, there were leaders in each community, like village elders. They would sit and decide the next rotation of labor activities. So now we don't have staff at Grassroots, like salaried staff. So what we do, we work with stewards, and stewards are members of the community. So they are the ones that handhold the community members. So whenever the communities are meeting for rotation of labor, whatever it is that they do, is it building houses, is it building roads or tilling farms, they send that as a report. So what we do, we put Celo dollars into the pool, get their vouchers. So the vouchers in this case are backed by their commitments or their M&E services. So when the report is in, then we pay them back using the vouchers. So really, the trades on the sarafu.network platform represent the reports that they've sent. So these trades in the eastern part of Kenya equate to 10 houses, 5, etc. All right, that's awesome. Let's go to the next one.
I think we have like three minutes for questions, Maria. Yeah. No, no, you take your time. I'm going to address the first two in one answer. So how do you actually deal with onboarding? I'm going to speak now from the 'we gave money away' perspective. From the perspective of GoodDollar, UBI or free money has actually proved to be an incredibly effective onboarding method in terms of bringing people into Web3. And I think what's important about that is that we see that that onboarding mechanism then translates into people exploring a deeper financial life on-chain. And so obviously a good UX, a simple onboarding experience, but actually making assets free and accessible and encouraging people to experiment has been critical to onboarding people, retaining people, and encouraging their ability and desire to experiment. There was one other question, but I forgot. Oh, it's about creating trust between local... How can trust between local communities in emerging countries and the global crypto ecosystem be unlocked? So I think, in my experience, a lot of this is actually happening on the local and the community level, right? So it's through local community leaders that create the relevant education and context for the individuals that come along, and are the individuals that are also members of that community. And as builders, it's about having the relevant on- and off-ramps and the relevant tools that are specific to that particular country and country context. So I think that on- and off-ramps, which come up all the time, and the wallet experience have actually proven to be the really critical bridges in the actual user experience, when it goes from someone using a very specific local app or currency such as Grassroots, versus the cash-out experience, a.k.a. fiat in my pocket, mobile minutes on my phone, Ethereum in my wallet. All right. Any of the three other questions you want to answer? We have like one more minute if you want to. Yeah. How do we deal with failures of trust within a group? Like, from our experience at Grassroots Economics, we work with already existing groups that have their own constitution, and they know how they deal with some of these challenges. So what we're doing is just adding technology to a practice that they already know, that has already been there. So most of the time, the groups follow what they normally do, even without us, amongst the community. Is it going to the chief? Is it going to the village elder? How do we sit down and sort that out? So mostly it's within the groups. Want to do one more? If you have a short one, we have 30 seconds. You can do it if you want to. How can these initiatives be protected from bots? Yeah, go ahead. Sure. How can initiatives be protected from bots? It's a great question. Certainly when you're giving free money away, it's something that you have to confront and deal with. Decentralized identity, or decentralized secure unique identity, is not a new problem, and there are multiple ways to skin the cat. I'd say actually what makes me the most excited now is, like, new solutions that are based off of existing community and trust, that actually enable members of groups to verify one another and verify the unique identity of one another. And I think this is a space that many builders in Ethereum have been tackling for many years.
And we're about to see a next generation of solutions that are actually much more usable for individuals to verify one another. And I think that's how bots will be dealt with moving forward.", "eventId": "devcon-7", "slot_start": 1731409200000, "slot_end": 1731411000000, "slot_roomId": "stage-6", "resources_presentation": "https://docs.google.com/presentation/d/1_dONrIsV4L0B5mPO_9XqzEKOZKIP8ACpAPTEAQfEWMQ", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1ou7qNUWv2FPi4pnln0ZfGstsznoTy0Na/view", "speakers": [ "anna-stone", "damaris-njoroge" @@ -211342,9 +210758,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -211872,8 +211285,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -211919,6 +211330,13 @@ "Use Cases" ], "language": "en", + "sources_swarmHash": "67d13d6f3f5b77d476ea538e34aa83aba2921660fa653b0adc74a46bb181e4b0", + "sources_youtubeId": "btaNddkfyLg", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "andreas-tsamados" ], @@ -211926,7 +211344,8 @@ "slot_start": 1731555000000, "slot_end": 1731556200000, "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1XQpLsYFcvAaRsWM6b13TUaTHGrXpSSKJ4fVPEoKkJfw" + "resources_presentation": "https://docs.google.com/presentation/d/1XQpLsYFcvAaRsWM6b13TUaTHGrXpSSKJ4fVPEoKkJfw", + "resources_slides": "https://drive.google.com/file/d/1nUVGIbij3EuHqzjsqCgaqo4TqIh0MOGX/view" }, "vector": [ 0, @@ -212756,9 +212175,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -213241,8 +212657,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -213289,7 +212703,7 @@ "slot_end": 1731486600000, "slot_roomId": "classroom-e", "resources_presentation": "https://docs.google.com/presentation/d/1W0jwOLdutdtpuJNo6WvxKfcV8v0h4mUvf0CLm68DfjQ", - "resources_slides": null, + "resources_slides": "", "speakers": [ "julien", "grant-southey", @@ -214610,11 +214024,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 2, 0, @@ -214657,6 +214066,13 @@ "Testing" ], "language": "en", + "sources_swarmHash": "14177e03121feac769f91c2e8d4d3a396ab599adb4c47087f768bcd41cca9aac", + "sources_youtubeId": "N5WzD8ptGJA", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "mikhail-kalinin", "alex-vlasov" @@ -214665,7 +214081,8 @@ "slot_start": 1731578400000, "slot_end": 1731580200000, "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1MDK3dwXPQcTMGQIVnxa-4Kpkp17RJexPuQt0c3zp1_Q" + "resources_presentation": "https://docs.google.com/presentation/d/1MDK3dwXPQcTMGQIVnxa-4Kpkp17RJexPuQt0c3zp1_Q", + "resources_slides": "https://drive.google.com/file/d/1IDmfIELnPZ1hfh0IVFUDyE1MEDVltMmE/view" }, "vector": [ 6, @@ -215417,9 +214834,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -215980,8 +215394,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -216040,7 +215452,7 @@ "slot_end": 1731489000000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1c2MfQGdbJapup-3V1uRqWXcF71JAgZPMM_0mp-IIXL8", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/189jGPVhFdFZ3JXuGocjTHjHtGjw2xpAW/view", "speakers": [ "nipun-pitimanaaree" ] @@ -216794,9 +216206,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -217361,8 +216770,6 @@ 0, 0, 0, - 0, - 0, 2, 2, 0, @@ -217421,7 +216828,7 @@ "slot_end": 1731645900000, "slot_roomId": "classroom-b", 
"resources_presentation": "https://docs.google.com/presentation/d/1s2NkLIuneQtBUvfLLlkFlOE3IWetDHM6-4OAQMItN-0", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/19gf4SRrr0gnNrZC2edSzmlB3w2quGwEM/view", "speakers": [ "heidi-wilder", "peter-kacherginsky" @@ -218207,9 +217614,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -218740,8 +218144,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -218784,6 +218186,13 @@ "UI/UX" ], "language": "en", + "sources_swarmHash": "72ff21af14505627696a7bd93c8c07e233c737eb6578ca391f879c0a1fe3b6e9", + "sources_youtubeId": "qGSGzHMqsSQ", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "charles-guillemet" ], @@ -218791,7 +218200,8 @@ "slot_start": 1731409200000, "slot_end": 1731409800000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1oJyQ2nbiJ3dVyeigOw76p6xClrq9LFrInO05tg2QbVg" + "resources_presentation": "https://docs.google.com/presentation/d/1oJyQ2nbiJ3dVyeigOw76p6xClrq9LFrInO05tg2QbVg", + "resources_slides": "" }, "vector": [ 6, @@ -219542,9 +218952,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -220106,8 +219513,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -220160,7 +219565,7 @@ "slot_end": 1731497400000, "slot_roomId": "main-stage", "resources_presentation": "https://docs.google.com/presentation/d/18NUBFhBTGUc1VCTGb7xM78rgqQTtMu78w-hWIYbTYxA", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/13OHA0aPhVbPt4qitwe7HAI7-fNqOkRv-/view", "speakers": [ "karl-floersch" ] @@ -221172,9 +220577,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -221478,8 +220880,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -221541,12 +220941,12 @@ "slot_end": 1731646800000, "slot_roomId": "stage-1", "resources_presentation": "https://docs.google.com/presentation/d/1VL2_zkuomzUJ59v6VkJCzFGACRwHLUks6cXhys2kzmA", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1VKr9CqjlWrX8Y_Nm5DNmSPWZm64LSn5O/view", "speakers": [ "griff-green", + "nico-gallardo", "james-kiernan", - "lauren-luz", - "nico-gallardo" + "lauren-luz" ] }, "vector": [ @@ -222356,9 +221756,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -222864,8 +222261,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -222916,7 +222311,7 @@ "slot_end": 1731496800000, "slot_roomId": "stage-1", "resources_presentation": "https://docs.google.com/presentation/d/15PZ749rPc9HedXMUE_qdwIMFPhSIfM_Qt1GSmEy4JsU", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1IObHgeQDaeL0hcSxTbumWTZSaud--NSd/view", "speakers": [ "timdaub" ] @@ -223677,9 +223072,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -224234,8 +223626,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -224303,14 +223693,14 @@ "slot_end": 1731661200000, "slot_roomId": "stage-1", "resources_presentation": "https://docs.google.com/presentation/d/1XzYtYO3NQtFr1B6HDE_M1alRWMpmL2iUvLkfcX4Z02g", - "resources_slides": null, + "resources_slides": "", "speakers": [ - "0xrajeev", - "harikrishnan-mulackal", "josselin-feist", + "0xrajeev", "matthias-egli", "mehdi-zerouali", - "mooly-sagiv" + "mooly-sagiv", + "harikrishnan-mulackal" ] }, "vector": [ @@ -225062,9 +224452,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -225626,8 +225013,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -225670,8 +225055,8 @@ ], "duration": 554, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "3d144e5a76361e6fc5c09fda245ccedab865abf52497afcee7f791dba178d7aa", + 
"sources_youtubeId": "Q-sMeimSWJU", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736d76774749a4b89323f1c", @@ -225682,7 +225067,7 @@ "slot_end": 1731643200000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/12_jK1k9PlkGv-cHbW_ySIi8eDOSs8LavI_tRw_CJE10", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1gs2a9CoJvoTKSpAlvhqGza_MZ_TXJu1B/view", "speakers": [ "remy-roy" ] @@ -226467,9 +225852,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -227002,8 +226384,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -227067,7 +226447,7 @@ "slot_end": 1731580200000, "slot_roomId": "stage-4", "resources_presentation": "https://docs.google.com/presentation/d/1frO1LBrX3h2e6LoJqHpvDElvChyEEDg6CFrkzwQt5VY", - "resources_slides": null, + "resources_slides": "https://drive.google.com/file/d/1yNRDCzxQdJOZ4Dx6lpGFjDfJzK6JNy1y/view", "speakers": [ "akshit-gupta" ] @@ -227843,9 +227223,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -228385,8 +227762,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -228409,33 +227784,46 @@ }, { "session": { - "id": "desci-co-designing-the-future-of-science", - "sourceId": "DCHCYW", - "title": "DeSci: Co-Designing The Future of Science", - "description": "Connect with leaders in the DeSci Space to co-design the future of science. \r\n\r\nThis workshop aims to connect: \r\n- Developers & technical leaders by elevating your technology to be used by the DeSci community\r\n- Scientists & former scientists who can share needs in science to be solved for\r\n- DeSci leaders who can showcase what is happening now in DeSci and the visions the space is working towards \r\n\r\nLet's build a more collaborative, trustful, and effective scientific future together!", + "id": "desci-on-trial-two-years-2000-eth-11-projects-2bn-data-points-on-ethereum-has-desci-advanced-science", + "sourceId": "MZ3RLT", + "title": "DeSci on Trial: Two Years, 2000 ETH, 11 Projects, 2bn data points on Ethereum - has DeSci advanced science?", + "description": "Two years, 11 projects, $5M in funding for on chain science - what has DeSci on Ethereum really achieved? We'll critically examine key projects like Copenhagen University's longevity research and Newcastle's autophagy activation, assessing scientific rigor, web3 benefits, and real-world impact. Join us for an honest look at DeSci's promises vs. realities, featuring a live researcher update and helping shape a governance proposal on one of the presented projects.", "track": "Real World Ethereum", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Academic", + "expertise": "Beginner", + "audience": "Research", "featured": false, "doNotRecord": false, - "keywords": [ - "Science" - ], "tags": [ - "science", - "Data Availability", - "DeSci" + "Permissionless", + "Use Cases", + "DeSci", + "impact", + "DeSci", + "Permissionless", + "Use Cases" ], - "language": "en", - "speakers": [ - "erin-magennis" + "keywords": [ + "Impact" ], + "duration": 699, + "language": "en", + "sources_swarmHash": "665be7a46aec53f3558e341d98fa338cd77f6f9dbffb7880f5ab1880683bdffd", + "sources_youtubeId": "8UReCWNKbLY", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67374b1f1b0f83434da8beb9", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67374b1f1b0f83434da8beb9.vtt", + "transcript_text": " Welcome to the last day of DEF CON. Really appreciate you all still coming here after such a long and incredible week. 
This is actually my fifth DEF CON, so I'm really excited to be here. I have the honor to speak today. It's the third time I'm speaking, and it's just really incredible to see how the space has advanced. So I'm here today to talk to you about DeSci. Scientists today — roughly, at least the scientists that we work with — spend 80% of their time applying for funding and 20% doing the science. Academia, scientific funding, and even the biotech VC ecosystem are really broken in the sense of, like, how we distribute and access and fund innovation. Imagine if software developers had to spend 80% of their time getting funding to write a piece of code and were only coding 20%. Like, that would take us back into the 80s and 90s. And that's essentially where our scientific innovation ecosystem is stuck. So we have the fundamental hypothesis that most of the world's potential scientific talent actually today remains untapped. We live in a time of science friction, not of science fiction. Those super cool, solarpunk-like cities — we're only going to get there if we actually upgrade the way that we do innovation. So this is a Nature report that recently came out: a scientist treated her own cancer with viruses she grew in a lab. And she treated herself with this. And the response was that this was an under-the-radar, stigmatized, and ethically fraught practice. Essentially, scientists shouldn't be doing this. And there's a really cool comment: a scientist uses science and cures cancer, and the science establishment says, no, no, you can't do that. And then you wonder why scientists are fleeing academia. So DeSci, decentralized science, is ultimately really about making science permissionless. In the same way that we have permissionless money, we should have permissionless science. This is the typical innovation and drug development cycle that happens within a firm. This process today takes up to 10 years and costs up to $2 billion. And within here, you have, for instance, one company trying to cure cancer. Instead of all working together globally in a globally connected marketplace, each company does work in and of itself. So what we developed on Ethereum is something called an IP NFT. It's a combination of a legal contract, a smart contract, and then you encrypt all of the secret sauce, the private data, on a combination of Arweave and Filecoin. This mechanism to date has funded over 2,000 ETH in research across the world. This, for instance, is the research portfolio of VitaDAO, one of the leading DeSci DAOs that exist today. So there's an ecosystem now of DeSci DAOs that are all actively using this framework to fund research on Ethereum. This, for instance, is with University of Copenhagen, University of Oslo. We have one project with Newcastle University. If any of you are into longevity research, the project that we funded at Newcastle is about autophagy activators. And these new drugs being developed, which are now 100% owned by a DAO, have actually proven to be two to three times more effective than rapamycin, rapamycin being the other leading longevity drug. This is 100% owned on Ethereum now. This happens, as I said, through an IP NFT. There's a further governance framework that we developed, which is called IPTs, which essentially makes individual research projects tradable. I'll go through a little case study. Sorry, it's very fast-paced. It's a lot of ground to cover for five minutes. So this project was funded for $90,000 in May 2023.
It is about longevity gene therapy. It was then tokenized. This is how the IP NFT kind of looks on chain. And then here you can kind of see a public tokenization event where now members of VitaDAO can publicly contribute to funding this. Then a DEX Uniswap liquidity pool was launched. And what happens now is you have a live market that becomes really hungry for data, which typically doesn't happen. So all of the market participants at TokenHole are constantly in touch with the researcher, trying to find out how the research is going. So since then, he's released a whole bunch of data. This is a biotech developer who's also new to crypto. So we're bringing an entire new user base really into crypto. So since 2023, we've now built over seven of these different biodas that have in total sourced over 2,000 projects and essentially moved over 20 million of science on-chain. We have one focused on psychedelic research, for instance, another one focused on hair loss, cryogenics,", "eventId": "devcon-7", - "slot_start": 1731659400000, - "slot_end": 1731660000000, + "slot_start": 1731658800000, + "slot_end": 1731659400000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1RHyT56CbMgegV6NNemWv9dElkUsDZAzoG2HGnU3oSVo" + "resources_presentation": "https://docs.google.com/presentation/d/1GqW9KTYWAB1IHrlktGM0ntHgWK-2umJNkyohBO811gU", + "resources_slides": "https://drive.google.com/file/d/1jFZuZ2WeWVYNPgGIF0en2wHSrVjdt5_U/view", + "speakers": [ + "paul-kohlhaas" + ] }, "vector": [ 0, @@ -229256,10 +228644,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -229269,6 +228653,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -229457,7 +228842,7 @@ 0, 0, 2, - 0, + 2, 0, 0, 0, @@ -229755,6 +229140,7 @@ 2, 0, 0, + 2, 0, 0, 0, @@ -229765,54 +229151,55 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0 ] }, { "session": { - "id": "desci-on-trial-two-years-2000-eth-11-projects-2bn-data-points-on-ethereum-has-desci-advanced-science", - "sourceId": "MZ3RLT", - "title": "DeSci on Trial: Two Years, 2000 ETH, 11 Projects, 2bn data points on Ethereum - has DeSci advanced science?", - "description": "Two years, 11 projects, $5M in funding for on chain science - what has DeSci on Ethereum really achieved? We'll critically examine key projects like Copenhagen University's longevity research and Newcastle's autophagy activation, assessing scientific rigor, web3 benefits, and real-world impact. Join us for an honest look at DeSci's promises vs. realities, featuring a live researcher update and helping shape a governance proposal on one of the presented projects.", - "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Research", + "id": "designing-an-end-to-end-solution-for-based-preconfirmations", + "sourceId": "CRWBCC", + "title": "Designing an End to End Solution for Based Preconfirmations", + "description": "This workshop provides the audience with a foundation for building an end-to-end solution to deliver fast preconfirmation of transactions on a based-rollup like Taiko. 
In addition to understanding the basics of based sequencing and preconfirmations, attendees will learn about settling these preconfirmations as an Eigenlayer AVS, designing the AVS client, syncing L2 state using preconfirmed blocks, preconfer election, and managing a proposer lookahead using Beacon state within smart contracts.", + "track": "Layer 2", + "type": "Workshop", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Permissionless", - "Use Cases", - "DeSci", - "impact", - "DeSci", - "Permissionless", - "Use Cases" + "Layer 2s", + "Rollups", + "User Experience", + "sequencer", + "based", + "Layer 2s", + "Rollups", + "User Experience" ], "keywords": [ - "Impact" + "Preconfirmations", + "Based Rollups", + "Based Sequencing" ], - "duration": 699, + "duration": 5149, "language": "en", - "sources_swarmHash": "665be7a46aec53f3558e341d98fa338cd77f6f9dbffb7880f5ab1880683bdffd", - "sources_youtubeId": "8UReCWNKbLY", + "sources_swarmHash": "cf9df3cae1b815b47b992c112df3a3d160808224cc703fbcd2cf37543590dbc6", + "sources_youtubeId": "70xIIrGXDSo", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67374b1f1b0f83434da8beb9", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67374b1f1b0f83434da8beb9.vtt", - "transcript_text": " Welcome to the last day of DEF CON. Really appreciate you all still coming here after such a long and incredible week. This is actually my fifth DEF CON, so I'm really excited to be here. I have the honor to speak today. It's the third time I'm speaking, and it's just really incredible to see how the space has advanced. So I'm here today to talk to you about DeSci. Scientists today, roughly, at least the scientists that we spend, spend 80% of their time applying for funding and 20% doing the science. Academia, scientific funding, and even the biotech VC ecosystem is really broken in the sense of like how we distribute and access and fund innovation. Imagine if software developers had to spend 80% of their time getting funding to write a piece of code and only were coding 20%. Like that would take us back into the 80s and 90s. And that's essentially where our scientific innovation ecosystem is stuck. So we have the fundamental hypothesis that most of the world's potential scientific talent actually today remains untapped. We live in a time of science friction, not of science fiction. Those super cool, solopunk-like cities, we're only going to get there if we actually upgrade the way that we do innovation. So this is a Nature report that recently came out. A scientist that treated her own cancer with viruses. She grew in a lab. And she treated herself with this. And the response was that this under the radar stigmatized an ethically fraught practice. Essentially, scientists shouldn't be doing this. And there's a really cool comment. Scientists are science, cures cancer. The science, no, no, you can't do that. And then you wonder why scientists are fleeing academia. So de-sign, decentralized science, is ultimately really about making science permissionless. In the same way that we have permissionless money, we should have permissionless science. This is the typical innovation and drug development cycle that happens within a firm, this process today takes up to 10 years and costs up to $2 billion. And within here, you have, this is like one company, for instance, trying to cure cancer. 
Instead of all working together globally in a globally connected marketplace, each company does work by and in of itself. So what we developed on Ethereum is something called an IP NFT. It's a combination of a legal contract, a smart contract, and then you encrypt all of the secret sauce, the private data, on a combination of Arweave and Filecoin. This mechanism to date has funded over 2,000 ETH in research across the world. This, for instance, is the research portfolio of VitaDAO, one of the leading DeSci DAOs that exist today. So there's an ecosystem now of DeSci DAOs that are all actively So there's an ecosystem now of DeSci DAOs that are all actively using this framework to fund research on Ethereum. This, for instance, is with University of Copenhagen, University of Oslo. We have one project with Newcastle University. If any of you are into longevity research, the project that we funded at Newcastle is about autophagy activators. And these new drugs that are being developed that are 100% now owned by DAO are actually proven to be two to three times more effective than rapamycin. Rapamycin being the other leading longevity drug. This is 100% owned on Ethereum now. This happens, as I said, through an IP NFT. There's a further governance framework that we developed, which is called IPTs, which essentially make individual research projects now tradable. I'll go through a little case study. Sorry, it's very fast-paced. It's a lot of ground to cover for five minutes. So this project was funded for $90,000 in May 2023. It is about longevity gene therapy. It was then tokenized. This is how the IP NFT kind of looks on chain. And then here you can kind of see a public tokenization event where now members of VitaDAO can publicly contribute to funding this. Then a DEX Uniswap liquidity pool was launched. And what happens now is you have a live market that becomes really hungry for data, which typically doesn't happen. So all of the market participants at TokenHole are constantly in touch with the researcher, trying to find out how the research is going. So since then, he's released a whole bunch of data. This is a biotech developer who's also new to crypto. So we're bringing an entire new user base really into crypto. So since 2023, we've now built over seven of these different biodas that have in total sourced over 2,000 projects and essentially moved over 20 million of science on-chain. We have one focused on psychedelic research, for instance, another one focused on hair loss, cryogenics,", "sources_streamethId": "673869271b0f83434debead4", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673869271b0f83434debead4.vtt", "transcript_text": " Thank you. Kind of. I mean, it's fine, but since we're here. Okay. Yeah, very good afternoon to all of you present here. So, yeah, we have reached the final day of Devcon. We have had, like, ten different talks on pre-confirmations, three full-scale events, and the only question that I've been getting is, how do you make all of this? Like, fast transaction confirmations, yeah, we get it, but how do you make this? So yeah, now on the last day, welcome to the workshop on designing an end-to-end system for based pre-confirmations. A quick introduction of the speakers. Ahmed, go ahead. So my name is Ahmed Bitar. I work as an Ethereum core developer at Nethermind. I'm also the product manager of Surge and the technical lead for our pre-confirmation solution. And I'm Anshu.
I am working as a blockchain engineer at Nethermind. And I joined this industry like five years ago, mostly focused on DeFi. But for the past six months, my efforts have been mostly concentrated on based pre-confirmations. So today this session is a no-code session. So I won't be, like, writing a line of code and then asking you to copy it and repeat it like 100 times over. I know most of you will leave in like half an hour if I do that. So instead, this is a session on design thinking, where we'll build the whole concept from the ground up and share what exactly we did during our research over the past six months. But if you are interested in looking at the code, then I have linked it below. It's the Taiko-Preconf-AVS repository, which you can find on the Nethermind GitHub page. So since we want to build it all up from the ground up, we have to start with the foundational knowledge, which is rollups. Because the "based" in based pre-confirmations comes from based rollups. Just by show of hands, how many of you understand what a roll-up is? Okay, pretty much everyone. That's great. Well, a roll-up is a scaling solution. And why do we need a scaling solution? We need a scaling solution because the L1 is really slow. The throughput of L1 is not much. And why exactly is that the case? Well, in a blockchain, blocks are basically a consensus on state transition. That's the first section of the Ethereum white paper. You have state A, you apply a bunch of transactions — which is like a delta — to state A, and you get a state B. But now imagine millions of nodes doing this every single epoch. Now that's going to make this network really slow, and that's a major problem. So what's the roll-up way of doing it? Well, if processing state transitions is the biggest issue on the L1, what if we process these state transitions off-chain, or apply these deltas off-chain? And that's exactly what a roll-up does. You can have one single roll-up node, or an L2 node, since it becomes a layer 2, that applies this delta off-chain, processes the state transition, and the L1 is just an observer. On the L1, you have a roll-up inbox contract, and you simply push the state transition and the delta, maybe as a form of a blob, or in the call data. In the image, it's a blob. And the L1 is just an observer. And in the most basic form, this is what a rollup is. But is this enough? Well, no. Because if it's just one node pushing the transition and the delta, how do we know whether it's actually correct or not? And this is where the flavors of the rollups come in: optimistic and ZK. In the case of an optimistic rollup, well, you push the state transition and the delta, and then you just wait. You just wait for someone to prove you incorrect. If you're correct, it's all good. But someone can just come by and say, oh, hey, here's a proof: this transition that you posted is not possible with the delta that you posted. And then you kind of get slashed if you have some stake in. It depends on what kind of process the rollup wants to use. And Arbitrum is an example of an optimistic rollup. I guess most of you must have used Arbitrum. And then we have ZK rollups. In the case of ZK rollups, instead of waiting for someone to prove you incorrect, the moment you push the transition and the delta, you have to prove that this is correct. So it's called a validity proof. You're basically proving the validity of the transition.
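Before moving on, here is a compact sketch of the flow just described: one off-chain node applies the delta, while the L1 "inbox" merely records the delta plus the claimed post-state. This is an illustrative toy, not Taiko's actual implementation; every name in it (apply_delta, RollupInbox, propose_block) is invented for the sketch.

```python
# Minimal rollup sketch (illustrative only): the state transition happens
# off-chain, and the L1 inbox passively logs (delta, claimed post-state root).
import hashlib
import json

def apply_delta(state: dict, delta: list[dict]) -> dict:
    """Apply a batch of toy 'transfer' transactions (the delta) off-chain."""
    new_state = dict(state)
    for tx in delta:
        new_state[tx["from"]] = new_state.get(tx["from"], 0) - tx["amount"]
        new_state[tx["to"]] = new_state.get(tx["to"], 0) + tx["amount"]
    return new_state

def state_root(state: dict) -> str:
    """Stand-in for a Merkle root: hash of the canonically serialized state."""
    return hashlib.sha256(json.dumps(state, sort_keys=True).encode()).hexdigest()

class RollupInbox:
    """The L1 side: a passive log. The L1 never re-executes the delta itself;
    correctness is established separately (fraud proofs for optimistic
    rollups, validity proofs for ZK rollups)."""
    def __init__(self):
        self.batches = []
    def propose_block(self, delta: list[dict], claimed_root: str) -> None:
        self.batches.append((delta, claimed_root))

# One off-chain node processes the transition; the L1 only observes.
state = {"alice": 10, "bob": 0}
delta = [{"from": "alice", "to": "bob", "amount": 3}]
state = apply_delta(state, delta)
RollupInbox().propose_block(delta, state_root(state))
```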
But the point here is that whatever time or computational effort it takes to verify this validity proof must be less than what it takes for the L1 itself to process all the data. Only then does it actually make sense, right? So these are the two variants for proving whether a transition is correct or not. Once again, by show of hands, how many of you have heard of centralized sequencing? Okay. It's the most cursed concept right now. And just to give a quick primer, when you're making a transaction on Arbitrum, you're not directly sending the transaction to a public mempool, like when you're transacting on Ethereum L1. Instead, it's going to an Arbitrum sequencer. And it's a private server. What the sequencer does is it has complete control over arranging or ordering the transactions. And they usually promise that they will arrange the transactions in a certain way. We don't know whether they're actually doing it or not. But in the case of Arbitrum, the promise is that it's first come, first served. If your transaction comes before that other person's, we'll put your transaction first in the block. But once again, it's a promise. We don't know whether that's actually happening or not. So let's get based and talk about based rollups and pre-confirmations. Well, the based part is actually not a different variant of a rollup, so it doesn't stand beside our optimistic or ZK rollup. Instead, based is a form of sequencing: just like we have centralized sequencing, we have based sequencing. And in the case of based sequencing, the L1 proposer is the sequencer. You don't have a centralized server sequencing the transactions. Instead, it's the L1 proposer. Let's say we have Taiko. Taiko is a based rollup. And in the case of Taiko, the L1 proposer literally runs the Taiko software alongside the usual consensus and execution clients. And whenever their block comes in, they literally just pull the L2 transactions from a public Taiko mempool, they order them in their L1 block and put it on the network. So yeah, you basically are inheriting the L1 security as well as the L1 liveness, because the L1 itself is your sequencer. So just a quick overview of how Taiko is actually arranged. Well, you have the rollup inbox contract that I have been talking about. And although Taiko doesn't really call it a rollup inbox — this is the colloquial term — Taiko has, like, a bunch of contracts that work together, and they just call it the Taiko L1 contract. So that's the L1 component. Now, if you're running an Ethereum validator, you usually run an execution client and a consensus client, right? So in a similar way, if you want to run or be a part of an L2 network, the proposer has to run an L2 execution client and an L2 consensus client. So in this case, taiko-geth is the execution client, which is a modification of the standard Go Ethereum. And this is where you have the mempool, you have the actual L2 chain, all the blocks are formalized, and you also have the EVM on the L2 network. And then we have the Taiko client, which has some subcomponents. And this forms the consensus client, and it deals with proving the blocks and proposing the blocks whenever required. Now, today, the most important thing for us is block proposals, because that's where the whole concept of pre-confirmations will be built upon.
So how exactly do block proposals work in Taiko, or in a based roll-up? Well, when you're making a transaction using any kind of standard wallet, it goes to the mempool, or the public mempool, which is offered by taiko-geth. And every few seconds, or depending upon what algorithm the proposer is using, transactions are fetched from the mempool. And this transaction batch forms the actual delta. And when you're calling the rollup inbox contract, you're basically calling a function, which is proposeBlock, and you're just passing this delta along. Now you might be wondering, well, that's the delta, fine. Where's the transition? So in Taiko, it's a two-step process. The delta is gone, and now we have the Taiko prover, which comes in later on and just says, okay, remember that delta that was pushed a few seconds ago or a few minutes ago? You see, this is the transition that that delta causes. And since Taiko is a ZK rollup, it pushes a concise proof along with it, like, okay, this is the transition, and this is the proof that the delta caused this transition. Now, the most important aspect: the Taiko driver. So once these L2 blocks are put on L1, there is a BlockProposed event that is released, which shows that, hey, okay, this L2 block was put in the L1 contract. And whenever that BlockProposed event is released, the Taiko driver listens to it and advances the head in geth. Advancing the head basically means you're formalizing and putting an L2 block within the L2 network. And this is when the wallets end up getting the transaction receipt, or the confirmation. So you see, the catch here is that the Taiko driver only receives the BlockProposed event every 12 seconds. That means when you're making a transaction on Taiko, you get a transaction receipt after 12 seconds, which is huge. That's not ideal for a rollup or a scaling solution. And that's very evident from the Dune Analytics graph of Taiko's block times. It averages between 12 and 24 seconds, which makes sense. It's inheriting the L1 block time. That's not ideal at all. So we need pre-confirmations. And pre-confirmations are not something new. We have already had them on Arbitrum, and several other roll-ups, for a long while. Again, show of hands, how many of you have transacted on Arbitrum? And when you transact on Arbitrum, you basically get a transaction receipt in a very small period of time, like half a second, one second, sometimes two seconds. But the point here is that Arbitrum only posts blocks every two minutes on L1. So how are you getting this transaction receipt immediately? It's because that's a pre-confirmation. They're giving you a promise that, hey, see, this is the receipt, and we will be putting this on the L1 eventually. In Taiko, you have to wait for it to be put on the L1. But, well, this gives better UX for Arbitrum and bad UX for Taiko. Now, what if we want to put pre-confirmations on Taiko? It's really tricky. Because in the case of Arbitrum, we have one server, literally one server in one corner of the world, just running, ordering transactions, providing pre-confirmations. Easy. In the case of based rollups, the sequencer is changing every single slot, because the proposer changes every single slot. And then, these days, we don't have the proposer building the blocks.
The blocks are actually built via a PBS pipeline, like MEV-Boost by Flashbots, where builders build the blocks for you as the proposer, and then you just propose the block: whichever builder gives you the highest bid, you just take their block and propose it. You don't even have any control over what's in the block. So how exactly will you put in based pre-confirmations with three layers of complexity? Well, we have done it, and let's start with the design principles. Maybe a quick round of questions if anyone has any questions. No? Okay. Take it over. Alright. Great, thank you. Okay, so the first thing we wanted to do was we wanted to not introduce centralization again. The idea here is that if we wanted to introduce centralization, we wouldn't have built it as a based roll-up in the first place. Then how are we going to solve this complex problem? So let me explain how gateways work first. So gateways are basically centralized servers that expose an RPC for the user, to be able to provide them that pre-conf. The user sends the transaction to the RPC, and then it selects which transactions it wants to pre-conf, and then it proposes that block to L1. Of course, the confirmation receipt that goes to the user is given in a matter of milliseconds, between 100 and 200 milliseconds, which is very fast. Of course, the UX is very cool, but the compromise is very high. Now, also, there is another concept here in pre-confirmation that is important, which is that not all validators have signed up to become pre-confers. And because of this, you sometimes have some validators who have decided to register as a pre-confer and some others that haven't registered. So the gateway can provide pre-confirmations for users, and it will not necessarily push a proposed block for the L2 on this slot, but it could potentially push it here. And the way to do this will be explained at a later stage, when we talk about forced inclusion lists. All right. The other thing that we wanted to focus on, which was important for us, is that we wanted to use the existing transaction structure, the existing wallet. We didn't want to add new complexity to the already existing situation when you are sending transactions through wallets. So some suggestions in the pre-confing space were like, oh, we should potentially put in an inclusion pre-conf fee premium — so, like, what are you going to pay the pre-confer so he can provide you with this fast service? — and a base fee per gas for that as well. Or, for example, an execution pre-conf fee. Yeah, they're basically very similar to each other. They're all the same. So what we decided is, no, we're going to choose something that is basic. So we're going to choose the same exact EIP-1559 field, which is the priority fee. So the priority fee pays for the pre-confirmation, the proposing, and the proving of that transaction. And the user does not have to worry about all of these other complex things. Okay. It's not moving. Okay. Alright. So I'll start now explaining what we designed. So, in Taiko, like I explained, we have that Taiko client, and we also have taiko-geth. In Taiko we also have, before we come to these, the contract that receives the block proposals. And so what we did is we added something we call a pre-conferring node. And that sits between the proposer and the contracts. And we added some contracts.
One contract is the pre-confirmation service contract, which basically receives the blocks that are coming from the pre-confer. And also, we added a re-staking contract that will basically allow the proposers to register as a pre-confer. And by this, these proposers, whichever they are — any validator — can basically just run this set of software as a sidecar to whatever they're running for the validator. So alongside L1, you're just running these three Docker containers, and then you are able to pre-conf transactions when your slot is up. So how does this exactly work? So we have a loop that happens every three seconds. When you are chosen as the pre-confer for the upcoming slot, what happens is that you as a pre-conferring node will fetch the transactions every three seconds from the Taiko proposer, which will basically fetch them from taiko-geth. The user will have sent this transaction to the mempool, so there is no centralizing aspect here. You don't have to connect to a specific endpoint to send the transaction. And every three seconds, the pre-confer will sign this batch of transactions that it has received from the Taiko proposer, and then broadcast it to other pre-confers through P2P. Oops, okay. Okay, permissionless, okay. So here we look at, okay, so how do we choose which of the registered pre-confers are going to be used, or have the right to propose these blocks? And this is when we use the look-ahead that is provided by the consensus layer, to know exactly which registered proposer is coming up — like, in this epoch and in the epoch after. So in the consensus layer, in the beacon chain, you can query the CL client for which validators have the right to propose in the current epoch and in the upcoming epoch. So it provides you with a 32-long list for the epoch that you specify. And so basically what we did is that we made the preconf node fetch this list from the CL client and push it to the pre-confirmation service. And this way we know, for example, that since this proposer is not registered, and this proposer is not registered, but we know that this proposer is registered, then we choose this proposer to be the one who is pre-confing those blocks. This thing doesn't always work. Okay. So then, with any system, you have to have an incentive to act correctly. Some systems depend on only rewarding good behavior. Some systems depend on punishing bad behavior. Most systems, or a lot of systems, depend on doing both. So in this case, the pre-confer gets the pre-confirmation fees and the proposing fees. In return, he needs to pre-conf and provide good information and honor the pre-confirmations that he gave to the users. The way we check that is that, in that case, another node reveals the signed malicious pre-confirmation. The way it's done is that, so let's say I pre-confed a batch of transactions, and then I didn't end up pushing this batch of transactions on chain. But I have already broadcasted that I have pre-confed this batch of transactions with my signature. So what would happen is another pre-confer that was listening on P2P would then pick this signature with the batch of transactions and push that to the pre-confirmation service contract.
The pre-confirmation service contract would check if this is a valid signature from the pre-confer of the slot, and if it is, then it will go and ask the re-staking, or staking, contract to slash the signer. And in this case, yeah, there is also slashing for an incorrect look-ahead, because you fetch the look-ahead from the CL — but how does the EL know that this look-ahead is correct? You can use EIP-4788, which basically provides you with the CL beacon root, and using that, you can push a proof if the look-ahead is incorrect, and the pre-confer who sent an incorrect look-ahead would be slashed. Okay. So before I move on, if you guys have any questions, it's good to have them now. So yeah, please go ahead. I have two questions. First one is, based on, I'm not familiar with 4788, but could you not, like, can it only prove that you can look at it as a law? Yeah, so the thing about 4788 is that it provides you with the beacon root of the parent block and not the current block. And because of this, when the look-ahead is pushed, I cannot in that moment verify its correctness. But after one single block, I can, with the proof, using 4788, make sure that if it's not correct, that it is not. So this is why we opted for this. In the case that it was possible to verify, we would have potentially opted for pushing the proof with the look-ahead. The problem is that that normally entails that you would pay more, because the proof needs to be verified on chain in every single submission. Whereas if you do it in a fraud-proof manner, only when someone's acting maliciously do you need to do this. So the cost drops dramatically. And there are precautions to this. So whoever submits the fraud proof for the look-ahead will get rewarded for this from the slashing of the other pre-confer. Other questions? Yeah. Okay. Let me try to make this work somehow. Okay. Work, please. Okay. No. Okay. Yeah. Over here. Sure. I mean, here. This one is now opted in and this one is... Yeah... Okay. So this is probably not mentioned in this presentation, which is a problem. Hey, sorry. Basically, based rollups depend on giving the user the ability to always push transactions on the L1 permissionlessly. And that means that there is no one who could prevent the user from pushing a transaction that could invalidate future transactions that have been pre-confed and will be pushed later. And this leads to execution pre-confs not being able to be provided. And the only solution that we have to that is a delayed inbox. So pre-confers are able to push directly to the Taiko smart contract to propose blocks, whereas normal users who don't want to use the pre-confing system that is built, and want to push directly to avoid potential censorship by the pre-confer — even though this is a decentralized solution, it could potentially have some type of censorship — if they push their transaction to this delayed inbox, what happens is that we wait for the pre-confirmations to land, and then we include those transactions that have been pushed by the user. So in this instance, just to be clear, any transactions that come in this slot will go to the queue and will not be proposed. No, it will not be denied. It will go to the queue. Yeah, delayed. Yes.
It will be delayed after the pre-confer has pushed his pre-confirmations, and then it will be included. What if my pre-confirmation depended on L1 state, which is like a really nice component of based rollups? Yeah, no. I could expect a pre-confirmation with interacting with L1. Yes. So if we engage composability here, then it could, yes — any L1 state change could potentially affect this L2 transaction that depends on it. And this is not a problem that the solution is trying to solve, unfortunately. Yeah, I don't think, like, as of now, there is — potentially there's some people working on this, but I'm not aware of any solution to this particular problem. This is a very good question. Thank you, sir. Any other questions? Yeah, go ahead. Does this require at least one recall first? Yeah, so I think this might be talked about at a later stage, so I'm not going to touch on it more. But there is a way of choosing a random pre-confer in the case that there isn't a pre-confer available for the next 64 slots. Any other questions? Yeah, go ahead. The gateway. Okay. So the gateway basically buys the right, from the proposers that have registered as pre-confers, to propose at a certain slot. I don't think so. No, they can have varying tips between one gateway and another. The problem with gateways is that the gateways will have to compete. In the beginning, we might have a couple of gateways out there. The problem with that is that people normally end up converging on a couple of gateways that then would have a monopoly. So, like, two or one gateway that would then be dominating. Because the idea here is that if a gateway cannot secure validators or proposers that are willing to sell it — to sell the gateway their right to propose these Taiko transactions — the gateway cannot operate. Do they have a business where pre-confirmations are built on other pre-confirmations? Yeah. So you do. And that's why we have the P2P. I'll explain that in this slide. So when we said here — sorry, this thing is not, okay, yeah — so when we said that the preconf node every three seconds takes a batch of transactions and signs it, it then broadcasts the signed preconf blocks on P2P. This is important because anyone who's listening can do a bunch of things. First, they can advance the head of taiko-geth, so people can keep up with these pre-confirmations. Are you moving to a safe head there? Sorry? Is it moving just to a safe head? It's not a safe head, no. It's not even a safe head at this point, because it's basically a soft head, a very soft head. Because as long as it hasn't been proposed on L1, it's not fully safe. I mean, there are financial precautions for the pre-confer if he doesn't obey or honor these pre-confirmations, but software can always fail, or they could potentially have other incentives. So it's a very soft head, but at least you get consistent block times, which is a better user experience than what you get right now. I mean, Anshu said that transactions normally take 12 seconds, and on Taiko, sometimes they take 20 seconds. So there is some kind of extra delay. It's a very long time. Any other questions before moving on? Sorry, I just wanted to understand the consensus at the P2P layer that you were describing. What do you mean, the consensus? So my original question was, if you have pre-confirmations being built upon by...
Sorry — new pre-confirmations being built upon the state of previous pre-confirmations: how does that work? Like, consensus is needed there between two different proposals? So the pre-confer that comes — so let's say that, okay, let's say that all of these are pre-confers. And this one is a pre-confer and has been pushing — it pushed basically four blocks. It pre-confed the first one, second one, third one, fourth one, and then it basically just went and pushed them to L1. This one would be listening to these pre-confirmation batches on the P2P and would receive them. And it would just wait for a confirmation that they have landed on L1, and then it would start building on top of that. Would the middle one be accepting pre-confirmations before it's been posted to L1? No. Because a pre-confirmation depends on the state of what happens in that chain here. So it can't do that. Or it will collect — I mean, it can, but it will collect a bunch of transactions that have already been included, for example. So it would potentially lose that space on the L1 side. And also, if it, for example, pre-confs a bunch of transactions that have already landed, the execution pre-conf is not honored, but that doesn't mean that it will be slashed. It won't be slashed in the system, because the inclusion preconf is still there. But it will lose that. And if it does not include them, it will be slashed. So in both cases, it's kind of losing something. Of course, non-inclusion makes it lose more. So it would include them even though they're already processed and will be invalid if they're included again, but at least it won't be slashed. Yeah, so this is the consensus. I mean, this is a problem with also varying types of pre-confs, because you can always... So in the system we're trying to build right now with pre-confirmations, we're trying to avoid a singular solution dominating. So there are multiple solutions. So there is the gateway, there is what we're building, and there are multiple ones. So if each one kind of needs to build on the other one, it needs to wait for the state of L1 to be updated to start building on top of it. Additionally to that, if a user pushes a transaction directly to L1 without pushing it through a pre-confer, then that also changes the state. So it doesn't make any sense at all to start building pre-confirmations before knowing the exact state on the L1. Thank you. Okay. Let's hope this marker starts working. Okay. Yeah, so we talked about slashing. Now, as we said, the pre-confer needs to include and propose these batches of transactions on the L1. But if this validator is selling his right to build the block through PBS to a builder, it cannot accomplish that, because it has no control over the content of that block. And the solution we found to that is that we would modify the PBS pipeline to accomplish this exact goal. So the preconf node would go and tell, basically, MEV-Boost that there is a constraint on the builder, and that the builder needs to include a bunch of L2 blocks, which are basically a couple of L1 transactions — or it could be one L1 transaction and their blobs. And it should include them. And then the PBS relayer would return with, okay, yeah, I can honor that. And they build a block including these transactions at the end. And we propose the L1 block. This is the way that...
And currently we're using Bolt's MEV-Boost, because they have that constraints API already implemented — so we didn't want to reinvent the wheel. But of course, there is also Commit-Boost, which can have that Bolt MEV-Boost module built on it. For more details on those, please search for them on Google: Commit-Boost and Bolt MEV-Boost. Both of them have open-source GitHub repositories that you can look at. Pre-confer selection — I think Anshu will take over here. Thank you, sir. Thank you. Do you want the mic? Yeah, just this one, yeah. Okay, so now you have a good idea of what the overall design looks like. And let's talk about pre-confer selection, because we did speak about how we want the L1 proposer to be the only one who can pre-conf and then propose the L2 blocks in a particular slot. Now, this is actually a very hard problem, even though it doesn't look like it, because in Ethereum, we love patching things up. And in the process of patching things up, we develop new problems. So when ETH moved from PoW to PoS, what happened was we introduced a new layer, the consensus layer, besides the existing execution layer. So earlier it was merged into, like, one single thing, and now we have it separate. We have a consensus layer, which manages the PoS part, and we have the execution layer, which is what we developers usually handle when we are deploying smart contracts or interacting with Ethereum using a wallet. The problem is, the consensus layer is where the proposer's identity lives. And that has a BLS signature scheme. But the execution layer, where all the inbox contracts are, where all the transactions are made — that has an ECDSA signature scheme. And that's a big problem. How do we make a connection between both of these? There's no way to make a connection. So what we need is a BLS-to-ECDSA mapping. So let's say I'm an entity, I have an ECDSA address, and I run a thousand validators with a thousand different BLS public keys. I need a way that I can prove that I own those thousand validators and show it to the registry contract. So we have the pre-confirmation service contracts. It actually has three subcontracts: the pre-confirmation registry, which we'll be dealing with in this slide, and two other contracts that we'll be taking up later on. So this entity needs to prove that, hey, I own these validators, and I actually have the right to propose an L2 block in a particular slot. And how exactly do you prove ownership of a key? Through signatures, right? So we have this signature format here, which, as you can see, basically has the standard things, like a chain ID; and then validator op is basically either 0 or 1 — if it's 0, you're removing a validator from your list; 1, you're adding. And then there is an expiry and the actual pre-confer. So in here, the pre-confer in the signature message is the ECDSA address that I'm claiming — that is claiming the ownership of a BLS address. So the ECDSA address just pushes a signature, and the contract just verifies that the signature is correct and that this BLS public key actually belongs to this ECDSA key, and inserts it into a simple map. Now, the execution layer has no native way of verifying BLS signatures right now, but very soon, in the next upgrade, the Pectra upgrade, a new precompile is being added via EIP-2537. That's where all the discussion has happened. And this precompile — or a set of precompiles, three precompiles actually — will help us verify BLS signatures.
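To make that registration flow concrete, here is a hedged sketch of the signed message and the registry mapping just described. The field names mirror the talk, but the actual encoding and ABI in the Nethermind Taiko-Preconf-AVS contracts may well differ, and the BLS verifier is stubbed here rather than implemented.

```python
# Illustrative sketch (not the actual contract): a validator's BLS key signs a
# message binding it to an ECDSA "preconfer" address; the registry stores the
# BLS -> ECDSA mapping after verifying the signature.
from dataclasses import dataclass

ADD_VALIDATOR, REMOVE_VALIDATOR = 1, 0  # the 0/1 "validator op" from the talk

@dataclass
class RegistrationMessage:
    chain_id: int
    validator_op: int   # 0 = remove from the preconfer's list, 1 = add
    expiry: int         # unix timestamp after which the signature is stale
    preconfer: str      # ECDSA address claiming ownership of the BLS key

    def encode(self) -> bytes:
        # Hypothetical encoding; a real contract would use abi.encode.
        return f"{self.chain_id}:{self.validator_op}:{self.expiry}:{self.preconfer}".encode()

class PreconfRegistry:
    """EL-side registry: maps BLS pubkey -> ECDSA preconfer address."""
    def __init__(self, verify_bls):
        # verify_bls would be backed by the EIP-2537 precompiles post-Pectra.
        self.verify_bls = verify_bls
        self.validator_to_preconfer = {}

    def register(self, bls_pubkey: bytes, msg: RegistrationMessage, bls_sig: bytes, now: int):
        assert now < msg.expiry, "signature expired"
        assert self.verify_bls(bls_pubkey, msg.encode(), bls_sig), "bad BLS signature"
        if msg.validator_op == ADD_VALIDATOR:
            self.validator_to_preconfer[bls_pubkey] = msg.preconfer
        else:
            self.validator_to_preconfer.pop(bls_pubkey, None)

# Demo with a stub verifier (real deployments would do actual BLS verification).
registry = PreconfRegistry(verify_bls=lambda pk, m, sig: True)
msg = RegistrationMessage(chain_id=1, validator_op=ADD_VALIDATOR,
                          expiry=2_000_000_000, preconfer="0xPreconfer")
registry.register(b"\x01" * 48, msg, bls_sig=b"", now=1_700_000_000)
```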
Now, these are really expensive, because for verifying one signature, we need to spend like 300K units of gas. That's really expensive. So in our next POC, we are actually proposing an alternative, where in the case of BLS, there's a great feature, and that's aggregation. So if you have 1,000 validators, what you can do is you can have 1,000 signatures off-chain, then add all of these signatures up. It's basically elliptic curve addition. You add these signatures up, and then on the contract, you just have to verify one signature. So essentially, you can put thousands of validators in your registry via just one signature and just a bit more than 300K gas, which is amazing. And this is what we will potentially be putting in the next version. But yeah, right now it's one-to-one: single address, single signature, every single time. So how is this used to construct the lookahead? Because the BLS lookahead is absolutely useless here; we need an ECDSA lookahead on the execution layer. So every single time, we know that only this ECDSA is supposed to propose. No other ECDSA can propose an L2 block. So in this case, it's kind of simple. The preconf node has the logic. The preconf node can take a look at the consensus layer, because the preconf node has the view of both the execution layer and the consensus layer. So the preconf node pulls all the proposers from the consensus layer for the next epoch. Then it fetches the associated ECDSA key from the pre-confirmation registry, because we have the BLS-to-ECDSA mapping there, and it just matches it: this BLS for the next slot belongs to this ECDSA, this one belongs to this one — and it creates the entire lookahead. Now, in our design, we have assigned the duty of pushing the lookahead to the first pre-confer of every epoch, so the first pre-confer of the current epoch will be pushing the lookahead for the next epoch. And they are basically bound by this duty; they must do it — well, there's kind of no option of not doing it, because the contract expects you to provide that. And this is what a simple lookahead — like, one of the nodes in the lookahead array or mapping — looks like. I'll get to what data structure we actually use. But in here, the timestamp — the second field — and the fourth field make sense. The timestamp is the timestamp of the slot, and the pre-confer is whoever is supposed to be pre-confing in that slot, or proposing an L2 block. We have another field, fallback, and previous timestamp. Now, what are these? Well, the previous timestamp is just a link to the last lookahead node's timestamp. What this allows us to do is arrange the lookahead as a linked list within the contract, or sort of a linked list. So every lookahead structure is an item in a map, and the previous timestamp just points to one of the other timestamps. What this allows us to do is have advanced proposals, because not every proposer will be opting in. And also, not every proposer will be registering and exposing an ECDSA address. Some are just not interested in pre-confing, right? So in this case, we cannot just have an entire epoch be empty if there are no pre-confers, or if there are very few pre-confers. We need to do something in the empty slots.
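As a rough illustration of that linked-list lookahead — field names approximate the slide, not the exact contract storage layout — including the advance-preconf check over empty slots that is described next:

```python
# Sketch of a linked-list style lookahead (illustrative names, not the real
# contract layout). Each entry links back to the previous entry's timestamp,
# so the gap of empty slots a preconfer may cover is one comparison.
from dataclasses import dataclass

@dataclass
class LookaheadEntry:
    timestamp: int            # slot timestamp this entry is anchored to
    previous_timestamp: int   # link to the prior entry (0 for the first)
    preconfer: str            # ECDSA address allowed to preconf/propose
    fallback: bool = False    # True if randomly selected for an empty epoch

class Lookahead:
    def __init__(self):
        self.entries: dict[int, LookaheadEntry] = {}  # timestamp -> entry
        self.tail = 0

    def push(self, timestamp: int, preconfer: str, fallback: bool = False):
        self.entries[timestamp] = LookaheadEntry(timestamp, self.tail, preconfer, fallback)
        self.tail = timestamp

    def is_eligible(self, preconfer: str, slot_timestamp: int) -> bool:
        # A preconfer may act in its own slot *and* in the empty slots since
        # the previous entry (the "advance preconf" described in the talk).
        entry = self.entries.get(self.tail)
        while entry is not None:
            if entry.previous_timestamp < slot_timestamp <= entry.timestamp:
                return entry.preconfer == preconfer
            entry = self.entries.get(entry.previous_timestamp)
        return False

la = Lookahead()
la.push(timestamp=1200, preconfer="0xA")
la.push(timestamp=1464, preconfer="0xB")   # slots between 1200 and 1464 are empty
assert la.is_eligible("0xB", 1300)         # 0xB may preconf in the gap, in advance
```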
And what we do is we allow the next chosen pre-confer to pre-conf in advance. And because of this link, we can do that with a simple check, a simple if condition. That's why in here you can see that P2, pre-confer 2, can pre-conf in the second and third slot already. And P3 can pre-conf three slots in advance. That is made possible because of this linked-list design. Finally, if we have an epoch where there are no pre-confers at all — and that's very much possible, if none of the proposers in that epoch has opted in, we don't have anyone as a pre-confer — then we have to select someone randomly. And that's a very simple selection. Like, we need a source of randomness, and we apply a simple modulus to select one of the indices from the registry for who exactly is going to be the pre-confer. And the source of randomness comes from the beacon root contract. So, as far as I remember, we basically end up choosing the beacon root of the first block in the last epoch, because this gives us a deterministic idea of who's going to be the random pre-confer in the next epoch. So we use that as a source of randomness, and we just use that to pick out a pre-confer. And this fallback pre-confer has an advantage, and that is that it can pre-conf in every single slot of this epoch, because no one else is there to pre-conf. Yeah? Yeah? Sorry, I have a question. If the pre-confer is not in the epoch, doesn't that mean they're highly unlikely to be... in six months? Well, but we won't be stopping the system for six months, right? Yeah, so... If I can, uh, get in here — I understand the question. Um, so you're asking: since the pre-confer does not have the right to propose a block in the next epoch, for example, and we chose it at random, um, how will it be able to honor these pre-confirmations? And the answer to this question is that it might not be able to honor the pre-confirmations. But it will not be slashed if it does not honor them in this case, because it's a randomly picked pre-confer. And also, we thought about this, like, okay, maybe it shouldn't be providing pre-confirmations, but this would be a very bad UX. So we'd provide the pre-confirmations, we would send them over the P2P and to the mempool, and potentially someone will pick them up and include them if the fee is right. And this was, like, the mechanism that we wanted to do. So what Taiko is also doing is using this fallback mechanism to say, okay, if there is no pre-confer registered for the next epoch, then we are going to propose, just to keep the liveness of the chain. And of course, this proposal will go to the mempool and someone will pick it up. But if someone is intentionally censoring these transactions, and there is no mechanism to force the builders to include them, then they will potentially not be included in time. Yeah. Thank you, Ahmed. And one thing to note is that throughout this POC, we have never touched the Taiko contracts, although eventually we might, to add that delayed inbox. But in here, we have tried to not mess around with the Taiko contract. And what our task manager does is it simply routes the blocks that are being proposed over to the Taiko contract. So because of this, the prover architecture doesn't have to change. That's the best part. Nothing in the prover has to change, and the proposer also needs very minor modifications, because the original contracts of Taiko have barely been changed. Go ahead for the next step. Thank you.
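A toy version of that fallback selection, assuming a beacon block root (exposed on-chain via EIP-4788) as the randomness source; the function name and registry shape are invented for the sketch, not taken from the actual contracts.

```python
# Illustrative fallback-preconfer selection: hash a beacon root and reduce it
# modulo the registry size to pick one registered preconfer deterministically.
import hashlib

def fallback_preconfer(registry: list[str], beacon_root: bytes) -> str:
    """Pseudo-randomly pick a registered preconfer for an empty epoch."""
    seed = int.from_bytes(hashlib.sha256(beacon_root).digest(), "big")
    return registry[seed % len(registry)]

# Using the root of the first block of the previous epoch makes the choice
# known one epoch in advance, so the chosen preconfer can prepare.
registry = ["0xAlice", "0xBob", "0xCarol"]
print(fallback_preconfer(registry, beacon_root=b"\x42" * 32))
```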
Thank you. Okay. So now we're going to go back to the preconfirmation loop that we discussed, the one that runs every three seconds, in a bit more detail. I hope this clicker works. Okay, I'll just stand here. All right. So first we start with the normal event loop for Taiko. What Taiko does is that once every slot it pulls the pending transactions from the mempool, proposes and forms the block, and then pushes it through the blob to the rollup inbox contract via that proposeBlock function. Very simple, straightforward. What we have with the preconf solution is the following. Every three seconds, the preconf node requests a batch of transactions from the Taiko proposer. The Taiko proposer goes and fetches those transactions from Taiko geth and forms a batch, gives that batch to the preconf node, and the preconf node then pushes this batch of transactions to the P2P, like we said. And of course, this only happens after it has made sure that it is the preconfer for this specific slot, or for an upcoming slot that is very near. So this is the new loop. So what do we sign when we provide the preconfirmations through P2P? What's in the signature? The structure that we use contains the block ID, because we need to commit to a specific block height; otherwise the preconfer could potentially sandwich transactions if it wanted to, if it didn't commit to a specific block height at which it needs to propose this batch. Also the chain ID, in case we have multiple chains, and the transaction list hash, which is basically just a hash of the RLP-encoded transaction list. I see there are some hands for questions. Yeah, go ahead. You have the mic? It's fine. Just raise your hand. Yeah, so three seconds was just a conservative, somewhat arbitrary number that we chose at this point, because we weren't sure how fast the system would be, the latencies, all of that. What we're going to be working on later, as we will see in one of the slides, is getting that number lower and seeing how it's going to behave and how it will work. We will talk about that in later slides. Yeah. So we saw the preconfirmation structure, and then there is the preconfirmation object that is sent in P2P. The preconfirmation structure is the one that can be pushed as a fraud proof. But here, when we are pushing the preconf batch on P2P, we need more details; that one alone is not enough. So we have the block height, the pending transaction list (the whole list), the pending transaction bytes (I'm not entirely sure what that one is for), and the proof, the signed preconf message. And this is what ends up being sent exactly in the P2P. The reason we need the batch of transactions is that we need all the other preconfers, all of the other participants in the network, to be able to advance the head with this batch of transactions. And we also need them to be aware of it, so that when their turn to preconf comes along, they know that these transactions have already been preconfed and will potentially be proposed in the block before they start preconfing themselves. A sketch of the signed structure follows.
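A minimal sketch of the commitment just described: commit to (blockId, chainId, txListHash) and sign the digest. The exact field encoding here is illustrative, not the actual wire format; the chain ID value and helper names are assumptions.

```python
from eth_utils import keccak           # pip install eth-utils
from eth_account import Account        # pip install eth-account
import rlp                             # pip install rlp

def preconf_digest(block_id: int, chain_id: int, tx_list: list[bytes]) -> bytes:
    tx_list_hash = keccak(rlp.encode(tx_list))   # hash of the RLP-encoded tx list
    return keccak(block_id.to_bytes(32, "big")
                  + chain_id.to_bytes(32, "big")
                  + tx_list_hash)

acct = Account.create()                          # throwaway preconfer key
digest = preconf_digest(block_id=42, chain_id=167_000,   # illustrative values
                        tx_list=[b"rawtx1", b"rawtx2"])
# the signature travels on P2P next to the full pending transaction list
signed = Account.unsafe_sign_hash(digest, acct.key)      # eth-account >= 0.11
```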
So, when a preconf node receives the batch of transactions from the P2P once it has been preconfed... Currently, the way it works in Taiko before preconfs is that we get an event called BlockProposed from the L1. The Taiko driver is basically subscribed to this specific event and advances the head once it receives it, which means that the user will get a transaction receipt every 12 to 24 seconds. And this is the main point that our preconfirmation solution is trying to alleviate with this design. So now the preconf node, once it receives the preconf message with the pending transaction list, can provide it to the Taiko driver, and the Taiko driver can basically advance the head of Taiko geth in around three seconds, given that we chose a three-second preconfirmation loop time. All right. So the previous slide was about the preconfing node: once it preconfs the transaction list, it does this. And there are also the non-preconfing nodes, which receive this message through P2P and then send it to their Taiko driver to advance the head. So the whole network advances together, and not just a single one of the nodes in the network. So we already kind of discussed this: we have two ways that the preconfer sends the L1 transactions that will carry the L2 transaction batches. First we said, okay, let's say that we have a preconfer at slot number five, and this preconfer is on duty for slots number one, two, three, four, and five. That means they need to preconf a lot of batches in these slots, and then they can force-include them in slot number five. That's not a problem; that's easy to do by using the PBS software, like we explained. The problem arises if preconfers are sparse. If there aren't a lot of preconfers, one preconfer could be at slot number 42 and be responsible for, I don't know, slot five up until 42, and that would mean he needs to push a lot of transactions in his one slot if we follow this model. So instead we said: okay, maybe we should just push all the preconfed transactions right away to the mempool. We get batch A, batch B, and we just push them directly to the mempool, and at the same time we put them in a queue, in a cache. The idea of this cache is that once an L1 block comes in and it has some of those batch transactions that were pushed in it, we can remove those batches from the cache. If that doesn't happen, that batch of L2 transactions stays in the cache. So basically, if it's included, we clear that batch from the cache. Okay. Then, on the proposal slot, we take the ones that were pushed to the mempool and haven't been included yet, and we make sure that we push them through the constraints API of Bolt's MEV-Boost, to be force-included by the builder. And then, once we receive the L1 block, we have the guarantee from the relayer that these L1 transactions that contain the L2 transaction batches are included in the proposed L1 block. And Anshu will talk about slashing. A sketch of this cache flow follows; after that, on to the preconfirmation service manager contract, which we haven't spoken about yet.
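A plain-Python sketch, under stated assumptions, of the push-to-mempool-plus-cache flow described above; the class and method names are illustrative.

```python
class BatchCache:
    """Track preconfed batches pushed to the public mempool until they land."""

    def __init__(self):
        self.pending: dict[bytes, bytes] = {}    # batch hash -> raw L1 tx

    def on_preconf(self, batch_hash: bytes, raw_l1_tx: bytes) -> None:
        # broadcast to the public mempool AND remember the batch locally
        self.pending[batch_hash] = raw_l1_tx

    def on_l1_block(self, included_hashes: set[bytes]) -> None:
        # any batch that made it into this L1 block leaves the cache
        for h in included_hashes:
            self.pending.pop(h, None)

    def on_own_proposal_slot(self) -> list[bytes]:
        # whatever is still pending gets force-included through the
        # constraints API of the Bolt-style MEV-Boost sidecar
        leftovers = list(self.pending.values())
        self.pending.clear()
        return leftovers
```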
And this is a very interesting contract, because what we have tried to achieve with this POC is really flexible interfacing. We have a number of restaking solutions out there in the market right now: we have EigenLayer, we have Karak, and we might also have our own staking contract later on if the community decides on one, which is actually happening; a discussion is going on about having a unified registry. That calls for having some kind of middleware contract, so that later on the core logic doesn't have to be changed and the staking is just abstracted away to a different set of contracts. And that's what we achieve with the service manager. It's essentially a middleware that ensures that only those proposers who have the required stake are allowed to preconf or propose in the upcoming slots. So what kinds of slashing do we have, or when exactly do we slash? Well, one time that we do slash is when the lookahead being posted is incorrect. A basic version of this: let's say that for a validator with BLS public key B5, the BLS mapping has E4 as the ECDSA key of the individual who owns the B5 key, but the proposer who pushed the lookahead placed E2 as the associated ECDSA key, which is factually incorrect. Now, this might look like an easy comparison: hey, okay, these two are not equal, so why not just slash the person who posted the lookahead? But it's kind of tricky to prove this inequality, because we have B5 and E4 from the preconfirmation registry, but in the task manager we don't have access to who exactly the validator was, and its associated BLS key, for the slot that is incorrect. We don't have that information in any simple way. What we need is to establish this kind of match: B5 from the preconfirmation registry and B5 for the current slot have different ECDSA keys, and on the basis of that, we slash the poster of the lookahead. So, well, there is a way to do that, and I kind of have to make a correction to my previous statement that there is no connection between the consensus layer and the execution layer. There is one, but it's a complicated connection, and that's through the beacon block root, which I think Ahmad touched upon briefly. You see, just like the execution layer has the blocks that we deal with, the consensus layer also has beacon blocks, because it's the beacon chain. And each beacon block has a state root. If you unravel the entire state tree, you'll realize that in the beacon state we have the validators field, which contains all the validators, or the proposers, including their unique index as well as their BLS key. So what EIP-4788 gives us is this eventual Merkle root: if we Merkleize this entire beacon state and then, eventually, the beacon block, we end up with a 32-byte-long Merkle root, and EIP-4788 makes it available within the execution layer. The thing here is that there's a problem: we only have access to historical roots. We don't have access to the current slot's proposer because, well, the consensus layer is yet to create the block, so we can't really know who the proposer is. A sketch of reading these roots follows.
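A hedged sketch of reading a historical root through the EIP-4788 beacon roots contract. The contract address is the one fixed in EIP-4788; the RPC endpoint is a placeholder, and the call is keyed by a block's timestamp and returns that block's parent beacon root.

```python
from web3 import Web3   # pip install web3

BEACON_ROOTS = "0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02"   # EIP-4788 address

def get_beacon_root(w3: Web3, block_timestamp: int) -> bytes:
    # calldata is the 32-byte big-endian timestamp; the contract returns the
    # parent beacon block root for the block carrying that timestamp, and it
    # only keeps a recent ring buffer of history
    data = "0x" + block_timestamp.to_bytes(32, "big").hex()
    return bytes(w3.eth.call({"to": BEACON_ROOTS, "data": data}))

w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))  # placeholder L1 RPC
# root = get_beacon_root(w3, 1731657600)  # timestamp of the block after the disputed slot
```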
So we have to slash, or prove that a lookahead is incorrect, optimistically. And the way we do it is: we wait until the incorrect slot has passed and we have the root. Once we have the root, we can basically make a static call to this contract, which is the beacon roots contract, and get that beacon root. Then the challenger posts the BLS key of whoever was actually supposed to be the validator of that slot, as well as their validator index, and along with this it sends two proofs. First: in this case, if you see, we have a proposer index in the beacon block, and in the beacon state we basically end up having the BLS keys of the validators at their respective indices. So via a Merkle proof, we have to make the connection that this proposer index actually belongs to this particular BLS public key; that's step two. And then, in the next step, the challenger (once again, not the validator) needs to prove that this is the proposer index that is present in the beacon block, as you can see here in the second field. So, very simply, with two Merkle proofs you are able to get the BLS key of whoever was the validator in the slot that is being challenged. And once you get that, you can just go ahead and check whether this inequality is actually satisfied. If it is, then the lookahead slot is incorrect and you can slash whoever posted it. A sketch of this kind of Merkle-branch check follows below. Along with that, we have slashing for bad preconfirmations, which I think was touched upon by Ahmad. To expand on it: we can have execution preconfs that are bad, and we can also have bad inclusion preconfs. In the case of execution preconfs, the proposer did push the eventual block that he preconfed, but he misordered the transactions, or maybe inserted a new transaction that was not actually preconfed. In this case, what we end up having is that the transaction list hash is mismatched between the proposed block and the preconfed block that was on the P2P. In the case of inclusion preconfs, it's a bit simpler: we end up getting different proposers for the same block ID, because it might have happened that, well, he preconfed, but he never really proposed, and someone else ends up proposing at that particular block height eventually. So, yeah, these are the two kinds of slashing. I think that was quite a mouthful, so: any questions? No? Okay. Yeah, so in this case we actually have discovered a few issues. A more simple variant of your issue would be: I preconf, and I don't include it, but then for a long period no one actually includes any block. So what happens is that a long time passes and the dispute period gets over. So there is a preconfirmation, but there is no associated block; the block ID has never really progressed on chain, and that's an issue, because you can't really prove the incorrectness of this directly. So we have a way of doing that. Let's say this is like the slots, okay? Whenever a preconfirmation is being made, we also include a lookahead pointer, which basically states which slot in the lookahead I am preconfing for. So in this way, what we have to do is make a connection between the preconfirmation and the proposed block via the lookahead.
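For the two Merkle proofs mentioned above, here is a minimal sketch in the style of the beacon chain spec's is_valid_merkle_branch; the depth and generalized index for any given field are assumptions that depend on the beacon state layout.

```python
from hashlib import sha256

def _hash(a: bytes, b: bytes) -> bytes:
    return sha256(a + b).digest()

def is_valid_merkle_branch(leaf: bytes, branch: list[bytes],
                           depth: int, index: int, root: bytes) -> bool:
    """Check that `leaf` (e.g. a validator's BLS pubkey chunk, or the
    proposer_index) sits at `index` under `root`, given a `depth`-long branch."""
    value = leaf
    for i in range(depth):
        if (index >> i) & 1:
            value = _hash(branch[i], value)
        else:
            value = _hash(value, branch[i])
    return value == root
```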
I think now that answers your question: if we have that lookahead pointer, which basically states which lookahead slot we have made the preconfirmation for, and it lands at a different time on the L1, we can establish the inequality. Okay, but is it possible that I preconfirm something and someone includes it because they are a proposer before me? Or can that not happen, because they would be the preconfer for that slot before me, so I wouldn't even be able to include this as part of the preconfirmations? I don't know if that makes sense. I mean, not really, because, once again, this is advance proposals. You just release it in the public mempool, and it might happen that the proposer does not include it, but then your slot will be coming in. So if you go back to this slide that Ahmad presented: in this case, when your slot comes in, you take all those pending transactions that have never been included, because the proposers didn't want to include them. But your slot has now arrived, so you pick those transactions and you force-include them via the inclusion list through the PBS pipeline. Yeah, and the other answer to this question is that the previous preconfer cannot go and push transactions into those slots: if this slot is for preconfer 2 to preconf at, preconfer 1 cannot then go and push transactions there. Every slot has an assigned preconfer, and only that specific preconfer can push transactions or batches in those slots. So the contract will just ignore any batches that arrive from preconfers that are not assigned to those specific slots. Okay. Perfect. Any more questions? No? I think we can touch upon the future work now. All right. So first we start with soft blocks. Currently, what we do when we advance the head is that we push a whole, full block to Taiko geth, and in this way it goes and lands in the canonical chain; this is normal, and this is why those transactions are then removed from the mempool, so that when you propose the next block they don't get included twice. So that's one thing. The problem with this approach is that when we are proposing these blocks at the end on L1, we need to propose multiple blocks. For example, if I preconfirm around four batches in every L1 slot, that means I have to make four proposals on L1, and this is costly; both proposing and verifying these blocks is costly. A block proposal costs around 200,000 gas, give or take, and around 400,000 gas for verifying the ZK proof of a single block, give or take as well; it could be a bit more than 400K, closer to 500K. So every block that we end up proposing adds a lot of cost for proposal and verification. So what we ended up with is a solution where, when you preconf, you add the batch to Taiko geth and push it; then, when you push another batch, what ends up happening is that it gets appended into the block. A new block is formed with the previous transactions that are in the block plus the new transaction batch, the previous block gets reorged out, and then we include the new block. And this keeps happening until the preconf node sends an end-of-preconfirmation signal, at which point this block becomes a standard block and gets pushed to L1 as a whole, single proposal. And this gives us the cost saving that we're looking for, but it's still not finalized. A sketch of this accumulation follows.
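A plain-Python sketch of the soft-block accumulation just described; the names and the list-of-batches representation are illustrative.

```python
class SoftBlockBuilder:
    """Grow one soft tip block per proposal instead of one block per batch."""

    def __init__(self):
        self.batches: list[list[bytes]] = []
        self.sealed = False

    def append_batch(self, batch: list[bytes]) -> list[bytes]:
        # conceptually: the previous soft tip is reorged out and replaced by
        # a new tip containing every batch preconfed so far
        assert not self.sealed, "end-of-preconfirmation already sent"
        self.batches.append(batch)
        return [tx for b in self.batches for tx in b]   # contents of the new soft tip

    def end_of_preconfirmation(self) -> list[bytes]:
        # the soft tip becomes a standard block: one L1 proposal, one ZK proof
        self.sealed = True
        return [tx for b in self.batches for tx in b]
```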
It's something that we're hoping to work on in the next couple of months. Another thing, which our friend here asked about before, is the slot time for the L2. Currently we have a constant three-second block time, and what we're looking to accomplish is a one-second block time. It's not that this is not possible currently; it just hasn't been tried yet, and we're not sure what limitations we're going to hit with the P2P, et cetera. Yeah. And then the last one, I think you will talk about it. Yeah. So when I was talking about the preconfirmation service manager contract, I said we are trying to make it like a middleware, so that as and when we want to change the restaking service, or the staking service, we can just swap it out and use a new one. In that regard, the community is planning to launch a universal preconfirmation registry. Chances are there will be many more based rollups down the line, not just Taiko. And when you have so many based rollups, it could be the case that one proposer wants to propose or preconf for multiple based rollups, but it doesn't really want to register over and over, because that will cost it a lot of gas. It also wouldn't be that credibly neutral, because it might be the case that every rollup starts up its own staking service, and that's not reasonable for the proposer: it's a waste of ETH, not a good use of collateral. So in this case, there will be a universal preconfirmation registry, where any proposer who wants to become a preconfer can go in and register the BLS mapping; instead of being in a protocol-owned registry, it will be there, in the universal preconfirmation registry. Along with that, there's the slashing-condition opt-in, which is a very important aspect. The rollup protocol, whoever makes another based rollup, might want to have its own set of conditions based on which it slashes. Maybe they don't want to slash inclusion preconfs, only execution preconfs. So they can define their own slashing conditions, and then only those proposers who have opted into those slashing conditions can be preconfers for their rollup. So yeah, this is still an ongoing discussion in the community, and eventually we will be speccing it all out and releasing it, but it's going to be a while. Well, that's it. Thank you so much. Okay, there's a question here: how do you see this integrating into existing KYC systems, or rather, what's the path to scale and adoption on the side of users and institutions? I'm not sure that's a question for this session. Yeah. Yeah. All right, so: any final questions? How do I evaluate the security of a preconf? Say I'm sending $10, I'm probably not going to worry about it. If I send $10 million, how do I know it will stick? Do I wait? How do I know? Right. So what you're asking about is the fair exchange problem, and this is something that our research team has been looking into for quite some time now. As of now, we don't have a solution for the fair exchange problem, and I think most of the existing or proposed solutions depend on a reputation system, where, if the preconfer or the gateway providing the preconfirmation is acting maliciously, this would affect their reputation in a bad way, and they would potentially be cut out of preconfing for the specific protocol they're working on, which would potentially lead to losses for them.
So the reputation-based system is what's being proposed for now, but we are working on a non-reputation-based system where there is some oversight over how the preconfers are acting: whether they are actually preconfing at the correct time, providing preconfirmations in a timely manner without delaying the preconfirmations coming from the users, or reordering transactions within that delay just to generate more MEV or profit for themselves. There's another question here: what kinds of slashing conditions do you see? A small penalty, or something tougher? So, I think right now our slashing condition is very straightforward: we just slash the entire stake, which is not ideal, and it's definitely not going to stay this way. There is one proposal by the research team, and that's, let's say, slashable tickets. For every preconfirmation that you provide, and every validator that you register, you have a fixed amount of stake. Say you are staking one ETH for every preconfirmation that you provide; once the preconfirmation is settled, you can basically reuse those tickets. But if you mess up that preconfirmation, only that one ETH will be slashed, and not your entire stake. So this is open for discussion, and once again, I think this will be part of the community-led discussion of how severely we want to slash a malicious preconfer. How do you think distributed validator technology fits into your preconfirmation design? Do you think it could make it more secure? Have you guys looked into that? So with DVT, I'm not entirely sure about block proposal and how it works exactly. I know that for attestations, a threshold of the nodes, say three out of four, need to sign to produce a proper BLS signature, and I think it's kind of the same for proposals, but I'm not sure how they choose which one of the nodes is responsible for forming the block. So honestly, no, we have not looked into how DVT would interact with this protocol at this moment. Also, DVT does not form a significant share of the proposer market as of now, so it might not make sense for us to look into it in a serious manner. But if it becomes more popular, then yeah, this would make sense. I mean, this is not on mainnet yet, because, firstly, we don't even have the precompiles; we need the Pectra upgrade. So it's running on a testnet, the Helder testnet. Have you heard of Helder? It's a devnet that was released around EthCC. Taiko already has, not the final version, but one of the intermediate versions running on Helder, providing preconfirmations. But once again, that's a devnet, so it's only the validators that Taiko is running. But yeah, this was tested using 300 validators, and it worked great. Thank you. How much do you pay for the preconfirmations? There might be multiple based rollups, and they might be competing for one preconfer. If you can speak into the mic, because I didn't hear half of what you said. Yeah: how do you decide how much you pay the preconfer to propose the rollup blocks? Because there might be multiple based rollups in the slot, and they might be competing for the same preconfer. So for the payment, we have left it up to the market. Because we decided that the priority fee will be used, the preconfers will basically just not preconfirm transactions that are not profitable. A sketch of that filter follows.
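A minimal sketch of that market filter; the Tx shape and the break-even threshold are illustrative assumptions, not the actual node's data model.

```python
from dataclasses import dataclass

@dataclass
class Tx:
    raw: bytes
    priority_fee_wei: int   # the EIP-1559 tip, the only fee field users set here

def select_profitable(txs: list[Tx], min_priority_fee_wei: int) -> list[Tx]:
    # the threshold covers preconfing + proposing + proving; transactions
    # below it are simply left in the mempool for someone else
    return [tx for tx in txs if tx.priority_fee_wei >= min_priority_fee_wei]
```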
So any transaction that does not have a priority fee high enough to pay the preconfer will just not be included, because the preconfer has the incentive to include as many transactions as it can in the block, to extract as much value as possible from the blob that it's pushing; the blob has quite a lot of space. The more data I can fit in a blob, which is basically close to free to get right now, the more money I make. So we leave the pricing to the market to decide. There is some research going on in this space by Conor and Lin from Nethermind, and another researcher, I think called Fin (I don't know their full names, so I apologize for that), about the economics of preconfirmations, the pricing of preconfs, et cetera. There are a lot of talks on preconfirmations that they have given; I advise you to go check the recordings of those talks. They touch exactly on what you asked about. Thank you. How do you think MEV affects the liveness of the protocol? Do you think it improves it? Is there reason for concern? Personally: MEV or no MEV, the protocol works with locally built blocks, and it also works with forced inclusion through the PBS pipeline. The only problem that I see is that if relayers and builders decided that they're not going to adopt the constraints API that we talked about, because it basically reduces their income (if preconfirmations are not profitable enough compared to what they could include other than these transactions), then we would have a problem. But as of now, and as we heard from our partners, there is a lot of discussion, and the relayers and builders are agreeing to add the constraints API to the PBS pipeline, so they would potentially be including preconfirmations. To add to this: I think we also have plans for L2 MEV extraction, and when that is added, the dynamic changes a bit. Basically, we currently have three-second, and potentially one-second, block times for the L2. In this one second, the preconfer can still, at will, reorder these transactions. But we would not expect every preconfer, which is basically just an L1 validator, to have the sophistication needed, in software and in hardware, to be able to support such ordering in one second. I think what's going to happen, if this protocol gets adoption, is that there will be a PBS pipeline for L2 blocks that tries to extract MEV from these L2 blocks in this one-second slot. Sorry, last question: what are the latency figures that you have for block transmission from the preconfer? We don't have those yet, because as of now we only have devnets, and the devnets are basically just Kurtosis instances that are running on local machines. I mean, we have Helder, but Helder deployed a hybrid solution between gateway preconfs and our solution: our smart contracts, but gateway software, basically. And as of now, that's what... no, that's what's deployed on Helder? Yeah. Okay, yeah. So that's what's deployed on Helder right now. So it's not the same setup, and we don't have that distribution of sidecars running, so we can't measure this latency. We could do some simulations, potentially, but reality is always different from simulations. Do we have more questions? All right.
Thank you everyone for attending.", "eventId": "devcon-7", - "slot_start": 1731658800000, - "slot_end": 1731659400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1GqW9KTYWAB1IHrlktGM0ntHgWK-2umJNkyohBO811gU", - "resources_slides": null, + "slot_start": 1731655800000, + "slot_end": 1731661200000, + "slot_roomId": "classroom-e", + "resources_presentation": "https://docs.google.com/presentation/d/14eqnMC0_aJ3IguPD2egqY1ojHSZRxc4QPo5D4RhCje8", + "resources_slides": "https://drive.google.com/file/d/1qqAUySRK6gC0TvH8DcFecTFyVG5xEqfd/view", "speakers": [ - "paul-kohlhaas" + "anshu-jalan", + "ahmad-bitar" ] }, "vector": [ @@ -229822,8 +229209,8 @@ 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -230080,6 +229467,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -230579,6 +229967,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -230604,8 +229993,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -230626,6 +230017,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -230646,13 +230038,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -230678,6 +230063,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -230701,7 +230087,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -230835,8 +230220,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -231129,14 +230512,13 @@ 0, 0, 0, + 2, 0, 0, 0, 2, 0, 0, - 2, - 0, 0, 0, 0, @@ -231152,49 +230534,52 @@ }, { "session": { - "id": "designing-an-end-to-end-solution-for-based-preconfirmations", - "sourceId": "CRWBCC", - "title": "Designing an End to End Solution for Based Preconfirmations", - "description": "This workshop provides the audience with a foundation for building an end-to-end solution to deliver fast preconfirmation of transactions on a based-rollup like Taiko. In addition to understanding the basics of based sequencing and preconfirmations, attendees will learn about settling these preconfirmations as an Eigenlayer AVS, designing the AVS client, syncing L2 state using preconfirmed blocks, preconfer election, and managing a proposer lookahead using Beacon state within smart contracts.", - "track": "Layer 2", + "id": "designing-and-launching-a-retroround-incentivize-what-matters", + "sourceId": "39AVKD", + "title": "Designing and launching a RetroRound - Incentivize what matters", + "description": "Learn how to design, develop and launch a retroactive funding round. In this workshop we’ll explore the differences, similarities and best practices for running a local and ecosystem RetroRound. Participants will be able to set clear goals, define impactful behaviors to be incentivized, scope technical roadmaps, and formulate a sustainable strategy to fund public goods. 
Ideal for emerging markets community leaders and web3 Ecosystems looking for new resilient and diverse funding strategies.", + "track": "Coordination", "type": "Workshop", - "expertise": "Intermediate", - "audience": "Engineering", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", - "Rollups", - "User Experience", - "sequencer", - "based", - "Layer 2s", - "Rollups", - "User Experience" + "RPGF", + "Quadratic Voting", + "Public good", + "Design", + "Mechanism design", + "program", + "grants", + "Mechanism design", + "Public good", + "Quadratic Voting", + "RPGF" ], "keywords": [ - "Preconfirmations", - "Based Rollups", - "Based Sequencing" + "Emerging markets", + "Grant Program Design", + "" ], - "duration": 5149, + "duration": 5442, "language": "en", - "sources_swarmHash": "cf9df3cae1b815b47b992c112df3a3d160808224cc703fbcd2cf37543590dbc6", - "sources_youtubeId": "70xIIrGXDSo", + "sources_swarmHash": "5dd5ebab804d94005464c04cc83b0393d5fe0c7517d4ff7e86c54e1149ab100e", + "sources_youtubeId": "Ugxag4KRdds", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673869271b0f83434debead4", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673869271b0f83434debead4.vtt", - "transcript_text": " Thank you. Kind of. I mean, it's fine, but since we're here. Okay. Yeah, very good afternoon to all of you present here. So, yeah, we have reached the final day of DEF CON. We have had, like, ten different talks on pre-confirmations, three full-scale events, and the only question that I've been getting is, how do you make all of this? Like, fast transaction confirmations, yeah, we get it, but how do you make this? So yeah, now on the last day, welcome to the workshop on designing an end-to-end system for Bayes pre-conformations. A quick introduction of the speakers. Ahmed, go ahead. So my name is Ahmed Bitar. I work as an Ethereum core developer. Normally, I'm also the product manager of Surge and the technical lead for our pre-conformation solution. And I'm Anshu. I am working as a blockchain engineer at Nethermind. And I joined this industry like five years ago, mostly focused on DeFi. But for the past six months, my efforts have been mostly concentrated on base reconfirmations. So today this session is a no-code session. So I won't be like writing a line of code and then asking you to copy it and repeat it like 100 times over. I know most of you will leave in like half an hour if I do that. So instead, this is a session on design thinking, where we'll build the whole concept from ground up and share what exactly we did during our research over the past six months. But if you are interested in looking at the code, then I have linked it below. It's taikoprecomp-avs-repository, which you can find on the Nettermine GitHub page. So since we want to build it all up from ground up, we have to start with the foundational knowledge, which is rollups. Because the base in base pre-confirmations comes from base rollups. Just by show of hands, how many of you do understand what a roll-up is? Okay, pretty much everyone. That's great. Well, roll-up is a scaling solution. And why do we need a scaling solution? We need a scaling solution because the L1 is really slow. The throughput of L1 is not much. And why exactly is that the case? Well, in a blockchain, blocks are basically a consensus on state transition. That's the first section of the Ethereum white paper. 
You have state A, you apply a bunch of transactions to it, which is like a delta, and you get a state B. But now imagine millions of nodes doing this for every single block. That's going to make the network really slow, and that's a major problem. So what's the rollup way of doing it? Well, if processing state transitions is the biggest issue on the L1, what if we process these state transitions, or apply these deltas, off-chain? And that's exactly what a rollup does. You can have one single rollup node, or an L2 node (since it becomes a layer 2), that applies this delta off-chain and processes the state transition, and the L1 is just an observer. On the L1 you have a rollup inbox contract, and you simply push the state transition and the delta, maybe in the form of a blob, or in the calldata; in the image it's a blob. And in its most basic form, this is what a rollup is. But is this enough? Well, no. Because if it's just one node pushing the transition and the delta, how do we know whether it's actually correct or not? And this is where the flavors of rollups come in: optimistic and ZK. In the case of an optimistic rollup, you push the state transition and the delta, and then you just wait; you wait for someone to prove you incorrect. If you're correct, it's all good. But someone can just come by and say: hey, here's a proof that this transition you posted is not possible with the delta that you posted. And then you kind of get slashed, if you have some stake in; it depends on what kind of process the rollup wants. Arbitrum is an example of an optimistic rollup; I guess most of you must have used it. And then we have ZK rollups. In the case of ZK rollups, instead of waiting for someone to prove you incorrect, the moment you push the transition and the delta, you have to prove that it is correct. It's called a validity proof; you're basically proving the validity of the transition. But the point here is that whatever time or computational effort it takes to verify this validity proof must be less than what it takes for the L1 itself to process all the data; only then does it actually make sense, right? So these are the two variants for proving whether a transition is correct or not; a toy sketch of this follows below. Once again, by show of hands, how many of you have heard of centralized sequencing? Okay. It's the most cursed concept right now. Just to give a quick primer: when you're making a transaction on Arbitrum, you're not directly sending the transaction to a public mempool, like when you're transacting on Ethereum L1. Instead, it's going to an Arbitrum sequencer, and it's a private server. What the sequencer does is it has complete control over arranging, or ordering, the transactions, and they usually promise that they will arrange the transactions in a certain way. We don't know whether they're actually doing it or not. In the case of Arbitrum, the promise is first come, first served: if your transaction comes before that other person's, we'll put your transaction first in the block. But once again, it's a promise; we don't know whether that's actually happening or not. So let's get based and talk about based rollups and preconfirmations. Well, the based part is actually not a different variant of a rollup, so it doesn't stand beside our optimistic or ZK rollup. Instead, based is a form of sequencing: just like we have centralized sequencing, we have based sequencing.
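Stepping back to the state-transition framing above, here is a toy sketch (balances standing in for the full state, a hash standing in for a real state trie); everything here is illustrative.

```python
from hashlib import sha256

def apply_delta(state: dict[str, int], delta: list[tuple[str, str, int]]) -> dict[str, int]:
    # each delta entry is a simple transfer: (sender, recipient, amount)
    new_state = dict(state)
    for sender, recipient, amount in delta:
        new_state[sender] -= amount
        new_state[recipient] = new_state.get(recipient, 0) + amount
    return new_state

def state_root(state: dict[str, int]) -> bytes:
    # toy commitment, not a Merkle-Patricia trie
    return sha256(repr(sorted(state.items())).encode()).digest()

state_a = {"alice": 100, "bob": 50}
delta = [("alice", "bob", 30)]
claimed_root = state_root(apply_delta(state_a, delta))
# optimistic: post claimed_root and wait out the fraud-proof window;
# zk: post claimed_root together with a validity proof of apply_delta
```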
And in the case of based sequencing, the L1 proposer is the sequencer. You don't have a centralized server sequencing the transactions; instead, it's the L1 proposer. Let's say we have Taiko. Taiko is a based rollup, and in the case of Taiko, the L1 proposer literally runs the Taiko software alongside the usual consensus and execution clients. And whenever their block comes in, they literally just pull the L2 transactions from a public Taiko mempool, order them into their L1 block, and put it on the network. So yeah, you are basically inheriting the L1 security as well as the L1 liveness, because the L1 itself is your sequencer. So, just a quick overview of how Taiko is actually arranged. Well, you have the rollup inbox contract that I have been talking about, although Taiko doesn't really call it a rollup inbox; that's the colloquial term. Taiko has a bunch of contracts that work together, and they just call it the Taiko L1 contract. So that's the L1 component. Now, if you're running an Ethereum validator, you usually run an execution client and a consensus client, right? So, in a similar way, if you want to run or be a part of an L2 network, the proposer has to run an L2 execution client and an L2 consensus client. So in this case, Taiko geth is the execution client, which is a modification of standard go-ethereum, and this is where you have the mempool, you have the actual L2 chain where all the blocks are formalized, and you also have the EVM of the L2 network. And then we have the Taiko client, which has some subcomponents and forms the consensus client; it deals with proving the blocks and proposing the blocks whenever required. Now, today the most important thing for us is block proposals, because that's where the whole concept of preconfirmations will be built. So how exactly do block proposals work in Taiko, or in a based rollup? Well, when you're making a transaction using any kind of standard wallet, it goes to the mempool, the public mempool offered by Taiko geth. And every few seconds, or depending upon what algorithm the proposer is using, transactions are fetched from the mempool, and this transaction batch forms the actual delta. When you're calling the rollup inbox contract, you're basically calling a function, proposeBlock, and you're just passing this delta along. Now you might be wondering: well, that's the delta, fine, but where's the transition? So in Taiko, it's a two-step process. The delta has gone in, and now we have the Taiko prover, which comes in later on and just says: okay, remember that delta that was pushed a few seconds ago or a few minutes ago? You see, this is the transition that that delta causes. And since Taiko is a ZK rollup, it pushes a concise proof along with it: okay, this is the transition, and this is the proof that the delta caused this transition. Now, the most important aspect: the Taiko driver. So once these L2 blocks are put on L1, a BlockProposed event is emitted, which shows that, hey, okay, this L2 block was put in the L1 contract. And whenever that BlockProposed event is emitted, the Taiko driver listens to it and advances the head in geth. Advancing the head basically means you're formalizing an L2 block within the L2 network, and this is when the wallets end up getting the transaction receipt, the confirmation. So you see, the catch here is that the Taiko driver only receives the BlockProposed event every 12 seconds. A sketch of this event loop follows.
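A hedged sketch of what such a driver loop looks like with web3.py (v7 naming); the inbox address and the one-event ABI fragment are illustrative stand-ins, not Taiko's real interface.

```python
import time
from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))    # placeholder L1 RPC
INBOX = "0x0000000000000000000000000000000000000000"     # placeholder inbox address
ABI = [{"anonymous": False, "type": "event", "name": "BlockProposed",
        "inputs": [{"indexed": True, "name": "blockId", "type": "uint256"}]}]

inbox = w3.eth.contract(address=INBOX, abi=ABI)
event_filter = inbox.events.BlockProposed.create_filter(from_block="latest")

while True:
    for ev in event_filter.get_new_entries():
        # in the real driver this is where the L2 head advances in geth
        print("advance L2 head for proposed block", ev["args"]["blockId"])
    time.sleep(12)   # roughly one L1 slot: the latency preconfirmations remove
```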
That means that when you're making a transaction on Taiko, you get a transaction receipt after 12 seconds, which is huge. That's not ideal for a rollup or a scaling solution. And that's very evident from the Dune Analytics graph of Taiko's block times: they average between 12 and 24 seconds, which makes sense, since it's inheriting the L1 block time. That's not ideal at all. So we need preconfirmations. And preconfirmations are not something new. We have already had them on Arbitrum for a long while, and on several other rollups. Again, show of hands: how many of you have transacted on Arbitrum? When you transact on Arbitrum, you basically get a transaction receipt in a very small period of time, like half a second, one second, sometimes two seconds. But the point here is that Arbitrum only posts blocks every two minutes on L1. So how are you getting this transaction receipt immediately? It's because that's a preconfirmation. They're giving you a promise: hey, see, this is the receipt, and we will be putting this on the L1 eventually. On Taiko, you have to wait for it to actually be put on the L1. So, well, this gives better UX on Arbitrum and bad UX on Taiko. Now, what if we want to put preconfirmations on Taiko? It's really tricky, because in the case of Arbitrum, we have one server, literally one server in one corner of the world, just running, ordering transactions, providing preconfirmations. Easy. In the case of based rollups, the sequencer is changing every single slot; the proposer changes every single slot. And then, these days, we don't even have the proposer building the blocks: the blocks are actually built via a PBS pipeline, like MEV-Boost by Flashbots, where builders build the blocks for you as the proposer, and then you just take the block of whichever builder gives you the highest bid and propose it. You don't even have any control over what's in the block. So how exactly will you put in based preconfirmations with three layers of complexity? Well, we have done it, and let's start with the design principles. Maybe a quick round of questions first, if anyone has any? No? Okay. Take it over. All right. Great, thank you. Okay, so the first thing we wanted to do was to not introduce centralization again. The idea here is that if we wanted to introduce centralization, we wouldn't have built it as a based rollup in the first place. Then how are we going to solve this complex problem? So let me explain how gateways work first. Gateways are basically centralized servers that expose an RPC for the user, so they can provide that preconf. The user sends the transaction to the RPC, and then the gateway selects which transactions it wants to preconf, and then it proposes that block to L1. Of course, the confirmation receipt that goes to the user is given in a matter of milliseconds, between 100 and 200 milliseconds, which is very fast. Of course, the UX is very cool, but the compromise is very high. Now, there is another important concept here in preconfirmations: not all validators have signed up to become preconfers. And because of this, you sometimes have some validators who have decided to register as a preconfer, and some others who haven't registered. So the gateway can provide preconfirmations for users, and it will not necessarily push a proposed block for the L2 in this slot; it could potentially push it here.
And the way to do this will be explained at a later stage, when we talk about forced inclusion lists. All right. The other thing that we wanted to focus on, which was important for us, is that we wanted to use the existing transaction structure and the existing wallets. We didn't want to invent new complexity on top of the already existing situation when you are sending transactions through wallets. So some suggestions in the preconfing space were, for example: oh, we should potentially put in an inclusion-preconf fee premium (what are you going to pay the preconfer so he can provide you with this fast service?), and a base fee per gas for that as well; or, for example, an execution-preconf fee. Yeah, they're all basically very similar to each other. So what we decided is: no, we're going to choose something basic. We're going to use the same exact EIP-1559 field, which is the priority fee. So the priority fee pays for the preconfirmation, the proposing, and the proving of that transaction, and the user does not have to worry about all of these other complex things. Okay. It's not moving. Okay. All right. So I'll start now explaining what we designed. So, in Taiko, like I explained, we have the Taiko client, and we also have Taiko geth. And before we come to these, we have the contract that receives the block proposals. So what we did is we added something we call a preconfing node, and that sits between the proposer and the contracts. And we added some contracts. One contract is the preconfirmation service contract, which basically receives the blocks that are coming from the preconfer. And also, we added a restaking contract, which basically allows the proposers to register as preconfers. And by this, these proposers, whoever they are (any validator), can basically just run this set of software as a sidecar to whatever they're running for the validator: alongside the L1 clients, you're just running these three Docker containers, and then you are able to preconf transactions when your slot is up. So how does this exactly work? So we have a loop that happens every three seconds. When you are chosen as the preconfer for the upcoming slot, what happens is that you, as the preconfing node, fetch the transactions every three seconds from the Taiko proposer, which basically fetches them from Taiko geth. The user will have sent this transaction to the mempool, so there is no centralizing aspect here; you don't have to connect to a specific endpoint to send the transaction. And every three seconds, the preconfer signs this batch of transactions that it has received from the Taiko proposer, and then broadcasts it to other preconfers through P2P. Oops, okay. Okay, permissionless, okay. So here we look at: okay, how do we choose which of the registered preconfers is going to have the right to propose these blocks? And this is where we use the lookahead that is provided by the consensus layer, to know which registered proposers are coming up in this epoch and in the epoch after, the upcoming 64 slots. So on the consensus layer, the beacon chain, you can query the CL client for the current epoch and for the upcoming epoch, to learn which validators have the right to propose. A sketch of this query follows.
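A sketch of that query using the standard beacon node API (the proposer-duties endpoint from the Ethereum beacon-API spec); the node URL is a placeholder.

```python
import requests

BEACON = "http://localhost:5052"   # placeholder beacon node endpoint

def proposer_duties(epoch: int) -> list[dict]:
    # one entry per slot of the epoch:
    # {"pubkey": "0x...", "validator_index": "...", "slot": "..."}
    r = requests.get(f"{BEACON}/eth/v1/validator/duties/proposer/{epoch}")
    r.raise_for_status()
    return r.json()["data"]

# duties = proposer_duties(current_epoch) + proposer_duties(current_epoch + 1)
```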
So it provides you with a list, 32 entries long, for the epoch that you specify. And so, basically, what we did is that we made the preconf node fetch this list from the CL client and push it to the preconfirmation service. And this way we know, for example: since this proposer is not registered, and this proposer is not registered, but we know that this proposer is registered, we choose this proposer to be the one who is preconfing those blocks. This thing doesn't always work. Okay. So then, as with any system, you have to have incentives to act correctly. Some systems depend only on rewarding good behavior, some systems depend on punishing bad behavior, and most systems, or a lot of systems, depend on doing both. So in this case, the preconfer gets the preconfirmation fees and the proposing fees. In return, he needs to preconf, provide good information, and honor the preconfirmations that he gave to the users. The way we check that is through revealing a signed malicious preconfirmation. The way it's done is this: let's say I preconfed a batch of transactions, and then I didn't end up pushing this batch of transactions on chain. But I have already broadcast, with my signature, that I have preconfed this batch of transactions. So what would happen is that another preconfer that was listening on P2P would then pick up this signature with the batch of transactions and push it to the preconfirmation service contract. The preconfirmation service contract would check whether this is a valid signature from the preconfer of the slot, and if it is, it will go and ask the restaking or staking contract to slash the signer. And in this case, yeah, there is also slashing for an incorrect lookahead: you fetched the lookahead from the CL, but how does the EL know that this lookahead is correct? You can use EIP-4788, which basically provides you with the CL beacon root, and using that, you can push a proof if the lookahead is incorrect, and the preconfer who sent an incorrect lookahead would be slashed. Okay. So before I move on, if you guys have any questions, it's good to have them now. So yeah, please go ahead. I have two questions. The first one is: I'm not familiar with 4788, but could you not verify the lookahead at the moment it is pushed? Yeah, so the thing about 4788 is that it provides you with the beacon root of the parent block, not the current block. And because of this, when the lookahead is pushed, I cannot verify its correctness at that moment. But after one single block, I can, with a proof using 4788, show that it is not correct if it is not. So this is why we opted for this. In the case that it was possible to verify immediately, we would potentially have opted for pushing the proof with the lookahead. The problem with that is that you would normally pay more, because the proof would need to be verified on chain in every single submission, whereas if you do it in a fraud-proof manner, it's only needed when someone is acting maliciously, so the cost drops dramatically. And there are incentives for this: whoever submits the fraud proof for the lookahead will get rewarded for it out of the slashing of the other preconfer. Other questions? Yeah. Okay.
Let me try to make this work somehow. Okay. Work, please. Okay. No. Okay. Yeah. Over here. Sure. I mean, here: this one is now opted in, and this one is not. Yeah. Okay. So this is probably not mentioned in this presentation, which is a problem. Hey, sorry. Basically, based rollups depend on giving the user the ability to always push transactions on the L1 permissionlessly. And that means that there is no one who could prevent a user from pushing a transaction that could invalidate future transactions that have been preconfed and will be pushed later. And this leads to execution preconfs not being able to be provided. And the only solution that we have to that is a delayed inbox. So preconfers are able to push directly to the Taiko smart contract to propose blocks, whereas normal users who don't want to use the preconfing system that is built, and want to push directly to avoid potential censorship by the preconfer (even though this is a decentralized solution, it could potentially have some type of censorship), push their transactions to this delayed inbox. What happens is that we wait for the preconfirmations to land, and then we include those transactions that have been pushed by the user. So, in this instance, just to be clear: any transactions that come in this slot will go to the queue and will not be proposed? No, they will not be denied; they will go to the queue. Yeah, delayed. Yes. They will be delayed until after the preconfer has pushed his preconfirmations, and then they will be included. What if my preconfirmation depends on L1 state, which is like a really nice component of based rollups? I could expect a preconfirmation for something interacting with L1. Yes. So if we bring composability in here, then yes, any L1 state change could potentially affect an L2 transaction that depends on it. And this is not a problem that this solution is trying to solve, unfortunately. Yeah, I don't think, well, potentially there are some people working on this, but I'm not aware of any solution to this particular problem. This is a very good question. Thank you, sir. Any other questions? Yeah, go ahead. Does this require at least one registered preconfer? Yeah, so I think this might be talked about at a later stage, so I'm not going to touch on it more. But there is a way of choosing a random preconfer in the case that there isn't a preconfer available for the next 64 slots. Any other questions? Yeah, go ahead. The gateway. Okay. So the gateway basically buys the right to propose at a certain slot from the proposers that have registered as preconfers. I don't think so. No, they can have varying tips between one gateway and another. The problem with gateways is that the gateways will have to compete. In the beginning, we might have a couple of gateways out there. The problem with that is that people normally end up converging on one or two gateways, which would then have a monopoly: one or two gateways dominating. Because the idea here is that if a gateway cannot secure validators or proposers that are willing to sell it their right to propose these Taiko transactions, the gateway cannot operate. What happens when preconfirmations are built on other preconfirmations? Yeah. So you do. And that's why we have the P2P. I'll explain that in this slide.
So, when we said here, sorry, this thing is not... okay. Yeah, so when we said that the preconf node every three seconds takes a batch of transactions and signs it, it then broadcasts the signed preconfed blocks on P2P. This is important, because anyone who's listening can do a bunch of things. First, they can advance the head of their Taiko geth, so people can keep up with these preconfirmations. Are you moving to a safe head there? Sorry? Is it moving just to a safe head? No, it's not even a safe head at this point, because it's basically a soft head, a very soft head. Because as long as it hasn't been proposed on L1, it's not fully safe. I mean, there are financial disincentives for the preconfer if he doesn't obey or honor these preconfirmations, but software can always fail, or they could potentially have other incentives. So it's a very soft head, but at least you get consistent block times, which is a better user experience than what you get right now. I mean, Anshu said that transactions normally take 12 seconds, and on Taiko they sometimes take 20 seconds, so there is some kind of extra delay. It's a very long time. Any other questions before moving on? Sorry, I just wanted to understand the consensus at the P2P layer that you were describing. What do you mean, the consensus? So my original question was: if you have new preconfirmations being built upon the state of previous preconfirmations, consensus is needed there between two different proposals, right? So, okay, let's say that all of these are preconfers. And this one is a preconfer and has pushed four blocks: it preconfed the first one, the second one, the third one, the fourth one, and then it basically just went and pushed them to L1. This next one would be listening to these preconfirmation batches on the P2P and would receive them. And it would just wait for a confirmation that they have landed on L1, and then it would start building on top of that. Would the middle one be accepting preconfirmations before it's been posted to L1? No. Because the preconfirmations depend on the state of what happens in that chain there, so it can't do that. Or rather, it can, but it would collect a bunch of transactions that have already been included, for example, so it would potentially lose that space on the L1 side. And also, if it preconfs a bunch of transactions that have already landed, the execution preconf is not honored, but that doesn't mean that it will be slashed; it won't be slashed in the system, because the inclusion preconf is still there. But if it does not include them, it will be slashed. So in both cases it's kind of losing something; of course, non-inclusion makes it lose more. So it would include them, even though they're already processed and will be invalid if they're included again, but at least it won't be slashed. Yeah, so this is the consensus. I mean, this is a problem with varying types of preconfs in general. So in the system we're trying to build right now with preconfirmations, we're trying to avoid a singular solution dominating. So there are multiple solutions.
So there is the gateway, there is what we're building, and there are others. If each one needs to build on the other one, it needs to wait for the state of L1 to be updated before it starts building on top of it. Additionally, if a user pushes a transaction directly to L1 without going through a preconfer, that also changes the state. So it doesn't make any sense at all to start building preconfirmations before knowing the exact state on L1. Thank you. Okay, let's hope this marker starts working. Yeah, so we talked about slashing. Now, as we said, the preconfer needs to include and propose these batches of transactions on the L1. But if this validator is selling its right to build the block through PBS to a builder, it cannot accomplish that, because it has no control over the contents of that block. The solution we found is to modify the PBS pipeline to accomplish this exact goal. The preconf node basically tells MEV-Boost that there is a constraint on the builder: the builder needs to include a bunch of L2 blocks, which are a couple of L1 transactions, or it could be one L1 transaction and its blobs. The PBS relay then responds with: okay, I can honor that. The builder builds a block including these transactions at the end, and we propose the L1 block. Currently we're using Bolt MEV-Boost, because it already has that constraints API implemented, and we didn't want to reinvent the wheel. But of course there is also Commit-Boost, which can have the Bolt MEV-Boost module built on it. For more details on those, please search for Commit-Boost and Bolt MEV-Boost; both have open-source GitHub repositories that you can look at. Preconfer selection: I think Anshu will take over here. Thank you. Do you want the mic? Yeah, just this one, yeah. Okay, so now you have a good idea of what the overall design looks like. Let's talk about preconfer selection, because we did say that we want the L1 proposer to be the only one who can preconf and then propose the L2 blocks in a particular slot. Now, this is actually a very hard problem, even though it doesn't look like one, because at Ethereum we love patching things up, and in the process of patching things up we develop new problems. When Ethereum moved from PoW to PoS, we introduced a new layer, the consensus layer, beside the existing execution layer. Earlier it was all merged into one single thing; now we have a consensus layer, which manages the PoS part, and an execution layer, which is what we developers usually handle when deploying smart contracts or interacting with Ethereum using a wallet. The problem is that the consensus layer is where the proposer's identity lives, and that uses a BLS signature scheme. But the execution layer, where all the inbox contracts are and where all the transactions are made, uses an ECDSA signature scheme. And that's a big problem: how do we make a connection between the two? There's no native way. So what we need is a BLS-to-ECDSA mapping. Let's say I'm an entity, I have an ECDSA address, and I run a thousand validators with a thousand different BLS public keys. I need a way to prove that I own those thousand validators.
I can show it to the registry contract. So we have the preconfirmation service contracts, actually three subcontracts: the preconfirmation registry, which we'll be dealing with in this slide, and two other contracts that we'll take up later on. This entity needs to prove: hey, I own these validators, and I actually have the right to propose an L2 block in a particular slot. And how exactly do you prove ownership of a key? Through signatures, right? So we have this signature format here. It has the standard things, like a chain ID; then validator op is either 0 or 1: if it's 0, you're removing a validator from your list, if it's 1 you're adding one; then there is an expiry, and then the actual preconfer. The preconfer in the signature message is the ECDSA address that is claiming ownership of a BLS address. So the ECDSA address just pushes a signature, and the contract verifies that the signature is correct and that this BLS public key actually belongs to this ECDSA key, and inserts it into a simple map. Now, the execution layer has no native way of verifying BLS signatures right now, but very soon, in the next upgrade, the Pectra upgrade, a set of precompiles is being added via EIP-2537 (that's where all the discussion has happened), and these precompiles will help us verify BLS signatures. Now, these are really expensive: verifying one signature costs around 300K units of gas. So in our next POC we are actually proposing an alternative that uses a great feature of BLS, and that's aggregation. If you have 1,000 validators, you can produce 1,000 signatures off-chain and add all of these signatures up (it's basically elliptic curve addition), and then on the contract you just have to verify one signature. So essentially you can put thousands of validators in your registry with just one signature and just a bit more than 300K gas, which is amazing. This is what we will potentially be putting in the next version. But yeah, right now it's one-to-one: a single address, a single signature, every single time. So how is this used to construct the lookahead? The BLS lookahead from the consensus layer is absolutely useless on the execution layer; we need an ECDSA lookahead on the execution layer, so that at every slot we know that only this ECDSA key is supposed to propose an L2 block, and no other ECDSA key can. In this case it's kind of simple. The preconf node has the logic, and the preconf node has the view of both the execution layer and the consensus layer. So the preconf node pulls all the proposers for the next epoch from the consensus layer. Then it fetches the associated ECDSA keys from the preconfirmation registry, because we have the BLS-to-ECDSA mapping there, and it just matches them: this BLS key for the next slot belongs to this ECDSA key, this one belongs to that one, and it creates the entire lookahead.
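(To make the registration flow above concrete, here is a minimal sketch in Python using py_ecc for the BLS operations. The message layout mirrors the fields just described, chain ID, validator op, expiry, and the claimed preconfer address, but the exact encoding and all names are illustrative assumptions, not the actual registry interface.)

```python
# Sketch of the BLS -> ECDSA registration, using py_ecc; the message
# layout (chain ID, validator op, expiry, claimed preconfer address) is
# illustrative, not the registry's actual encoding.
from py_ecc.bls import G2ProofOfPossession as bls

def registration_message(chain_id: int, validator_op: int,
                         expiry: int, preconfer: bytes) -> bytes:
    # validator_op: 0 = remove a validator from the list, 1 = add one
    return (chain_id.to_bytes(32, "big") + validator_op.to_bytes(1, "big")
            + expiry.to_bytes(8, "big") + preconfer)

msg = registration_message(1, 1, 1_731_600_000, b"\x11" * 20)

# One entity, many validators: every BLS key signs the same message...
secret_keys = [1, 2, 3]  # placeholder keys; in reality, thousands of validators
pubkeys = [bls.SkToPk(sk) for sk in secret_keys]
agg_sig = bls.Aggregate([bls.Sign(sk, msg) for sk in secret_keys])

# ...and the registry only verifies ONE aggregate signature (on-chain,
# this would go through the EIP-2537 precompiles) before storing the map.
assert bls.FastAggregateVerify(pubkeys, msg, agg_sig)
registry = {pk: b"\x11" * 20 for pk in pubkeys}  # BLS pubkey -> ECDSA address
```

The point of the aggregate path is exactly the gas saving mentioned above: one on-chain verification covers an arbitrary number of validators, instead of one roughly 300K-gas check per key.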
Now, in our design, we have assigned the duty of pushing the lookahead to the first preconfer of every epoch: the first preconfer of the current epoch pushes the lookahead for the next epoch. And they are bound by this duty; there's really no option of not doing it, because the contract expects them to provide it. This is what a single lookahead entry, one of the nodes in the lookahead mapping, looks like. The timestamp is the timestamp of the slot, and the preconfer is whoever is supposed to be preconfing, or proposing an L2 block, in that slot. We have two more fields: fallback and previous timestamp. The previous timestamp is just a link to the last lookahead node's timestamp. What this allows us to do is arrange the lookahead as a linked list within the contract, or sort of a linked list: every lookahead structure is an item in a map, and the previous timestamp points to one of the other timestamps. This allows us to have advance proposals, because not every proposer will be opting in, and not every proposer will be registering and exposing an ECDSA address; some are just not interested in preconfing. So we cannot just have an entire epoch be empty if there are no preconfers, or very few; we need to do something with the empty slots. What we do is allow the next chosen preconfer to preconf in advance, and because of this link we can do that with a simple check, a simple if condition. That's why, in here, you can see that P2, preconfer 2, can already preconf in the second and third slots, and P3 can preconf up to three slots in advance. That is made possible by this linked-list design. Finally, if we have an epoch where there are no preconfers at all, and that's very much possible if none of the proposers in that epoch has opted in, then we have to select someone randomly. And that's a very simple selection: we need a source of randomness, and we apply a simple modulus to it to select an index from the registry, which tells us exactly who is going to be the preconfer. The source of randomness comes from the beacon roots contract. As far as I remember, we end up choosing the beacon root of the first block of the last epoch, because this gives us a deterministic choice of who is going to be the random preconfer in the next epoch. This fallback preconfer has an advantage: it can preconf in every single slot of this epoch, because no one else is there to preconf.
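(A rough sketch of the lookahead entry and the two checks just described: advance proposal via the previous-timestamp link, and the random fallback. Names are ours; the real structure lives in the Solidity contract.)

```python
# Illustrative lookahead entry and the two checks described above.
from dataclasses import dataclass

@dataclass
class LookaheadEntry:
    timestamp: int       # timestamp of this slot
    preconfer: bytes     # ECDSA address allowed to preconf here
    is_fallback: bool
    prev_timestamp: int  # link to the previous entry: a linked list in a map

def may_preconf(sender: bytes, now: int, entry: LookaheadEntry) -> bool:
    # Advance proposal: the next opted-in preconfer also owns the empty
    # slots between the previous entry and its own slot, one if-check.
    return sender == entry.preconfer and entry.prev_timestamp < now <= entry.timestamp

def fallback_preconfer(registry: list, beacon_root: bytes) -> bytes:
    # Empty epoch: beacon root of the first block of the last epoch as the
    # randomness source, then a simple modulus over the registry indices.
    return registry[int.from_bytes(beacon_root, "big") % len(registry)]
```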
Yeah? Sorry, I have a question. If the fallback preconfer is not a proposer in the epoch, doesn't that mean they're highly unlikely to be one for six months? Well, we won't be stopping the system for six months, right? If I can get in here, I understand the question. You're asking: since the fallback preconfer does not have the right to propose a block in the next epoch, for example, and we chose it at random, how will it be able to honor these preconfirmations? And the answer is that it might not be able to honor them. But it will not be slashed if it does not honor them in this case, because it's a randomly picked preconfer. We also thought about this: okay, maybe it shouldn't be providing preconfirmations at all, but that would be a very bad UX. So we do provide the preconfirmations, we send them over the P2P and to the mempool, and potentially someone will pick them up and include them if the fee is right. That was the mechanism we wanted. What Taiko is also doing is using this fallback mechanism to say: okay, if there is no preconfer registered for the next epoch, then we are going to propose anyway, just to keep the liveness of the chain. And of course this proposal goes to the mempool and someone will pick it up. But if someone is intentionally censoring these transactions and there is no mechanism to force the builders to include them, then they will potentially not be included in time. Yeah. Thank you, Ahmad. One thing to note is that throughout this POC we have never touched the Taiko contracts, although eventually we might, to add that delayed inbox. We have tried not to mess with the Taiko contracts. What our task manager does is simply route the blocks that are being proposed over to the Taiko contract. Because of this, the prover architecture doesn't have to change; that's the best part. Nothing in the prover has to change, and the proposer also needs only very minor modifications, because the original Taiko contracts have barely been changed. Go ahead for the next part. Thank you. Okay, so now we're going to go back to the preconfirmation loop that we discussed, the one that runs every three seconds, in a bit more detail. I hope this clicker works. Okay, I'll just stand here. All right. First, the normal event loop for Taiko: once every slot, Taiko pulls the pending transactions from the mempool, the Taiko proposer forms the block and then pushes it, through a blob, to the rollup inbox contract via the proposeBlock function. Very simple and straightforward. What we have with the preconf solution is the following: the preconf node, every three seconds, requests a batch of transactions from the Taiko proposer. The Taiko proposer fetches those transactions from Taiko geth, forms a batch, and gives that batch to the preconf node, and the preconf node then pushes the batch of transactions to the P2P, as we said. Of course, this only happens after it has made sure that it is the preconfer for this specific slot, or an upcoming slot that is very near. So this is the new loop. So what do we sign when we provide the preconfirmations through the P2P? What's in the signature? The structure we use contains the block ID, because we need to commit to a specific block height at which this batch must be proposed; otherwise the preconfer could potentially sandwich transactions if it wanted to. It also contains the chain ID, in case we have multiple chains, and the transaction list hash, which is basically just a hash of the RLP-encoded transaction list.
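(As a rough illustration, a sketch of that signed structure, assuming eth-utils and rlp; the concatenation order and the names are ours, not necessarily the POC's exact encoding.)

```python
# Sketch of the signed preconfirmation structure: block ID (the height
# being committed to), chain ID, and the hash of the RLP-encoded
# transaction list. Concatenation order is an assumption.
import rlp
from eth_utils import keccak

def preconf_digest(block_id: int, chain_id: int, tx_list: list) -> bytes:
    tx_list_hash = keccak(rlp.encode(tx_list))
    return keccak(block_id.to_bytes(32, "big")
                  + chain_id.to_bytes(32, "big")
                  + tx_list_hash)

# The preconfer ECDSA-signs this digest; pinning the batch to block_id
# is what removes the freedom to reorder batches across heights.
```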
I see there are some hands for questions. Yeah, go ahead. You have the mic? It's fine, just raise your hand. Yeah, so three seconds was just a conservative number that we chose at this point, because we weren't sure how fast the system would be, the latencies, all of that. What we're going to work on later, as we will see in one of the slides, is getting that number lower and seeing how it behaves and how it will work. We will talk about that in later slides. So we saw the preconfirmation structure, and then there is the preconfirmation object that is sent on the P2P. The preconfirmation structure is the one that can be pushed as a fraud proof. But when we are pushing the preconf batch on the P2P, we need more details; that one is not enough. So we have the block height, the pending transaction list (the whole list), the pending transaction bytes (I'm not entirely sure what that one is for), and the proof, the signed preconf message. This is what ends up being sent exactly on the P2P. The reason we need the batch of transactions is that we need all the other preconfers, and all the other participants in the network, to be able to advance the head with this batch of transactions. We also need them to be aware of it, so that when their turn to preconf comes along, they know these transactions have already been preconfed and will potentially be proposed in the block before they start preconfing themselves. The way it works in Taiko before preconfs is that we get an event called BlockProposed from the L1; the Taiko driver is subscribed to this specific event and advances the head once it receives it, which means the user gets a transaction receipt every 12 to 24 seconds. And this is the main pain point that our preconfirmation solution is trying to alleviate with this design. Now the preconf node, once it receives the preconf message with the pending transaction list, can provide it to the Taiko driver, and the Taiko driver can advance the head of Taiko geth in around three seconds, given that we chose a three-second confirmation loop time. All right. That was the preconfing node. There is also the non-preconfing node, which receives this message through the P2P and then sends it to its Taiko driver to advance the head. So the whole network advances together, not just a single node in the network.
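(A sketch of what a listening node might do with that P2P envelope. The field names mirror the ones just listed; recover_signer and the driver call are stand-ins, not the actual Taiko driver API.)

```python
# Sketch of a node handling a preconf envelope from the P2P.
from dataclasses import dataclass
from typing import Callable

@dataclass
class PreconfEnvelope:
    block_height: int
    tx_list: list            # the whole pending transaction list
    tx_list_bytes: bytes     # raw encoded batch
    signature: bytes         # preconfer's signature over the commitment

def on_preconf_gossip(env: PreconfEnvelope, expected_preconfer: bytes,
                      recover_signer: Callable, driver) -> None:
    if recover_signer(env) != expected_preconfer:
        return  # not the assigned preconfer for this slot: ignore it
    # Soft head only: nothing is final until the batch lands on L1.
    driver.advance_soft_head(env.block_height, env.tx_list)
```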
So we already kind of discussed this: there are two ways the preconfer sends the L1 transactions that push the L2 transaction batches. First we said: okay, let's say we have a preconfer at slot number five, and this preconfer is on duty for slots one, two, three, four, and five. That means they preconf a lot of batches in these slots, and then they can force-include them in slot number five. That's not a problem; it's easy to do by using the PBS software, as we explained. The problem arises if preconfers are sparse. If there aren't a lot of preconfers, one preconfer could be at slot number 42 and be responsible for, I don't know, slots five up to 42, and that would mean he needs to push a lot of transactions in his one slot if we follow this model. So instead we said: okay, maybe we should just push all the preconfed transactions right away to the mempool. We get batch A, batch B, and we push them directly to the mempool, and at the same time we put them in a queue, a cache. The idea of this cache is that once an L1 block arrives containing those batch transactions that were pushed here, we can remove that batch from the cache. If that doesn't happen, the batch of L2 transactions stays in the cache. So basically, if it's included, we clear that batch from the cache. Then, at the proposal slot, we take the batches that were pushed to the mempool but haven't been included yet, and we make sure to push them through the constraints API of Bolt MEV-Boost, to be force-included by the builder. And once we receive the L1 block, we have the guarantee from the relay that these L1 transactions containing the L2 transaction batches are included in the proposed L1 block.
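(The cache-and-force-include flow might look roughly like this; push_constraint stands in for the Bolt MEV-Boost constraints API and is not its real signature, and mempool.submit is likewise illustrative.)

```python
# Sketch of the push-then-cache flow described above.
from eth_utils import keccak

class BatchCache:
    def __init__(self):
        self.pending = {}                      # batch hash -> raw batch

    def on_preconf(self, batch: bytes, mempool) -> None:
        mempool.submit(batch)                  # optimistic path: straight out
        self.pending[keccak(batch)] = batch    # remember it until it lands

    def on_l1_block(self, included_hashes: set) -> None:
        for h in included_hashes:
            self.pending.pop(h, None)          # landed on L1: forget it

    def on_own_proposal_slot(self, push_constraint) -> None:
        # Whatever never landed must now be force-included by the builder.
        for batch in self.pending.values():
            push_constraint(batch)
```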
Anshu will talk about slashing now. Okay. So now comes the preconfirmation service manager contract, which we haven't spoken about yet. This is a very interesting contract, because what we have tried to achieve with this POC is really flexible interfacing. We have a number of restaking solutions out there in the market right now: we have EigenLayer, we have Karak, and we might also have our own staking contract later on if the community decides on one, which is actually happening; a discussion is going on about having a unified registry. That calls for some kind of middleware contract, so that later on the core logic doesn't have to change and the staking is just abstracted away into a different set of contracts. And that's what we achieve with the service manager: it's essentially middleware that ensures that only those proposers who have the required stake are allowed to preconf or propose in the upcoming slots. So what kinds of slashing do we have, or when exactly do we slash? One case is when the posted lookahead is incorrect. A basic version of this: for a validator with BLS public key B5, the registry's BLS mapping has E4 as the ECDSA address that owns the B5 key, but the proposer who pushed the lookahead placed E2 as the associated ECDSA key, which is factually incorrect. Now, this might look like an easy comparison: these two are not equal, so why not slash the person who posted the lookahead? But it's kind of tricky to prove this inequality, because we have B5 and E4 from the preconfirmation registry, but in the task manager we don't have access to who the validator and its associated BLS key actually were for the slot being disputed. We don't have that information in a simple way. What we need is to establish this match: B5 from the preconfirmation registry and B5 in the disputed slot have different ECDSA keys, and on that basis we slash the poster of the lookahead. Well, there is a way to do that, and I have to make a correction to my previous statement that there is no connection between the consensus layer and the execution layer. There is one, but it's kind of a complicated connection, and that's the beacon block root, which I think Ahmad touched upon briefly. You see, just like the execution layer has the blocks we deal with, the consensus layer has beacon blocks, because it's the beacon chain. Each beacon block has a state root, and if you unravel the entire state tree, you'll find that the beacon state has a validators field, which contains all the validators, or the proposers, including their unique index as well as their BLS key. What EIP-4788 gives us is the eventual Merkle root: if we Merkleize this entire beacon state and then the beacon block, we get a 32-byte Merkle root, and EIP-4788 makes it available within the execution layer. The thing is, there's a problem: we only have access to historical roots. We don't have access to the current slot's proposer, because the consensus layer is yet to create the block, so we can't really know who the proposer is. So we have to prove that a lookahead is incorrect optimistically. The way we do it is: we wait until the incorrect slot has passed and we have the root. Once we have it, we can make a static call to the beacon roots contract and get that beacon root. Then the challenger posts the BLS key of whoever was actually supposed to be the validator of that slot, along with their validator index, and sends two proofs. First: we have a proposer index in the beacon block, and the beacon state has the BLS keys of the validators at their respective indices, so via a Merkle proof the challenger establishes that this proposer index actually corresponds to this particular BLS public key; that's the first step. Then, in the next step, the challenger proves that this is the proposer index present in the beacon block, the second field, as you can see here. So with two simple Merkle proofs, you can establish the BLS key of whoever was the validator in the slot being challenged. And once you have that, you just check whether the inequality is actually satisfied. If it is, then the lookahead slot is incorrect, and you can slash whoever posted it.
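(A compressed sketch of that optimistic challenge. The Merkle verifier and the slashing hook are passed in as stand-ins here; in the contract, these are SSZ Merkle proofs checked against the root returned by the EIP-4788 beacon roots contract.)

```python
# Sketch of the optimistic lookahead challenge described above.
from dataclasses import dataclass
from typing import Callable

@dataclass
class LookaheadChallenge:
    slot_timestamp: int
    proposer_index: int      # claimed CL proposer index for the slot
    bls_pubkey: bytes        # claimed BLS key of that validator
    validator_branch: list   # Merkle proof: index -> pubkey in beacon state
    block_branch: list       # Merkle proof: beacon block -> proposer_index

def challenge_lookahead(c: LookaheadChallenge, beacon_root: bytes,
                        registry: dict, lookahead: dict,
                        verify: Callable, slash: Callable) -> None:
    # Step 1: the validator at c.proposer_index has key c.bls_pubkey.
    assert verify(beacon_root, c.validator_branch, c.bls_pubkey)
    # Step 2: the beacon block for this slot names c.proposer_index.
    assert verify(beacon_root, c.block_branch, c.proposer_index)

    actual = registry[c.bls_pubkey]                 # true ECDSA owner
    posted = lookahead[c.slot_timestamp].preconfer  # what the lookahead said
    if actual != posted:
        slash(c.slot_timestamp)  # slashes whoever posted this lookahead
```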
Along with that, we have slashing of bad preconfirmations, which I think Ahmad touched upon. To expand on it: we can have bad execution preconfs, and we can also have bad inclusion preconfs. In the case of execution preconfs, the proposer did push the block that he preconfirmed, but he misordered the transactions, or maybe inserted a new transaction that was never preconfirmed. What we end up with is a mismatch of the transaction list hash between the proposed block and the preconfirmed block that was on the P2P. The inclusion preconf case is a bit simpler: we end up with different proposers for the same block ID, because it might happen that he preconfirmed but never really proposed, and someone else eventually proposed at that particular block height. So those are the two kinds of slashing. I think that was quite a mouthful, so: any questions? No? Yeah, so in this case, we have actually discovered a few issues. A simpler variant of your issue would be: I preconf and don't include it, but then for a long period no one actually includes any block. So a long time passes and the dispute period runs out. So there is a preconfirmation, but there is no associated block; the block ID has never really progressed on chain, and that's an issue, but you can't really prove the incorrectness of it. We have a way of handling that. Whenever a preconfirmation is made, we also include a lookahead pointer, which states which slot in the lookahead I am preconfing for. That way we can make a connection between the preconfirmation and the proposed block via the lookahead. I think that answers your question, because if we have that lookahead pointer stating which lookahead slot the preconfirmation was made for, and the batch lands at a different time on the L1, we can prove the inequality. Okay, but is it possible that I preconfirm something and someone includes it before me, because they are a proposer before me? Or can that not happen, because they would be the preconfer for that slot before me, so I wouldn't even be able to include it? I don't know if that makes sense. I mean, not really, because, once again, this is advance proposals: you release it in the public mempool, and it might happen that the proposer does not include it, but then your slot will be coming up. If you go back to the slide Ahmad presented: when your slot comes, you clear all those pending transactions that were never included, because the proposer didn't want to include them; but your slot has now arrived, so you pick those transactions and force-include them via the inclusion list through the PBS pipeline. Yeah, the other answer to this question is that the previous preconfer cannot push transactions into your slot: every slot has an assigned preconfer, and only that specific preconfer can push transactions or batches in that slot. The contract will just ignore any batches that arrive from preconfers that are not assigned to those specific slots.
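(The two fault conditions just described reduce to two comparisons; all field names here are illustrative.)

```python
# The two preconfirmation faults as simple predicates.

def bad_execution_preconf(preconf, proposed) -> bool:
    # Right height, wrong contents: reordered or injected transactions
    # change the hash of the RLP-encoded transaction list.
    return (preconf.block_id == proposed.block_id
            and preconf.tx_list_hash != proposed.tx_list_hash)

def bad_inclusion_preconf(preconf, proposed) -> bool:
    # The preconfer never proposed at that height; someone else did.
    return (preconf.block_id == proposed.block_id
            and preconf.signer != proposed.proposer)
```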
Okay, perfect. Any more questions? No? Then I think we can touch on the future work now. All right, so first we start with soft blocks. Currently, when we advance the head, we push a whole, full block to Taiko geth, and that way it lands in the canonical chain, which is normal. This is why those transactions are then removed from the mempool: when you propose the next block, they don't get included twice. So that's one thing. The problem with this approach is that when we propose these blocks on L1 at the end, we need to propose multiple blocks. For example, if I preconf around four batches in every L1 slot, that means I have to make four proposals on L1, and this is costly; proposal and verification of these blocks is costly. A block proposal costs around 200,000 gas, give or take, and verifying the ZK proof for a single block costs around 400,000, give or take as well; it could be a bit more, closer to 500,000. So every block we end up proposing adds a lot of cost for proposal and verification. What we ended up with is a solution where, when you preconf, you add the batch to Taiko geth and push it; and when you push another batch, it gets appended to the block. What happens is that a new block is formed with the previous transactions that are in the block plus the new transaction batch, the previous block gets reorged out, and then we include the new block. This keeps happening until the preconf node sends an end-of-preconfirmation signal, at which point this block becomes a standard block and gets pushed to L1 as one single proposal. This gives us the cost saving we're looking for, but it's still not finalized; it's something we're hopefully going to work on in the next couple of months.
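(A minimal sketch of that soft-block flow; replace_tip_block is a hypothetical driver call standing in for the reorg-and-replace step.)

```python
# Sketch of soft blocks: every new batch re-forms the tip block (the old
# tip is reorged out), and only "end of preconfirmation" turns it into
# one standard block, i.e. one L1 proposal and one proof to verify.
class SoftBlockBuilder:
    def __init__(self, driver):
        self.driver = driver
        self.txs = []

    def on_batch(self, batch: list) -> None:
        self.txs.extend(batch)
        self.driver.replace_tip_block(self.txs)  # reorg out old tip, insert new

    def on_end_of_preconf(self) -> list:
        block, self.txs = self.txs, []
        return block  # becomes a standard block: a single L1 proposal
```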
Another thing, which our friend here asked about before, is the slot time for the L2. Currently we have a constant three-second block time, and what we're aiming for is a one-second block time. It's not that this isn't possible currently; it just hasn't been tried yet, and we're not sure what limitations we're going to hit with the P2P, et cetera. And then the last item, which I'll talk about. When I was talking about the preconfirmation service manager contract, I said we are trying to make it a middleware, so that whenever we want to change the restaking service or the staking service, we can just swap it out and use a new one. In that regard, the community is planning to launch a universal preconfirmation registry. Chances are there will be many more based rollups down the line, not just Taiko, and with so many based rollups, it could be the case that one proposer wants to propose or preconf for multiple based rollups but doesn't want to register over and over, because that would cost him a lot of gas. Also, it wouldn't be that credibly neutral, because it might be the case that every rollup starts up its own staking service; that's not reasonable for the proposer, and it's a waste of ETH, not a good use of collateral. So there will be a universal preconfirmation registry where any proposer who wants to become a preconfer can go and register the BLS mapping; instead of living in a protocol-owned registry, it will be there, in the universal preconfirmation registry. Along with that, there's the slashing-condition opt-in, which is a very important aspect. The rollup protocol, whoever makes another based rollup, might want to have their own set of conditions based on which they want to slash. Maybe they don't want to slash inclusion preconfs, only execution preconfs. So they can define their own slashing conditions, and then only the proposers who have opted into those slashing conditions can be preconfers for their rollup. This is still an ongoing discussion in the community, and eventually we will spec it all out and release it, but it's going to be a while. Well, that's it. Thank you so much. Okay, there's a question here: how do you see this integrating into existing KYC systems, or rather, the path to scale and adoption on the side of users and institutions? I'm not sure that's a question for this session. All right, so any final questions? How do I evaluate the security of a preconf? Say, if I'm sending $10, I probably don't care much; if I send $10 million, how do I know they'll be honest? Do I wait? Right. So what you're asking about is the fair exchange problem, and this is something our research team has been looking into for quite some time now. As of now, we don't have a solution for the fair exchange problem, and I think most of the existing or proposed solutions depend on a reputation system: if the preconfer or the gateway providing the preconfirmation acts maliciously, this affects their reputation, they would potentially be cut out of preconfing for the specific protocol they're working on, and this would lead to losses for them. So reputation-based systems are what's proposed for now, but we are working on a non-reputation-based system where there is some oversight over how the preconfers are acting: whether they are actually preconfing at the correct time, providing preconfirmations in a timely manner without delaying the preconfirmations coming from users, or reordering transactions within that delay just to generate more MEV or profit for themselves. There's another question here: what kind of slashing conditions do you see, a small penalty or something tougher? So right now our slashing condition is very straightforward: we just slash the entire stake, which is not ideal, and it's definitely not going to stay this way. There is one proposal by the research team, and that's slashable tickets. For every preconfirmation that you provide and every validator that you register, you have a fixed amount of stake; say you are staking one ETH for every preconfirmation that you provide. Once the preconfirmation is settled, you can basically reuse those tickets. But if you mess up that preconfirmation, only that one ETH is slashed, not your entire stake. This is open for discussion, and once again, I think it will be part of the community-led discussion on how seriously we want to slash a malicious preconfer.
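(A toy sketch of the slashable-tickets idea as just described: one ETH locked per outstanding preconfirmation, released on settlement, and only the ticket, not the whole stake, burned on a fault. Purely illustrative.)

```python
# Sketch of slashable tickets for preconfirmations.
ETH = 10**18

class TicketLedger:
    def __init__(self, total_stake_wei: int):
        self.free = total_stake_wei
        self.locked = {}                           # preconf id -> locked wei

    def on_preconf(self, preconf_id: bytes, ticket: int = 1 * ETH) -> None:
        assert self.free >= ticket, "no free tickets left"
        self.free -= ticket
        self.locked[preconf_id] = ticket           # one ticket per preconf

    def on_settled(self, preconf_id: bytes) -> None:
        self.free += self.locked.pop(preconf_id)   # ticket becomes reusable

    def on_fault(self, preconf_id: bytes) -> None:
        self.locked.pop(preconf_id)                # only this ticket is burned
```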
How do you think distributed validator technology fits into your preconfirmation design? Do you think it could make it more secure? Have you looked into that? So with DVT, I'm not entirely sure about block proposal and how it works exactly. I know that for attestations, multiple nodes, say three out of four, need to sign to produce a proper BLS signature, and I think it's kind of the same for proposals, but I'm not sure how they choose which node is responsible for forming the block. So honestly, no, we have not looked into how DVT would interact with this protocol at this moment, also because DVT does not form a significant share of the proposer market as of now. So it might not make sense for us to look into it in a serious manner yet; but if it becomes more popular, then yes, that would make sense. I mean, this is not on mainnet yet, because, firstly, we don't even have the precompiles; we need the Pectra upgrade. So it's running on a testnet, the Helder testnet. Have you heard of Helder? It's a devnet that was launched around EthCC. Taiko already has one of the intermediate versions, not the final one, running on Helder, providing preconfirmations. But once again, that's a devnet, so it's only the validators that Taiko is running. It was tested using 300 validators, and it worked great. Thank you. How do you decide how much you pay the preconfer to propose the rollup blocks? There might be multiple based rollups, and they might compete for one preconfer. Can you speak into the mic? I didn't hear half of what you said. Yeah: how do you decide how much you pay the preconfer to propose the rollup blocks, given that there might be multiple based rollups in a slot competing for the same preconfer? So for the payment, we have left it up to the market. Because we decided that the priority fee will be used, the preconfers will simply not preconfirm transactions that are not profitable. Any transaction whose priority fee doesn't pay the preconfer enough will just not be included, because the preconfer has the incentive to include as many transactions as it can in the block, to extract as much value as possible from the blob it's pushing; the blob has quite a lot of space, so the more data I can fit in a blob, which is basically free right now, the more money I can make. So we leave the pricing to the market to decide. There is some research going on in this space by Conor and Lin from Nethermind, and another researcher I think is called Finn; I don't know their full names, so I apologize for that. It's about the economics of preconfirmations, the pricing of preconfs, et cetera, and there are a lot of talks by Conor, Finn, and Lin about preconfirmations. I advise you to go check the recordings of those talks; they touch exactly on what you asked about. Thank you. How do you think MEV affects the liveness of the protocol? Do you think it improves it? Is there reason for concern? Personally: MEV or not MEV, the protocol works with locally built blocks and also works with forced inclusion through the PBS pipeline. The only problem I see is that if relays and builders decided not to adopt the constraints API we talked about, because it reduces their income (preconfirmations are not profitable enough compared to what else they could include), then we would have a problem. But as of now, and as we heard from our partners, there is a lot of talk, and the relays and builders are agreeing to add the constraints API to the PBS pipeline, so they would potentially be including preconfirmations.
To add to this, I think we also have plans for L2 MEV extraction, and when that is added, the dynamic changes a bit. Basically, since we currently have three-second, and potentially one-second, block times for the L2: within that one second, the preconfer can still reorder these transactions at will. But we would not expect every preconfer, which is basically just an L1 validator, to have the sophistication needed in software and hardware to support such ordering within one second. I think what's going to happen, if this protocol gets adoption, is that there will be a PBS pipeline for L2 blocks that tries to extract MEV from these L2 blocks within that one-second slot. Sorry, last question: what latency figures do you have for block transmission from the preconfer? We don't have those yet, because as of now we only have devnets, and the devnets are basically just Kurtosis instances running on local machines. I mean, we have Helder, but Helder deployed a hybrid solution between gateway preconfs and our solution: our smart contracts, but gateway software, basically. And that's what's deployed on Helder right now. So it's not the same setup, and we don't have that distribution of sidecars running from which we could measure this latency. We could do some simulations, but reality is always different from simulations. Do we have more questions? All right. Thank you, everyone, for attending.", + "sources_streamethId": "6735ccd29dbb7a90e1c675d9", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ccd29dbb7a90e1c675d9.vtt", + "transcript_text": " Hi everyone. For all of those of you joining the Designing a Retro Round workshop, we're going to ask you to sit according to which group you belong to. I'm not sure how many of you are familiar with retroactive funding rounds; yes, I see some hands going up. If you're part of an ecosystem, meaning you are a protocol or you want to design a round to incentivize growth or other goals in an ecosystem, we're going to ask you to please sit on the left side of this area. Everyone, please try to come over to the middle. If you are looking to design a round for a community, say you're a local community and what you're trying to incentivize is growth or other goals within your geographically local community, or a community of a different type, we're going to ask you to sit on the right side. Perfect: communities over here, ecosystems on that side. This is because we're going to have breakout groups; this is going to be a very hands-on workshop. You'll be collaborating with two or three other people here to go through the process of designing a retro round, and it's best if you're partnered with those on the same wavelength as to who your audiences are going to be and what types of values or behaviors you want to incentivize. So please: ecosystems to the left (your left) and communities to the right.
We're going to be handing out some post-it notes to make it easier for you to brainstorm. The usual advice from workshops: one idea per post-it note will make it a lot easier to know what to do with the different ideas. Come on in; communities to the right. And I'm happy to go over any questions around seating when we get to the breakout groups. Perfect. So, before diving right into it: how many of you know what retroactive funding rounds are? Can I get a hands up from those of you who know what a retro round is? If not, I'm just going to walk through it. Perfect. One of the reasons we've been experimenting with retro rounds of late is that they are meant to be easier than running a proactive grants program. In a proactive grants program, what you're trying to do, basically, is predict the future. You're trying to work out whether people are going to do what they say and promise they are going to do. You're trying to predict whether this will still be relevant to your ecosystem or your community by the time it is done. And you're trying to assess whether the team doing it is capable of doing it. So basically, we're trying to fulfill the role of a VC when we're not VCs, when we don't have that information, when we don't have the tools that VCs have. This is why, or through this logic, this other mechanism came up; Vitalik wrote about it (he didn't exactly come up with it, but he wrote about it). And the most relevant idea here is that in retro rounds, we expect that it's easier to agree on what was useful than to predict what is going to be useful. So retroactive rounds are a funding mechanism. You might have heard of them as Retro Rounds, which is the latest rebranding that Optimism has given this funding mechanism, but previously, and I think more widely, they were known as RetroPGF: retroactive public goods funding rounds. This funding mechanism has two sides to it. The first one is a results oracle. This results oracle can be a group of people, or it can be a smart contract, but so far, in all the forms in which we've seen it, it has been groups of people that reward projects they recognize as having already provided value, either to an ecosystem or to a community. However, we know that the design of this oracle is actually a complicated thing. So what we've seen, and what is expected of these retroactive rounds, is that we run them over and over again, because we're experimenting with the design of these rounds, which is also what we're going to explore today in this workshop. We're not expecting to have the best design from the get-go. We know we're going to learn and get closer to the best design in an iterative way: trying out different hypotheses, trying out different variables, engaging different audiences, and through that process we expect to get to a better way of doing these allocations. And I will mention that retroactive funding is not a new thing; it was not invented in the Web3 space. There are actually institutions that have been running this type of funding round for a really long time. I'm not sure who recognizes this, but this is the Nobel Prize, or the insignia given with a Nobel Prize, and the Nobel Prize is one of the best examples of a retroactive funding program.
People granted Nobel Prizes are people who usually did their research decades ago, and it's only once it has been proven that this research was really meaningful to society that they are awarded a cash prize and all the other things that come with being a Nobel laureate. It's a similar experiment we're running with retroactive rounds, in which we look at what has yielded a very valuable contribution to an ecosystem or a community, and we want to reward that, because we want to incentivize more people to behave in these ways, contributing to their communities or ecosystems. So one of the things I suggest to all of those thinking of running retroactive rounds: this is an iterative process, and it takes time. It is resource-intensive. It's not going to be perfect the first time; it will require several iterations, and you will need to take your time going through this process. So this is a mechanism that was made popular by Optimism. Optimism has already run six rounds; we just closed, I think one day ago, the latest, sixth round. Optimism, for those of you who don't know it, is an L2 that has committed to this funding model from inception. From the moment Optimism minted its token, they decided that the fees generated by the sequencer were going to be directed to this mechanism and used to fund public goods and projects providing impact or benefits to the ecosystem. They've also been pioneering in the experimentation with this mechanism. While they started with a small round back in 2021, in which they gave out about 1 million USD with a small subset of voters, this has continued to grow and evolve throughout the years. Their largest round was in 2023, in which they allocated about 30 million OP across different categories with a set of about 140 voters. They've also been experimenting with other types of rounds, like the fourth round, in which they used a voting style based on impact metrics (people voted on impact metrics instead of projects), and they've been going back and forth between voting for projects and voting for metrics. Optimism has committed 850 million OP to this over its lifetime, and that's only what was allocated at the inception of the token; there are also all the funds being generated through sequencer fees that are meant to continue funding this mechanism. It's important to note, though, that Optimism is not the only ecosystem running retro rounds. Several rounds have already been run by other ecosystems, such as Filecoin, which is currently running its second retro round. There's also Pocket, which ran its own round this year, interestingly using the funds it received from the Optimism retro round and funding it received from Arbitrum to reward its own contributors. There's the Celo ecosystem, which ran a round earlier this year and is looking to run another at the end of this year. And the libp2p ecosystem has also run its own retro round to fund those who have been providing value to the library. In addition to that, it's not only ecosystems that can use this mechanism; we've also seen local communities using it to incentivize specific types of behaviors.
So one of the really nice things about retro rounds is that what you're doing is creating a feedback loop: you incentivize behaviors that will enable your community or your ecosystem to get to a goal. While for ecosystems this may be growing your TVL, making your users stickier, or growing the number of your users, local communities that are trying to onboard more people into Web3, or trying to become more regenerative, can also use this mechanism to incentivize those positive behaviors. People will know that this is something that is going to continue to happen, and this type of behavior will continue to be rewarded; as long as these retro rounds are run, there is this expectation that the rounds will continue. We've also seen groups such as DAO Drops, which ran a retroactive round early last year targeting Ethereum contributors, regardless of which L2 or which part of the Ethereum ecosystem they were contributing to and what their type of contributions were. We've also seen local communities such as ETH Colombia and Ethereum Mexico host their own rounds, and other educational communities, such as CryptoVersidad, also running this type of experiment to incentivize particular behaviors they find relevant to their communities. And so, as I mentioned before, retro rounds are mechanisms, and there are five steps I usually look at when designing mechanisms, which are good to keep in mind, because these steps, as we scope them, affect the rest of the process. It's also important to think of it as multiple steps, because this means you can change the sub-mechanisms that are part of the overall mechanism design, in this case the retro mechanism. First, we have funding. When you're thinking of retro rounds, sometimes you don't need to worry about funding; sometimes you're Optimism and you just have funding. But if you're a local community that wants to host a round, you need to think about where you are going to get the funding for running the round: not only to run the operations of the round, but also the funds to give out to the community for the behaviors you want to incentivize. Second is the design of the round. And when I talk about design, I'm not talking about the images, fonts, or colors we're going to use, but who the audience is and what it is we're trying to incentivize: all of the work that leads up to the round. We also have data aggregation. Based on the design you're using, the things you're trying to incentivize, and your audience, you'll be able to determine the type of metrics and data you can use to measure whether people or projects have actually generated value for your ecosystem or your community. This is the why, the how, and the what. Then, third, we have decision-making. What we've seen so far has been groups of people of different sizes voting on who gets the funding, but there are other ways this decision-making process can take place. It could be two different voting groups; you could have one group that votes, and then people review the vote.
They approve it or decline it. And this is why I wanted to call out that the decision-making process can be its own mechanism. Then there's the disbursement process or disbursement mechanism, which can be streaming the tokens over a certain amount of time, or giving a lump sum. And then you have the really important post-mortem analysis. You definitely want to look at how your round went, to understand whether you did the right things when you designed your round. In this workshop we're going to focus on the design, because this is the most relevant part of your round. If you don't get the design of your round right, you won't know if you're actually achieving the goal you wanted to achieve; therefore you won't know if you're using your resources in the most efficient and effective way to achieve your goal; which means you won't be able to evaluate whether the program you just ran actually made a difference for your ecosystem or your community, and whether you should even think of running it again. So why are we going to focus on the design and the scope? First, we have a lot of participants and a lot of audiences in a round. Each of them has different incentives, but this is a game we're all playing together, and we want to make sure we're aligning everyone's incentives so that we're able to get to the goal. So we want to be specific about what the incentives are; that is going to be essential for knowing whether it worked. Second, it's also going to help us improve stakeholder management. This can be a very time-intensive process, so we want to make sure people know what they are supposed to do, when they are supposed to do it, and why they are supposed to do it. If we're asking them to vote, to review projects, or to provide specific data, we need them to understand why they are doing these things, so that the retro round itself can be successful. Number three: we want to mitigate risks through the design. We want to understand and recognize the unknowns in the design, the variables we're unable to influence or control in these experiments; we also want to know what could go wrong, and whether there is something we can do to avoid it going wrong. And fourth: we need to optimize our resources, specifically for local community rounds. I know ecosystems tend to have a lot of funds, which is also what enables them to experiment with these mechanisms, but that may be something local communities cannot afford, because they are either funded round by round or have a more limited runway. That means you need to pick your battles in terms of where you're allocating your funding. You need to clearly identify where you should put your time, where you should put your funds, and where you should direct your community's attention when running this type of round; you want to identify the highest-impact areas to focus your first iterations of the rounds on. And in terms of incentives, as I mentioned before, with this type of mechanism we're not trying to predict the future.
We're trying to assess what has already been useful. So we want to align incentives. The hypothesis with this mechanism is that people out there will build what matters to this ecosystem or community, and once they've generated this value, they're going to be rewarded for it. But if we're not telling them properly what the ecosystem needs, or if that is not clear and commonly understood by the community, it's going to be very hard to get people building the things that are actually doing meaningful work for the ecosystem. For example, this is something we've seen in Optimism and in some of the other retro rounds: there's a lot of work being done, but it's not exactly what is needed; or we see multiple versions of the same thing being replicated and not being used by the target audiences whose time and work it was meant to save or improve. And this stems from a lack of communication and a lack of clarity: where is the ecosystem headed? What is actually needed? What will still be useful three, four, five, six months from now? If we give this information, either as a local community or as an ecosystem, to the community members, we're going to get them excited about contributing to the ecosystem, knowing they will receive a reward, and we will avoid having them lose faith or hope in this mechanism or process by building something that is not going to be rewarded because it wasn't impactful or useful. And, as I was mentioning, this predictability of having these rounds is going to create reliability. You want your builders, or the members of your community, to know that there are going to be multiple rounds, so they are going to continue behaving in this particular way that you've set. Through this mechanism, what we're doing is creating new behaviors. We're asking people to actively change what they do, with the expectation of a future reward. This change in behavior happens over time: it's not going to happen from one day to the next, and it's not going to be cemented after only one round. So it's also important that, as we continue to iterate on these designs and on these rounds, we make sure we're not completely disrupting the expectations of the participants, because that would make it really hard to keep incentivizing the behaviors we want to see in the ecosystem. So I'm going to stop quickly there, because now we're going to jump into the breakout groups. Does anyone have any questions so far on retro rounds? No? Perfect. So the next thing we're going to do is design our own retro round. Yes? Can I project? Okay. All right. Testing. I'm just curious if you could share some more examples of retro rounds, and what the differences are in a round as you change what you're funding, like whether it's software, or governance, or something else. Thanks. Yes. So depending on what it is, on who your audience is, and what their capabilities are, for example in terms of generating the different data that you want to aggregate for people to vote on, and depending on how much funding you have and precisely what things you're looking to fund and incentivize, you're going to have, one, different types of communication with them.
There's also going to be, as I mentioned, different types of data that you're going to be gathering. So, for example, I designed and ran the Ethereum Mexico round. What we were focusing on there: this is a geographically specific round, and what we were looking to incentivize were five different things in the community. We wanted people that were prioritizing onboarding others into the Web3 ecosystem, so we were looking at an increase in wallets, regardless of which chain this was happening on. I think this is also going to be one of the main differences that you might see between local community rounds and ecosystem-specific rounds, in which ecosystem-specific rounds will look to increase the value to their own ecosystems. That makes sense, in that it is their funding, and they have goals that they need to achieve within their own ecosystem to be successful. So you'd be looking at more specific metrics such as, as mentioned, TVL, number of users, number of transactions, transaction size, or, if it's libraries, the number of projects that are using this library within my ecosystem. Then on the community rounds, what we were experimenting with in the Ethereum Mexico community was looking at the number of people onboarded into Web3. We were also looking at reducing the gender gap: the number of projects that were specifically targeting onboarding women or supporting women in the Web3 space. We were also looking at ReFi communities in Mexico: how many had interacted with local regenerative projects, how many of them had onboarded local regenerative projects, or use cases in the ReFi space. We were also looking at education, so partnerships with universities. And these were some of the types of behaviors that we were trying to incentivize in the long term as well. So for us, let's say that where an ecosystem would be pushing for growth in the number of users in their own ecosystem, we were pushing for the number of new users or new attendees at Web3 events. Because our hypothesis, and this is also where designing and understanding what your North Star and your goal are is really relevant, is that what we are trying to incentivize in the long term is to have more people educated and participating in the Web3 space. That would be one, in terms of onboarding. Second, we want to see more women integrated into the space, so allocating funding to that as well. I would say those are two of the main differences that we've observed. I know as well that, I believe, Ethereum Colombia, one of the things that they were incentivizing is the number of people running their own nodes: how people are actively participating in securing Ethereum. But those are some of the things that are stark differences. And I think another thing that is very dependent on whether it's an ecosystem or a local community round is that local communities are very different from one another: the things that are needed in one country or one city are going to be very different from what's needed in a different country or a different city. And how we framed the round that Ethereum Mexico ran is: we want to fund the edges where impact is happening. And this impact might not be clearly understood by someone that is very far away. So even if this is impact that is onboarding everyone into Optimism, this might be very difficult for Optimism to recognize from such a faraway point: oh, why is this relevant in Mexico, in this type of scenario?
But this is something that local communities, if they are the ones running the rounds, if they are the ones designing and determining what is impactful in this specific situation or in this specific reality, are going to be better able to identify and understand: why this was impactful and why it should be rewarded. So I also think that one of the big differences is how close you are to the impact that is being generated, so that people are able to identify it and fund it. That's a very broad spectrum. Hello. So I have a question about the post-mortem analysis and how it should be done. I was thinking about how an ecosystem that runs RetroPGF rounds can do the post-mortem analysis, and I was thinking this is a whole design space: how you can do that, how you can measure how much you incentivized the projects from the previous round compared to the next round, after one year. So, yeah, my question is how, for example, the analysis of previous Optimism rounds happened. Perfect. So for each of the Optimism rounds, there have been hypotheses set at the beginning of the round based on lessons learned from the previous rounds. It's sort of like: say, now we're going for impact metrics-based voting, and we're doing this because we expect that people are going to have an easier time deciding which type of metric is more relevant to them. We're going to be reducing time, because they won't be reviewing each of the projects. And then once the voting comes to an end and allocations are done, there's usually also an exit survey that is sent. And all of this data is then aggregated to understand, and I think this one has been more of a sentiment analysis: do we feel confident that the allocation was done better in comparison to round three? There's still a lot of room for improvement, to be quite honest, in terms of analyzing the post-mortem of the rounds, specifically when it comes to the impact that funding has had on the projects. I think for some of the retro rounds, this is still to be done. There is some: I think Open Source Observer has done some analysis, one year after RetroPGF 3, of whether projects continued to build on Optimism and continued to ship and generate value in the ecosystem. I think that's one good approach. Something else that I know Sejal and myself and others have been exploring is: can we create counterfactuals to understand if this injection of capital or funding to the projects actually made a difference? If we were to look at this in the future, had they never received funding, would it still look the same? That can help us understand if this actually makes a difference for this type of project or in this type of situation (a minimal sketch of that idea follows below). But I think that is still pretty nascent, and not a lot of it, or none of it, has been done yet. Okay. Perfect. So we're going to get into breakout groups of three to four people. Let's see, I think five can also be good. If you can just get together. Perfect. I think we have three here. Sejal is going to help us as well in terms of putting together the groups. Do you want to get together? Hello. Perfect. Do you have things to write with? Yeah. Okay. Perfect. And what we're going to be doing is we're going to go through a set of prompts.
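As a rough sketch of the counterfactual idea described above, a difference-in-differences style comparison between funded and unfunded projects could look like the following; the metric, the project groups, and every number here are hypothetical illustrations, not real round data.

```python
# Difference-in-differences sketch: did funded projects grow more than
# comparable unfunded projects over the same window?
# All numbers below are made up for illustration.

def mean(xs: list[float]) -> float:
    return sum(xs) / len(xs)

# Activity metric (e.g. monthly active users) before and after the round.
funded_before,   funded_after   = [120.0, 80.0, 200.0], [260.0, 150.0, 390.0]
unfunded_before, unfunded_after = [110.0, 95.0, 180.0], [150.0, 120.0, 230.0]

funded_change   = mean(funded_after)   - mean(funded_before)
unfunded_change = mean(unfunded_after) - mean(unfunded_before)

# The unfunded group's change approximates the counterfactual trend;
# the difference is a crude estimate of the funding's effect.
effect_estimate = funded_change - unfunded_change
print(f"Estimated effect of funding: {effect_estimate:+.1f} MAU")
```

The hard part in practice is choosing an unfunded comparison group that really is comparable, which is exactly why this kind of analysis is still nascent.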
And this is going to be the process through which we design a retroactive round. You can choose your own adventure: whether you want to design it for a community or for an ecosystem. For example, I see the Gitcoin team here; maybe they want to design a retro round for members of their ecosystem. I see different people from LATAM communities over there; maybe they want to design it for a Latin American community. Something that Sejal and I were also brainstorming earlier for this workshop, and it might be an experiment that Sejal and I do right now as well, is: what if we were to design a retro round for Devcon? So anyone can take that prompt up as well. And so the first prompt is: we're going to ask you to introduce yourselves in your group in case you don't know each other. Please make it short, because we are going to have eight minutes for this first prompt. So introduce yourselves and choose your adventure. Choose who you are designing this retro round for. Is it for a local community? Is it for an ecosystem? Just agree with the other members of your team which audience you're looking to engage with. And if you have any questions throughout this process, please raise your hand and Sejal or myself will walk over to you and answer them. We're going to have eight minutes for this. So please introduce yourself, share why you're in this workshop and if you're part of a community or an ecosystem, and then decide who you are designing this round for. Go. Yes, if you have any questions, just raise your hands and Sejal or myself will come. [breakout group work] We have one minute left, so please come to a decision on what adventure you're going to follow for the next prompt. And time is up. So please, I hope you've written down, either on one of the post-it notes or somewhere else, the adventure that you're going to follow: whether you're going to be designing this retro round for a local community, or whether you're thinking of it as a Web3 ecosystem, Ethereum as a whole, or Devcon. The next prompt, based on your groups and based on who you've decided to design this round for, is that you will be defining the North Star that you want to achieve. And here is what the North Star is: what is the end goal that you want to reach within this community or within this ecosystem? It's important, as I mentioned before, to know this, because if we don't know where we're going, then we're not going to be able to tell if we've gotten there or how we're going to get there. And this means we're not going to be able to determine what type of data and what type of metrics would be useful for us to leverage when we're deciding who should be funded. And we're also not going to know what types of behaviors we want to incentivize people to follow that are going to get us closer to this goal.
And I will encourage you as well to think of this not as a short-term goal. It could be a short-term goal, but it's better to think of it as a long-term goal. Like, if you're doing this for a local community, how do you want this community to look 50 years from now, 60 years from now? What is this bright future that we're aiming for? And as I mentioned before, you can have it be a one-year goal. I know that a lot of the ecosystems plan more short-term, because it makes sense for what they're trying to do. But try to aim for this long-term vision. Even if you're an ecosystem, we want to play infinite games, which means we're incentivizing behaviors that are going to be healthy for what we're trying to achieve in the long run. And so the second prompt is: define with your team members the North Star that you're going to be fulfilling for this local community or ecosystem. Here are some questions that will help you in defining this North Star. And here, feel free to use the post-it notes. Remember, one idea per post-it note; that's going to make it a lot easier when you're looking at the different ideas that you have. What is the overarching objective that you hope to achieve by running this round? Not only this round, but in the long term. And how can this round align with this overarching goal? Then, what is the long-term vision that you have for your community or for your ecosystem, and what are going to be your goals for this round? For this one we're going to have about 10 to 12 minutes. I think it might be too much, but we'll be going around, and if it is too much we'll just cut it short. So go. [breakout group work] Yeah, and we just had a really good question: should we just have one North Star? What I would suggest is that each of you write down some of the things that would be very relevant to have in this round. Write each one on a post-it note, and then share them: paste them in the middle for everyone else to see, and then people can either agree or disagree with the things that you've identified as valuable, or with what could be the North Star that you're going after. [breakout group work] We have one more minute. So please start gathering your ideas and choose what the North Star is going to be for your round. We're done, and we're up to the next prompt. So now you've already defined what is going to be the North Star for your community or your ecosystem, and that's the thing that you're going to have in mind as you're designing the rest of the steps of the retro round. Next, we're going to identify the audiences that are involved in these retro rounds. Who are, one, the projects or people that you expect to fund through this round? It's going to be either the projects that are applying, or, if you're not asking them to apply, you already have a way in which you identify who's going to be eligible to participate in this round. Who are they? What do they look like? Are they individuals? Are they groups? Second, as I mentioned before, most of the retro rounds as we've seen them have people vote on who gets allocated the funds. So who are going to be your voters? Is it going to be everyone that belongs to the community?
Is it going to be a selected group of people that are very involved and have a lot of high context on your ecosystem? And third, you're also going to identify who is benefiting from the overall impact that the people or the projects that you're funding are generating. Who are the end users or end receivers of the impact that is being generated? Map out who these audiences are so that you can understand what their incentives are and what they are trying to achieve, whether it is within your ecosystem or within your community: what moves them forward and what are they trying to achieve, so that you can then continue to design something that will ensure that you're aligning their incentives with the incentives that you're trying to achieve through this round. And here there are some questions to guide you through thinking this through. The first one is, oops, this is not, okay. So apparently this slide is wrong. But here it is, as I mentioned before: identifying the audience in terms of who is benefiting from the impact that's being generated, who are the people that would be voting in your round, and who are the projects or people that would be receiving funding through this round. And for this, we are going to give eight minutes. Let's start. [breakout group work] And the time is up for this prompt. We're going to move to the next prompt, in which we are going to be looking at setting behaviors and defining what these behaviors are. As I mentioned before, in these retro rounds what we're doing is incentivizing very specific behaviors that we want to see, that we have a loose idea are going to take us to this North Star, that are going to enable us to get to these goals. So for this next prompt, what you're going to be doing is thinking about the types of behaviors that you want to see: the types of benefits, the types of recognition that you want people to have, or that you think people will appreciate, from participating in the types of actions that are going to get you closer to the North Star. Think also about some of the milestones that are in between where you are right now and getting to the North Star, so that you can identify the types of behaviors that you're trying to encourage people to do that are going to get you closer. If you're looking at an ecosystem round, maybe this behavior is contributing to the open-source code. Maybe this behavior looks like deploying smart contracts and attracting new users into my ecosystem. If we're thinking of a local round, if one of the things that you're incentivizing or that you value the most is, say, protecting the forest, maybe the type of behavior that you're looking for is planting trees, or protecting trees that are already in a specific place, taking care of the trees. These are the types of behaviors that you might want to incentivize people to have or do along the way, and that are going to be rewarded at the end of the round. So we're actually going to give, I know it says 12 minutes, but we're only going to give eight minutes for this part as well. And you can start now. And try to be as specific as possible when you're thinking of the behaviors that people are going to have.
Because this part is going to help us identify the data and the metrics that we can use to measure the types of behaviors that people have already completed and that we could reward in the round. So again, if it's a local community and what they want is to preserve a particular area, then maybe we're going to be looking at planting trees or at having a healthier ecosystem. And then, how do we define what a healthy ecosystem looks like? Is it the number of trees that are in a specific space? Is it the type of behavior that people have when going around it? So this is what we're going to be looking at. [breakout group work] Okay, we have one more minute before we go to the last prompt. We're going to move on to the last prompt, since the groups are already done with this one. As I mentioned to both of the teams, the way in which you're designing your round is going to inform the evaluation process and the data that you're able to extract from existing sources or that you need to create. The best thing there is to create it in a standardized way, so that people can cross-compare it across different projects. So for the last prompt, we're going to be looking at data and metrics for this evaluation. And there are two actions here that you're going to be looking into, or questions that you can use to guide your process. The first one is: now that you know, based on the behaviors that you want to see from these different audiences, what you're aiming for, and what the potential actions are that the members or people or projects can undertake to get to the North Star, how can you measure these actions? Like over here, we were talking about wanting more developers building in this particular ecosystem, and we talked about developing looking like launching their own smart contracts. It could look like contributions to the GitHub repos of the different libraries or tools that are relevant to the ecosystem. Then you can also explore: are there already existing data sources for this information? And if there are no existing data sources for it, how can we measure this in a standardized way? We're going to give three minutes for that, and then we'll start wrapping up. [breakout group work] We have one more minute left. Okay, so you were talking about GitHub repo commits, stuff like that. And we were talking about how maybe that's spammable, it's not good, and maybe we should look at the quality of the code. And I was like, how do you even standardize that? It's very subjective. It's the same as what we have in OP. Exactly, no, but it is subjective. I think the good question, though, is: how can we make subjective evaluations as objective as possible? And that can look like, for example, if it's very, very subjective, maybe you have five different things within the code that you're reviewing, and then that helps reduce the subjectivity of it (a minimal sketch of this rubric idea follows below). But you're still reviewing it to a degree. No, you're welcome. Perfect. So we're coming to the end; this was the last prompt. And as I mentioned, this was a taste of what the design phase looks like when we're looking into mechanism design.
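One way to read the "five different things" idea above is as a fixed rubric that turns a subjective code review into a comparable score. A minimal sketch, with entirely hypothetical criteria and weights:

```python
# A fixed rubric makes subjective reviews comparable across projects.
# Criteria, weights, and the example ratings here are hypothetical.

RUBRIC = {  # criterion -> weight (weights sum to 1.0)
    "tests_present": 0.25,
    "documentation": 0.20,
    "code_clarity": 0.20,
    "maintenance_activity": 0.20,
    "security_practices": 0.15,
}

def rubric_score(review: dict[str, int]) -> float:
    """Combine 0-5 ratings per criterion into one weighted score."""
    return sum(RUBRIC[c] * review[c] for c in RUBRIC)

review = {  # one reviewer's 0-5 ratings for a single project
    "tests_present": 4, "documentation": 3, "code_clarity": 5,
    "maintenance_activity": 2, "security_practices": 4,
}
print(f"weighted score: {rubric_score(review):.2f} / 5")
```

Each individual rating is still a judgment call, but fixing the criteria and weights up front makes the scores cross-comparable, which is the standardization goal discussed above.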
There are several other steps that would follow, but right now we only focused on the design space of the problem. And so what would be really interesting now is to ask, real quick, the two teams to share whom you were designing your round for, the types of incentives that you wanted to generate, what was going to be rewarded, and who the audiences are. We have very limited time, so if you can explain it in one or two minutes, that would be great. Hello. So our retrofunding round is for an ecosystem. Specifically, it's the Uniswap ecosystem, and the round we were thinking about is around the hooks that they just released. So the goal would be to increase the number of builders that build on the Uniswap hook ecosystem, increase the number of high-quality hooks, or build hook tools, making it easier for developers to work with them, and also to onboard builders into the ecosystem. Some of the audience would include Web3 builders and educators who help, and the benefit: this will benefit Web3 developers, Uniswap users, and Uniswap token holders. And who will vote on this? We're still debating this, but we think probably the Uniswap token holders. Yes, so the behavior we want to see is: we want them to build on the hook ecosystem and to deploy the smart contracts, and we also want educators and content creators to create content for hooks, to spread the information and help onboard people, and also people to participate in voting and so on. Perfect, thank you. And so, please, I know we're missing the last group, but I don't think we have much time for that anymore, so just make it really, really brief. We still have two minutes, but this is the QR code for the group. Please feel free to join as well, and we can continue exploring what these designs look like. We designed retroactive funding to incentivize more women developers to onboard into Web3, which incentivizes us to create or hold more workshops for this audience specifically. And the audience, or the funding, will go to people, who can be individuals and also developer communities. And the behaviors that we are looking for are, first, onboarding more women with workshops focused on them, and also more women contributing to open-source projects. For the milestones, we have increasing women's attendance by 25% in six months, and then having more women's meetups during the year. Okay. Thank you. Awesome. Thank you. So please feel free to share the designs in the Telegram group. Thanks a lot. Sejal and I were really happy to host this workshop, and we're excited to see more people approaching mechanism design in a more methodological way, so that we can have better outcomes for how we're allocating funding in this space.
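To make the metrics-based allocation idea from this workshop concrete, here is a minimal sketch of one possible approach, not a description of any real round: normalize each metric across projects, combine the normalized values with voter-chosen weights, and split the pool proportionally. The projects, metrics, weights, and pool size are all hypothetical.

```python
# Metrics-based allocation sketch: normalize each metric across projects,
# combine with voter-chosen weights, and split the pool proportionally.
# Projects, metric values, weights, and pool size are hypothetical.

projects = {
    "project-a": {"new_wallets": 400, "monthly_txs": 12000},
    "project-b": {"new_wallets": 150, "monthly_txs": 30000},
    "project-c": {"new_wallets": 950, "monthly_txs": 3000},
}
weights = {"new_wallets": 0.6, "monthly_txs": 0.4}  # aggregated from voters
pool = 100_000  # tokens to distribute

def allocate(projects, weights, pool):
    # Total of each metric, so each project's value becomes a share of it.
    totals = {m: sum(p[m] for p in projects.values()) for m in weights}
    shares = {
        name: sum(w * (p[m] / totals[m]) for m, w in weights.items())
        for name, p in projects.items()
    }
    return {name: round(pool * share, 2) for name, share in shares.items()}

for name, amount in allocate(projects, weights, pool).items():
    print(name, amount)
```

The weights are where the voting happens in this style of round: voters express which behaviors matter, and the data, not the voters, ranks the individual projects.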
Thank you.", "eventId": "devcon-7", - "slot_start": 1731655800000, - "slot_end": 1731661200000, + "slot_start": 1731571200000, + "slot_end": 1731576600000, "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/14eqnMC0_aJ3IguPD2egqY1ojHSZRxc4QPo5D4RhCje8", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1GTU723iYMOTD9COHjYQdSKNFi7gSZc88-BnP7Co9jE4", + "resources_slides": "", "speakers": [ - "ahmad-bitar", - "anshu-jalan" + "launamu", + "sejal-rekhan" ] }, "vector": [ @@ -231205,11 +230590,12 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -231948,27 +231334,11 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 6, 0, 0, 0, + 6, 0, 0, 0, @@ -231991,31 +231361,9 @@ 0, 0, 0, - 2, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, - 2, 0, 0, 0, @@ -232061,7 +231409,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -232093,6 +231440,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -232126,6 +231474,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -232191,6 +231540,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -232250,6 +231600,35 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -232512,7 +231891,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -232523,6 +231901,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -232534,56 +231914,49 @@ }, { "session": { - "id": "designing-and-launching-a-retroround-incentivize-what-matters", - "sourceId": "39AVKD", - "title": "Designing and launching a RetroRound - Incentivize what matters", - "description": "Learn how to design, develop and launch a retroactive funding round. In this workshop we’ll explore the differences, similarities and best practices for running a local and ecosystem RetroRound. Participants will be able to set clear goals, define impactful behaviors to be incentivized, scope technical roadmaps, and formulate a sustainable strategy to fund public goods. Ideal for emerging markets community leaders and web3 Ecosystems looking for new resilient and diverse funding strategies.", - "track": "Coordination", - "type": "Workshop", - "expertise": "Beginner", + "id": "designing-conditional-markets-and-futarchy", + "sourceId": "EWJNVJ", + "title": "Designing Conditional Markets and Futarchy", + "description": "Conditional markets allow predicting outcomes from potential decisions, enabling what is called futarchy governance, but key design questions remain open. 
We'll examine specific challenges: aligning founders with investors in protocols, encouraging meaningful participation in decentralized governance, and integrating futarchy modules into existing governance systems.", + "track": "Cryptoeconomics", + "type": "Talk", + "expertise": "Intermediate", "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "RPGF", - "Quadratic Voting", - "Public good", - "Design", - "Mechanism design", - "program", - "grants", - "Mechanism design", - "Public good", - "Quadratic Voting", - "RPGF" + "market", + "prediction", + "DAO", + "Futarchy", + "Public good" ], "keywords": [ - "Emerging markets", - "Grant Program Design" + "Prediction", + "markets" ], - "duration": 5442, + "duration": 1519, "language": "en", - "sources_swarmHash": "5dd5ebab804d94005464c04cc83b0393d5fe0c7517d4ff7e86c54e1149ab100e", - "sources_youtubeId": "Ugxag4KRdds", + "sources_swarmHash": "5e074344751472dc78fed2d0f7259d0d3f840e54a5bd75116712a1e19a7cdcc9", + "sources_youtubeId": "iEjrdYReNnc", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735ccd29dbb7a90e1c675d9", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ccd29dbb7a90e1c675d9.vtt", - "transcript_text": " Hi everyone. For all of those of you joining for the Designing a Retro Round workshop, please, we're going to ask you to seat according to which group you belong to. So the exercise that we're going to be doing in this workshop is, I'm not sure how many of you are familiar with retro rounds, retroactive funding rounds. Yes, I see some hands going up. If you're part of an ecosystem, like you are a protocol or you want to design a round to incentivize growth or different things in an ecosystem, we're going to ask you to please sit on the left side of this area. Everyone, please try to come over to the middle. If you are looking to design around for a community, say you're a local community and what you're trying to incentivize is growth or different goals within your local, geographically local community or community of a different type, we're going to ask you to sit on the right side. And this is because we're going to be making... You're right. So here. Yeah. Perfect. So communities over here, ecosystems on this side. And this is because we're going to have breakout groups. So this is going to be a very hands-on workshop. You'll be working or collaborating with two or three other people that are here to go through the process of designing a reach around. And it's best if you are sitting or partnered with those that are on the same wavelength of who your audiences are going to be, what are the type of values or behaviors that you want to incentivize. So that is why. Please, ecosystems to the left and communities to the right. It's your right. . Yeah, ecosystems is on the left, which is your left. So if you're, I'm sorry, I think... Oh, that's more... So ecosystems to the left and communities to the right. We're going to be handing out some post-it notes so that it's easier for you to brainstorm. Usual advice from workshops, one idea per post-it note is going to make it a lot easier to know what to do with these different ideas. Okay. Come in here. To the right. And I think this will also come clear and happy to go over any questions around seeding when we get to the breakout groups. Perfect. So before diving right into it, how many of you know what retroactive funding rounds are? Can I? There we go. Yes? Yes. 
Can I get a hands up from those of you that know what a retro round is? If not, that's fine, because I'm just going to walk through it. Perfect. So one of the reasons why we've been experimenting with retro rounds of late is the fact that it is meant to be easier than running a proactive grants program. In a proactive grants program, what you're trying to do, basically, is predict the future. You're trying to understand if people are going to do what they say and promise they are going to do. You're trying to predict if this is still going to be relevant to your ecosystem or to your community by the time it is done. And you're trying to assess as well if the team that is doing it is going to be capable of doing it. So basically, we're trying to fulfill the role of a VC when we're not VCs, when we don't have that information, when we don't have the tools that VCs have. And this is why, through this logic, this other mechanism came up, and Vitalik wrote about it. The most relevant idea here is that in retro rounds, we expect that it's easier to agree on what was useful than to predict what is going to be useful. And so retroactive rounds are a funding mechanism. You might have heard of them as Retro Rounds, which is the latest rebranding that Optimism has given to this funding mechanism, but previously, and I think more widely, it's known as RetroPGF: Retroactive Public Goods Funding rounds. And this is a funding mechanism that has two sides to it. The first one is that we have a results oracle. This results oracle can be a group of people; it can be a smart contract. But right now, all of the forms in which we've seen it have been groups of people that reward projects that they recognize as having already provided value, either to an ecosystem or to a community. However, we know that the design of this oracle is actually a complicated thing. So what we've seen as well, and what is expected of these retroactive rounds, is that we have them running over and over again, because we're experimenting with the design of these rounds, which is also what we're going to be exploring today in this workshop. We're not expecting to have the best design from the get-go. We know that we're going to learn and get closer to this best design in an iterative way. We're going to be trying out different hypotheses. We're going to be trying out different variables. We will be engaging different audiences. And through that process, we expect to get to a better outcome for how these allocations are done. And I will mention that retroactive funding is not a new thing. This is not something that was invented in the Web3 space. We actually have institutions that have been running this type of funding round for a really long time. And I'm not sure who knows what this is, but this is the Nobel Prize, or the insignia that is given with a Nobel Prize. And the Nobel Prize is one of the best examples of a retroactive funding program. People that are granted Nobel Prizes are people that have done research, usually decades ago, and it's only once it has been proven that this research has been really meaningful to society that they get awarded a cash prize and a lot of the other things that come with being a Nobel laureate. But it's a similar experiment that we're running with retroactive rounds, in which we're looking at what has yielded a very valuable contribution to an ecosystem or to a community, and we want to reward that.
Because we want to incentivize more people to have these types of behaviors of contributing to the communities or to the ecosystems. So one of the things that I suggest for all of those thinking of running retroactive rounds: this is an iterative process and it takes time. It is resource-intensive. So don't rush it. It's not going to be perfect the first time. It's going to require several iterations, and you will need to take your time going through this process. So this is a mechanism that was made popular by Optimism. Optimism has already run six rounds; we just closed, I think one day ago, the latest, sixth round. Optimism, for those of you that don't know it, is an L2 that has committed to this funding model from inception. From the moment in which Optimism minted its token, they decided that the fees generated by the sequencer were going to be directed to this mechanism and used to fund public goods and projects that are providing an impact or benefits to the ecosystem. They've also been pioneering the experimentation with this mechanism. So while they started with a small round back in 2021, in which they gave out about 1 million USD and had a small subset of voters, this has continued to grow and evolve throughout the years. Their largest round was in 2023, in which they allocated about 30 million OP across different categories, and they had a set of about 140 voters. They've also been experimenting with other types of rounds, like the fourth round, in which they used a voting style based on impact metrics: people voted on impact metrics instead of on projects. But it's been going back and forth between voting for projects or for metrics. And they've committed 850 million OP to this over its lifetime. And this is only what was allocated at the inception of the token; there are also all of the funds being generated through sequencer fees that are meant to continue funding this mechanism. It's important to note, though, that Optimism is not the only ecosystem running retro rounds. There have already been several rounds run by other ecosystems, such as Filecoin, which is currently running its second retro round. There's also Pocket, which ran its own round this year, interestingly, also using the funds that they received from the Optimism retro round and funding that they received from Arbitrum to reward its own contributors. There's also the Celo ecosystem, which ran a round earlier this year and is looking to run another round at the end of this year. And the libp2p library, or libp2p ecosystem, has also run its own retro round to fund those that have been providing value to this library. And in addition to that, it's not only ecosystems that can use this mechanism; we've also seen local communities using this mechanism to incentivize specific types of behaviors. So one of the really nice things about retro rounds is that what you're doing is creating a feedback loop of behaviors that you're incentivizing that are going to enable your community or your ecosystem to get to a goal, right?
So while we can see that for ecosystems this may be growing your TVL, making your users stickier, or growing the number of your users, we can also see that local communities that are trying to onboard more people into Web3, or that are trying to become more regenerative, can also use this mechanism to incentivize these types of positive behaviors. And people will know that this is something that is going to continue to happen, and that this type of behavior will continue to be rewarded: as long as these retro rounds are run, there is the expectation that these rounds will continue to run. So we've also seen groups such as DAO Drops, which ran one such retroactive round last year; whom they were targeting was Ethereum contributors, regardless of which L2 or which ecosystem within the Ethereum ecosystem they were contributing to, and regardless of the type of their contributions. We've also seen local communities such as ETH Colombia and Ethereum Mexico host their own rounds. And we've also seen other educational communities, such as CryptoVersidad, doing these types of experiments to incentivize particular behaviors that they find relevant to their communities. And so, as I mentioned before, retro rounds are mechanisms, and there are five steps that I usually look at when designing mechanisms, and that are good to keep in mind, because these steps, as we scope them, affect the rest of the process. And it's also important to think of it as multiple steps, because this means that you can change the pieces and sub-mechanisms that are part of the mechanism design, or in this case, the retro mechanism. So first, we have funding. When we're thinking of retro rounds, sometimes you don't need to worry about funding; sometimes you're Optimism and you just have the funding. But if you're a local round, or you're a local community that wants to host a round, you need to think about where you're going to get the funding for running this round: not only to run the operations of the round, but also the funds to give out to the community because they've done the behaviors that you want to incentivize. Second is the design of the round. And when I talk about design, I'm not talking about the images or the fonts or the colors that we're going to use, but who the audience is and what it is that I'm trying to incentivize: all of the work that leads up to the round. We also have the data aggregation. So based on the design that you're using, based on the things that you're trying to incentivize, and based on your audience, you're going to be able to determine the types of metrics and data that you can use to measure whether people or projects have actually generated value for your ecosystem or your community. This is the why, the how, and the what. Then third, we have the decision-making. What we've seen so far has been groups of people of different sizes voting on who gets the funding. But there are other ways in which this decision-making process can take place. This could be two different groups that vote. You can have one group that votes, then people review the vote. They approve it or decline it. And this is why I wanted to call out that there's this decision-making process, which can be its own mechanism.
Then there's a disbursement process or disbursement mechanism, which can be streaming the tokens over a certain amount of time, or giving a lump sum. And then you have the really important post-mortem analysis. You definitely want to look at how your round went to understand if you did the right things when you were designing your round. And in this workshop, we're going to focus on the design, because this is the most relevant part of your round. If you don't get the design for your round right, you won't know if you're actually achieving the goal that you wanted to achieve. Therefore you won't know if you're using your resources in the most efficient and effective way to achieve your goal, which means you're not going to be able to evaluate if the program that you just ran actually made a difference for your ecosystem or for your community, and if you should even think of running it again. So why are we going to focus on the design and the scope? First, we have a lot of participants and a lot of audiences that participate in a round. Each of them has different incentives, but this is a game that we're all playing together, and we want to make sure that we're aligning everyone's incentives so that we're able to get to the goal. So we also want to be specific about what the incentives are; that is going to be essential for us to know if it worked. Second, it's also going to help us improve stakeholder management. This can be a very time-intensive process, so we want to make sure that people know what they are supposed to do, when they are supposed to do it, and why. If we're asking them to vote, to review projects, or to provide specific data, we need them to understand why they are doing these things so that the retro round itself can be successful. Number three is we want to mitigate risks through the design. We want to understand and recognize what the unknowns in this design are, what the variables are that we're unable to influence or control in these experiments, and we also want to know what could go wrong and whether there is something we can do to avoid it going wrong. And the fourth one is we need to optimize our resources, specifically for local community rounds. I know that ecosystems tend to have a lot of funds, and this is also what enables them to experiment with this type of mechanism. But this may be something that local communities cannot afford, because they are either being funded one by one or they have a more limited runway. Which means that you need to be able to pick your battles in terms of where you're allocating your funding. You need to be able to clearly identify where you should be putting your time, where you should be putting your funds, and where you should be directing the attention of your community when you're running this type of round. You want to identify the highest-impact areas to focus your first iterations of the rounds on. And in terms of incentives, as I mentioned before, with this type of mechanism we're not trying to predict the future; we're trying to assess what has already been useful. So we want to align incentives.
So the hypothesis with this mechanism is that people out there will build what matters to this ecosystem or to this community, and once they've generated this value, they're going to be rewarded for it. But if we're not telling them properly what it is that this ecosystem needs, or if that is not clear and not commonly understood by the community, it's going to be very hard to have people building the things that are actually doing meaningful work for the ecosystem. For example, this is something that we've seen in Optimism and in some of the other retro rounds: there's a lot of work being done, but it's not exactly what is needed. Or we see multiple versions of the same thing being replicated and not being used by the target audiences whose time it was meant to save or whose work it was meant to improve. And this stems from a lack of communication and a lack of clarity: where is the ecosystem headed? What is actually needed? What is still going to be useful three, four, five, six months from now? If we give this information, either as a local community or as an ecosystem, to the community members, we're going to be able to get them excited about contributing to the ecosystem, knowing that they will be receiving a reward, and we will avoid having them lose faith or hope in this mechanism by building something that is not going to be rewarded because it wasn't impactful or useful. And as I was mentioning, the predictability of having these rounds is going to create reliability. You want the builders or the members of your community to know that there are going to be multiple rounds, so they are going to continue behaving in this particular way that you've set. Through this mechanism, what we're doing is creating new behaviors. We're asking people to actively change what they do with the expectation of a future reward. And this change in behaviors happens over time. It's not going to happen from one day to the next, and it's not going to be cemented after only one round. So it's also important that as we continue to iterate on these designs and on these rounds, we make sure that we're not completely disrupting the expectations of the participants, because that's going to make it really hard to keep incentivizing the behaviors that we want to see in the ecosystem. So I'm going to stop very quickly there, because now we're going to jump into the breakout groups. Does anyone have any questions so far on retro rounds? No. Perfect. So the next thing that we're going to do is design our own retro round. Yes. Can I project? Okay. All right. Testing. I'm just curious if you could share some more examples of retro rounds, and what are the differences in the round as you change what you're funding? Like if it's software, or if it's governance, or if it's something else. Thanks. Yes. So depending on what it is, on who your audience is and what their capabilities are, in terms of generating the different data that you want to aggregate for people to vote on, depending on how much funding you have, and depending on precisely the things that you're looking to fund and incentivize, you're going to have, one, different types of communication that you're having with them.
There's also going to be, as I mentioned, different types of data that you're going to be gathering. So, for example, I designed and ran the Ethereum Mexico round. What we were focusing on there: this is a geographically specific round, and what we were looking to incentivize were five different things in the community. We wanted people that were prioritizing onboarding others into the Web3 ecosystem, so we were looking at an increase in wallets, regardless of which chain this was happening on. I think this is also going to be one of the main differences that you might see between local community rounds and ecosystem-specific rounds, in which ecosystem-specific rounds will look to increase the value to their own ecosystems. That makes sense, in that it is their funding, and they have goals that they need to achieve within their own ecosystem to be successful. So you'd be looking at more specific metrics such as, as mentioned, TVL, number of users, number of transactions, transaction size, or, if it's libraries, the number of projects that are using this library within my ecosystem. Then on the community rounds, what we were experimenting with in the Ethereum Mexico community was looking at the number of people onboarded into Web3. We were also looking at reducing the gender gap: the number of projects that were specifically targeting onboarding women or supporting women in the Web3 space. We were also looking at ReFi communities in Mexico: how many had interacted with local regenerative projects, how many of them had onboarded local regenerative projects, or use cases in the ReFi space. We were also looking at education, so partnerships with universities. And these were some of the types of behaviors that we were trying to incentivize in the long term as well. So for us, let's say that where an ecosystem would be pushing for growth in the number of users in their own ecosystem, we were pushing for the number of new users or new attendees at Web3 events. Because our hypothesis, and this is also where designing and understanding what your North Star and your goal are is really relevant, is that what we are trying to incentivize in the long term is to have more people educated and participating in the Web3 space. That would be one, in terms of onboarding. Second, we want to see more women integrated into the space, so allocating funding to that as well. I would say those are two of the main differences that we've observed. I know as well that, I believe, Ethereum Colombia, one of the things that they were incentivizing is the number of people running their own nodes: how people are actively participating in securing Ethereum. But those are some of the things that are stark differences. And I think another thing that is very dependent on whether it's an ecosystem or a local community round is that local communities are very different from one another: the things that are needed in one country or one city are going to be very different from what's needed in a different country or a different city. And how we framed the round that Ethereum Mexico ran is: we want to fund the edges where impact is happening. And this impact might not be clearly understood by someone that is very far away. So even if this is impact that is onboarding everyone into Optimism, this might be very difficult for Optimism to recognize from such a faraway point: oh, why is this relevant in Mexico, in this type of scenario?
But this is something that local communities, if they are the ones running the rounds, if they are the ones designing and determining what is impactful in this specific situation or in this specific reality, are going to be better able to identify and understand: why this was impactful and why it should be rewarded. So I also think that one of the big differences is how close you are to the impact that is being generated, so that people are able to identify it and fund it. That's a very broad spectrum. Hello. So I have a question about the post-mortem analysis and how it should be done. I was thinking about how an ecosystem that runs RetroPGF rounds can do the post-mortem analysis, and I was thinking this is a whole design space: how you can do that, how you can measure how much you incentivized the projects from the previous round compared to the next round, after one year. So, yeah, my question is how, for example, the analysis of previous Optimism rounds happened. Perfect. So for each of the Optimism rounds, there have been hypotheses set at the beginning of the round based on lessons learned from the previous rounds. It's sort of like: say, now we're going for impact metrics-based voting, and we're doing this because we expect that people are going to have an easier time deciding which type of metric is more relevant to them. We're going to be reducing time, because they won't be reviewing each of the projects. And then once the voting comes to an end and allocations are done, there's usually also an exit survey that is sent. And all of this data is then aggregated to understand, and I think this one has been more of a sentiment analysis: do we feel confident that the allocation was done better in comparison to round three? There's still a lot of room for improvement, to be quite honest, in terms of analyzing the post-mortem of the rounds, specifically when it comes to the impact that funding has had on the projects. I think for some of the retro rounds, this is still to be done. There is some: I think Open Source Observer has done some analysis, one year after RetroPGF 3, of whether projects continued to build on Optimism and continued to ship and generate value in the ecosystem. I think that's one good approach. Something else that I know Sejal and myself and others have been exploring is: can we create counterfactuals to understand if this injection of capital or funding to the projects actually made a difference? If we were to look at this in the future, had they never received funding, would it still look the same? That can help us understand if this actually makes a difference for this type of project or in this type of situation. But I think that is still pretty nascent, and not a lot of it, or none of it, has been done yet. Okay. Perfect. So we're going to get into breakout groups of three to four people. Let's see, I think five can also be good. If you can just get together. Perfect. I think we have three here. Sejal is going to help us as well in terms of putting together the groups. Do you want to get together? Hello. Perfect. Do you have things to write with? Yeah. Okay. Perfect. And what we're going to be doing is we're going to go through a set of prompts.
And this is going to be the process through which we design a retroactive round. You can choose your own adventure: whether you want to design it for a community or for an ecosystem. For example, I see the Gitcoin team here; maybe they want to design a retro round for members of their ecosystem. I see different people from LATAM communities over there; maybe they want to design it for a Latin American community. Something that Sejal and I were also brainstorming earlier for this workshop, and it might be an experiment that Sejal and I do right now as well, is: what if we were to design a retro round for Devcon? So anyone can take that prompt up as well. And so the first prompt is: we're going to ask you to introduce yourselves in your group in case you don't know each other. Please make it short, because we are going to have eight minutes for this first prompt. So introduce yourselves and choose your adventure. Choose who you are designing this retro round for. Is it for a local community? Is it for an ecosystem? Just agree with the other members of your team which audience you're looking to engage with. And if you have any questions throughout this process, please raise your hand and Sejal or myself will walk over to you and answer them. We're going to have eight minutes for this. So please introduce yourself, share why you're in this workshop and if you're part of a community or an ecosystem, and then decide who you are designing this round for. Go. Yes, if you have any questions, just raise your hands and Sejal or myself will come. [breakout group work] We have one minute left, so please come to a decision on what adventure you're going to follow for the next prompt. And time is up. So please, I hope you've written down, either on one of the post-it notes or somewhere else, the adventure that you're going to follow: whether you're going to be designing this retro round for a local community, or whether you're thinking of it as a Web3 ecosystem, Ethereum as a whole, or Devcon. The next prompt, based on your groups and based on who you've decided to design this round for, is that you will be defining the North Star that you want to achieve. And here is what the North Star is: what is the end goal that you want to reach within this community or within this ecosystem? It's important, as I mentioned before, to know this, because if we don't know where we're going, then we're not going to be able to tell if we've gotten there or how we're going to get there. And this means we're not going to be able to determine what type of data and what type of metrics would be useful for us to leverage when we're deciding who should be funded. And we're also not going to know what types of behaviors we want to incentivize people to follow that are going to get us closer to this goal.
And I will invite you as well to think of this not as a short-term goal. It could be a short-term goal, but it's better to think of it as a long-term goal. If you're doing this for a local community, how do you want this community to look 50 years from now, 60 years from now? What is this bright future that we're aiming for? And as I mentioned before, you can have it be a one-year goal; I know that a lot of the ecosystems plan in the shorter term because it makes sense for what they're trying to do. But try to aim for this long-term vision. Even if you're an ecosystem, we want to play infinite games, which means we're incentivizing behaviors that are going to be healthy for what we're trying to achieve in the long run. And so the second prompt is: define with your team members the North Star that you're going to be fulfilling for this local community or ecosystem. And here are some questions that will help you in defining this North Star. Feel free to use the post-it notes, and remember, one idea per post-it note; that's going to make it a lot easier when you're looking at the different ideas that you have. What is the overarching objective that you hope to achieve by running this round, and not only this round, but in the long term? And how can this round align with that overarching goal? Then, what is the long-term vision that you have for your community or for your ecosystem, and what are going to be your goals for this round? And so for this one we're going to have about 10 to 12 minutes. I think it might be too much, but we'll be going around, and if it is too much we'll just cut it short. So go. Yeah, and we just had a really good question on whether we should just have one North Star. What I would suggest is that each of you write down some of the things that would be very relevant to have in this round. Write each in a post-it note, then share them, paste them in the middle for everyone else to see, and then people can either agree or disagree with the things you've identified as valuable, or with what could be the North Star that you're going after. We have one more minute, so please start gathering your ideas and choose what the North Star is going to be for your round. We're done, and we're up to the next prompt. So now you've already defined the North Star for your community or your ecosystem, and that is the thing you're going to keep in mind as you're designing the rest of the steps of the retro round. Next, we're going to identify the audiences that are involved in these retro rounds. Who are, one, the projects or people that you expect to fund through this round? It's going to be either the projects that are applying, or, if you're not asking them to apply, you already have a way in which you identify who's going to be eligible to participate in this round. Who are they? What do they look like? Are they individuals? Are they groups? Second, as I mentioned before, in most of the retro rounds as we've seen them, people vote on who gets allocated the funds. So who are going to be your voters? Is it going to be everyone that belongs to the community?
Is it going to be a selected group of people that are very involved and have a lot of high context in your ecosystem? And third, you're also going to identify who is benefiting from the overall impact that the people or projects you're funding are generating. Who are the end users or end receivers of the impact that is being generated? Map out who these audiences are so that you can understand their incentives and what they are trying to achieve, whether it is within your ecosystem or within your community; what moves them forward and what they are trying to achieve, so that you can then design something that ensures you're aligning their incentives with what you're trying to achieve through this round. And here are some questions to guide you through thinking through this. The first one is, oops, apparently this slide is wrong. But here it is, as I mentioned before: identifying the audiences in terms of who is benefiting from the impact that's being generated, who are the people that would be voting in your round, and who are the projects or people that would be receiving funding through this round. And for this, we are going to give eight minutes. Let's start. And the time is up for this prompt. We're going to move to the next prompt, in which we are going to be looking at setting behaviors and defining what these behaviors are. As I mentioned before, in these retro rounds what we're doing is incentivizing very specific behaviors that we want to see, that we have a loose idea are going to take us to this North Star, that are going to enable us to get to these goals. So for this next prompt, you're going to be thinking about the type of behaviors that you want to see, the type of benefits, the type of recognition that you want people to have, or that you think people will appreciate, from participating in the kind of actions that are going to get you closer to the North Star. So think also about some of the milestones in between where you are right now and getting to the North Star, so that you can identify the type of behaviors you're trying to incite people to do that are going to get you closer. If you're looking at an ecosystem round, maybe this behavior is contributing to the open source code. Maybe this behavior looks like deploying smart contracts and attracting new users into the ecosystem. If we're thinking of a local round, and one of the things that you're incentivizing or value the most is, say, protecting the forest, maybe the type of behavior that you're looking for is planting trees, or protecting trees that are already in a specific place, taking care of the trees. These are the types of behaviors that you might want to incentivize people to have or do, and that are going to be rewarded at the end of the round. So we're actually going to give, I know it says 12 minutes, but we're only going to give eight minutes for this part as well. And you can start now. And try to be as specific as possible when you're thinking of the behaviors that people are going to have.
Because this part is going to help us identify the data and the metrics that we can use to measure the type of behaviors that people have already completed and that we could reward in the round. So again, if it's a local community and what they want is to preserve a particular area, then maybe we're going to be looking at planting trees or having a healthier ecosystem. And then how do we define what a healthy ecosystem looks like? Is it the number of trees that are in a specific space? Is it the type of behavior that people have when going around? So this is what we're going to be looking at. Okay, we have one more minute before we go to the last prompt. We're going to move on to the last prompt, since the groups are already done with this one. And as I mentioned to both of the teams, the way in which you're designing your round is going to inform the evaluation process and the data that you're able to extract from existing sources or that you need to create. The best thing there is to create it in a standardized way, so that people can cross-compare it amongst different projects. So for the last prompt, we're going to be looking at data and metrics for this evaluation. And there are two actions here that you're going to be looking into, or questions that you can use to guide your process. The first one is: now that you know the behaviors that you want to see from these different audiences, what you're aiming for, and the potential actions that members or people or projects can undertake to get to the North Star, how can you measure these actions? For example, over here we were talking about wanting more developers building in a particular ecosystem, and we talked about developing looking like launching their own smart contracts. It could look like contributions to the GitHub repos of the different libraries or tools that are relevant to the ecosystem. Then you can also explore: are there already existing data sources for this information? And if there are not, how can we measure this in a standardized way? And we're going to give three minutes for that, and then we'll start wrapping up. We have one more minute left. Okay, so you were talking about GitHub repo commits, stuff like that. And we were talking about how maybe that's spammable, it's not good, and maybe we should look at the quality of the code. And I was like, how do you even standardize that? It's very subjective. It's the same level we have in OPP. Exactly, no, but it is subjective. I think the good thing, though, is: how can we make subjective evaluations become as objective as possible? And that can look like, for example, if it's very, very subjective, maybe you have five different things within the code that you're reviewing against, and that helps reduce the subjectivity of it. But I'm reviewing it to a degree. No, you're welcome. Perfect. So we're coming to the end. And I think now what would be really interesting, this was the last prompt. And as I mentioned, this was a taste of what the design phase looks like when we're looking into mechanism design.
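As a concrete companion to the metrics discussion above, here is a minimal sketch that counts recent commits to a repository via the public GitHub REST API. The org and repo names are placeholders, and, as noted in the discussion, raw commit counts are spammable, so treat this as one input among several:

```ts
// Sketch: counting recent commits to a repo as one standardized activity metric.
// Uses the public GitHub REST API; "example-org/example-repo" is a placeholder.
async function commitsSince(owner: string, repo: string, sinceIso: string): Promise<number> {
  const url = `https://api.github.com/repos/${owner}/${repo}/commits?since=${sinceIso}&per_page=100`;
  const res = await fetch(url, { headers: { Accept: "application/vnd.github+json" } });
  if (!res.ok) throw new Error(`GitHub API error: ${res.status}`);
  const commits = (await res.json()) as unknown[];
  // Note: a real pipeline would follow pagination and weigh quality signals
  // too, since raw commit counts are easy to game.
  return commits.length;
}

commitsSince("example-org", "example-repo", "2024-01-01T00:00:00Z")
  .then((n) => console.log(`commits since Jan 2024: ${n}`));
```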
There are several other steps that would follow, but right now we only focused on the design space of the problem. And so we're going to just ask, real quick, the two teams to share whom you were designing this round for, what type of incentives you wanted to generate, what was going to be rewarded, and who the audiences are. We have very limited time, so if you can explain it in one minute or two, that would be great. Hello. So our retro funding round is for an ecosystem. Specifically, it's the Uniswap ecosystem. And the round we were thinking about is the hook round that they just released. So the goal would be to increase the number of builders that build on the Uniswap hook ecosystem, increase the number of high-quality hooks, or build hook tools, making it easier for developers to work on it, and also to onboard builders into the ecosystem. Some of the audience would include Web3 builders and educators who help, and this will benefit Web3 developers, Uniswap users, and Uniswap token holders. And who will vote on this? We're still debating this, but we think probably the Uniswap token holders. Yes, so the behavior we want to see is for them to build on the hook ecosystem and deploy smart contracts, and we also want educators and content creators to create content for hooks, to spread the information and help onboard people, and also for people to participate in voting and things like that. Perfect, thank you. And so please, I know we're missing the last group, but I don't think we have much time anymore, so just make it really, really brief. We still have two minutes, but this is the QR code for the group. Please feel free to join as well, and we can continue exploring what these designs look like. We designed retro funding, retroactive funding, to incentivize more women developers to onboard to Web3, which incentivizes us to create or hold more workshops for this audience specifically. And the audience, or the funding, will go to people who can be individuals and also developer communities. And the behaviors that we are looking for are, first, onboarding more women with workshops focused on them, and also more women contributing to open source projects. For the milestones, we have: increase women's attendance by 25% in six months, and then have more women's meetups during the year. Okay. Thank you. Awesome. Thank you. So please feel free to share the designs in the Telegram group. Thanks a lot. Sejal and I were really happy to host this workshop, and we're excited to see more people approaching mechanism design in a more methodological way, so that we can have better outcomes for how we're allocating funding in this space.
Thank you.", + "sources_streamethId": "67346f489dbb7a90e1e59ed9", "eventId": "devcon-7", - "slot_start": 1731571200000, - "slot_end": 1731576600000, - "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/1GTU723iYMOTD9COHjYQdSKNFi7gSZc88-BnP7Co9jE4", - "resources_slides": null, + "slot_start": 1731487800000, + "slot_end": 1731489600000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1xu1ruVYDwVrtPaBTfIRAfXMJa5j_5CZosQxtJM57H9c", + "resources_slides": "https://drive.google.com/file/d/1vqfv0UsbmEkDyDq88Di2oNwny9_fCMcK/view", "speakers": [ - "launamu", - "sejal-rekhan" + "kaseth", + "robin-hanson" ] }, "vector": [ 0, 0, + 6, 0, 0, 0, @@ -232593,7 +231966,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -232795,6 +232167,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -232849,7 +232222,6 @@ 0, 0, 6, - 6, 0, 0, 0, @@ -233336,13 +232708,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 6, - 0, - 0, 0, 0, 0, @@ -233370,8 +232735,11 @@ 0, 0, 0, + 2, 0, 0, + 2, + 2, 0, 0, 0, @@ -233435,6 +232803,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -233476,7 +232845,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -233542,7 +232910,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -233603,8 +232970,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -233905,6 +233270,8 @@ 0, 0, 0, + 0, + 0, 2, 0, 0, @@ -233918,49 +233285,30 @@ }, { "session": { - "id": "designing-conditional-markets-and-futarchy", - "sourceId": "EWJNVJ", - "title": "Designing Conditional Markets and Futarchy", - "description": "Conditional markets allow predicting outcomes from potential decisions, enabling what is called futarchy governance, but key design questions remain open. We'll examine specific challenges: aligning founders with investors in protocols, encouraging meaningful participation in decentralized governance, and integrating futarchy modules into existing governance systems.", - "track": "Cryptoeconomics", + "id": "deva-awards-sea", + "sourceId": "KGA9ZA", + "title": "DEVA Awards", + "description": "The DEVA Awards at Devcon, are lighthearted accolades designed to celebrate and acknowledge outstanding contributions within the Ethereum ecosystem. These awards allow the community to express appreciation for projects and individuals who have significantly enhanced the utility and usability of Web3 technologies since the previous Devcon. 
It's important to note that the DEVA Awards are intended to be fun and not taken too seriously.", + "track": "Entertainment", "type": "Talk", - "expertise": "Intermediate", - "audience": "Community", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "market", - "prediction", - "DAO", - "Futarchy", - "Public good" - ], - "keywords": [ - "Prediction", - "markets" - ], - "duration": 1519, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "5e074344751472dc78fed2d0f7259d0d3f840e54a5bd75116712a1e19a7cdcc9", - "sources_youtubeId": "iEjrdYReNnc", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "67346f489dbb7a90e1e59ed9", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731487800000, - "slot_end": 1731489600000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1xu1ruVYDwVrtPaBTfIRAfXMJa5j_5CZosQxtJM57H9c", - "resources_slides": null, - "speakers": [ - "kaseth", - "robin-hanson" - ] + "slot_start": 1731653700000, + "slot_end": 1731655200000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1kqglc9q5GnXKZpzbgqVtlSzsWyTWh7CppeW0EvBT9mc", + "resources_slides": "" }, "vector": [ 0, 0, - 6, 0, 0, 0, @@ -233968,6 +233316,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -234171,7 +233520,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -234226,7 +233574,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -234742,11 +234089,8 @@ 0, 0, 0, - 2, 0, 0, - 2, - 2, 0, 0, 0, @@ -234810,7 +234154,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -234818,7 +234161,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -235272,16 +234614,18 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, + 0, + 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -235296,7 +234640,7 @@ "session": { "id": "deva-awards-sea", "sourceId": "KGA9ZA", - "title": "DEVA Awards SEA", + "title": "DEVA Awards", "description": "The DEVA Awards at Devcon, are lighthearted accolades designed to celebrate and acknowledge outstanding contributions within the Ethereum ecosystem. These awards allow the community to express appreciation for projects and individuals who have significantly enhanced the utility and usability of Web3 technologies since the previous Devcon. It's important to note that the DEVA Awards are intended to be fun and not taken too seriously.", "track": "Entertainment", "type": "Talk", @@ -235304,24 +234648,15 @@ "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], "keywords": [], - "duration": 1134, + "tags": [], "language": "en", - "sources_swarmHash": "f751be56f3a505e454185e95b39fe31afc7fce540bff6b5961f21170ad16c8ae", - "sources_youtubeId": "gD1som6fjNY", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673818451b0f83434d226075", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673816891b0f83434ddb4f43.vtt", - "transcript_text": " Awesome. Hi, everybody. I'm Hart, and today I'm going to be talking about how to open source code. I think this is going to be a little more pragmatic than some of the previous talks, but I'm going to start by talking about some open source background. Why do we open source? And a little bit about the Linux Foundation with a focus on why you should care about my opinion on how to open source code. Then I'm going to go into some open source table stakes, some stuff that you really need to get right if you want to properly open source code. This will include software licensing, IP protections, and best security practices. 
And finally, I'm going to talk about open governance and open communities. So let's get started. Let's recall: why do we open source code? There are two main reasons. The first is so others can verify, examine, and learn from our code. If we want people to be able to trust what we're doing, we have to show them the code. And open sourcing also has many benefits around things like software security. But probably the main reason why we open source code is so that others can use and even contribute to our code. There are many economic efficiencies of working together in open source, and a large ecosystem around a piece of open source code ensures that even if one organization goes away, development will continue. So open source software has won in the broader ecosystem. If you're a developer, you probably know this, but for a typical commercial application, even if it's closed source, 90% of a modern application's code base is open source. And if we look at how software is built today in the world, even commercial software, this is what it looks like: people start with an open source software framework, and then they use custom code and existing libraries to solve problems. And this can lead to tremendous economic efficiencies. I know this is a cypherpunk track, but if you're trying to convince your boss that you should be able to open source your code, the economic efficiencies are a great argument. But open source is most efficient when multiple companies, entities, or people collaborate to build software that they all need in the open. And you can think of this as decentralized development. And this is the problem that the Linux Foundation solves: we solve decentralized development for open source code. When multiple companies, entities, or individuals want to collaborate on open source software but don't trust one single party to own the code, they turn to the Linux Foundation. And this is something that blockchain people tend to be inherently comfortable with, which is fantastic. So I don't want this to be an infomercial for the Linux Foundation, but I do want to present some facts and figures that can hopefully convince you that we have the data and the experience to properly give advice on open source code. We are behind some of the most critical projects in the world. Probably everybody knows the Linux kernel. Maybe you use Kubernetes if you do cloud development. But you may not know that a large part of the world's telecom stack runs on Linux Foundation open source code, or that if you have a new car, it probably runs on Linux, maybe using something like Automotive Grade Linux. There are fun projects too, like the Academy Software Foundation, which hosts the software Hollywood uses to make animations, and also things like RISC-V. Just some numbers: I'm not going to read these to you, but hopefully I can convince you that we've seen it all when it comes to open source. And why am I here? Well, we have a number of projects in the Ethereum space as well. Many people here may know Besu, which is one of the core Ethereum execution clients. We also have a number of other tools for Ethereum in our open source ecosystem, like Web3j, which you might have used to develop on Ethereum, or Pal, which is a brand new lab for privacy for EVMs.
And again, all of these code bases are completely free and open to use, so you can go do it today if you want. All right, let's get into the meat of the talk. Let's talk about open source software table stakes: what do you need to do to make sure you have a good experience open sourcing your code? Let's look at a definition of open source software: it's a type of software in which source code is released under a license. And the key word here is license. A license is, in this case, a legal document that expresses rights and responsibilities around the code. So there are three typical types of open source license we see today. We have business licenses, which are typically very restrictive software licenses that may require payments to a developing company for use. We have copyleft licenses, like the GPL, where modifying the software requires you to contribute back any derivative works or things you build on top of the software. And finally, we have permissive licenses, which really let you use and modify the software in any way that you like. And I'm going to go into a little more detail about all of these licenses. So let's start with BSL licenses. Technically, a business source license is not an open source license; it's a source-available license. The source code is public, but you're only allowed to use the source code if you've met certain conditions. This is viewed as a compromise between proprietary software and open source software: if you want to gain some of the benefits of people seeing and trusting your code, but you want people to pay for it, this is what people typically do. BUSL is an example of this, and in the Ethereum space, Arbitrum Nitro has this license. Now, let's talk about copyleft licenses. These are open source licenses that require users to make available all derivative works. So if you modify the code and use it in something, you typically have to make the source of that something available for free, and also licensed under this copyleft license. Copyleft licenses are often very hard to use commercially. Notable examples are MPL, GPL, and LGPL. And in the Ethereum community, Geth is a notable example. I'll put up this quote from Richard Stallman about GNU and copyleft licenses, and I will issue a warning about copyleft licenses: the goal of copyleft licenses is to increase contributions by legally requiring them, and the unfortunate reality of these licenses is that people just won't use your code if there are any viable alternatives, due to the potentially cumbersome legal requirements. And I'll say it's very hard to relicense from a copyleft license to a permissive license, so choose carefully. And finally, there are permissive licenses. These are open source licenses that let you modify and use code freely. Most commercially used open source code at this point in time does use permissive licenses, because they're by far the easiest to use. And if you want to maximize community adoption of your code, we definitely recommend you use a permissive license. You've probably seen Apache 2 and MIT as examples of this. There are lots of projects, but in the Ethereum space, Besu has an Apache license. So before we go any further, I just want to mention some common licensing pitfalls. Most of these I've talked about at this conference and seen people make these mistakes.
So what I want to talk about is being very careful with the code you use, and this is kind of an LGPL dependency issue. When you create a combined work with LGPL code, which means you're putting LGPL code into something bigger and using that, people sometimes think that you can link it if it's in a separate folder. But as this provision shows, you can't really have compile-time dependencies on LGPL code; it has to be a runtime dependency. So carefully check your dependencies. And finally, be careful with licenses that don't have IP protections. This is an example from an anonymized company's documentation that basically says: we're going to use a permissive license for the code, but we're going to enforce patents on this code, so you still need to pay us to use it. Read carefully. This company is actually very transparent, and if you use code from less ethical and transparent people, you might be rolling the dice on a lawsuit. So in summary, I'll say pay very close attention to the licenses and licensing requirements of the open source projects you use. I personally recommend that you use Apache 2 for your own code, because it's a permissive license and it has explicit patent grant protections. And if you want to make a proprietary code base visible to others, maybe use a BSL license. Right, let's move on. We've been talking about IP protections. So what do you do when you need to protect your code against IP issues? Sometimes contributors might unintentionally or maliciously add some code that has some form of IP protection to your project. And it turns out we at Linux have had a big issue with this. Does anybody know of the SCO Linux disputes? Yeah, some people. These were a huge number of lawsuits, essentially because the kernel did not have explicit IP protection on it. And to address this, folks at the LF created the Developer Certificate of Origin. We recommend that for your code you use either a Developer Certificate of Origin or a Contributor License Agreement. It's very important to have these legal frameworks around your code, and this is especially important if you're taking contributions from outsiders. So just as an example, this is the DCO right here. That's it. It's very easy to set up on GitHub, using commits signed off with the -s flag. It takes a little getting used to, but once your contributors are used to it, it's very, very easy. So in summary, be sure to have legal protections for your code. If you don't, people that can be sued will be very hesitant to use your code. We actually require this kind of protection for all Linux Foundation projects: if you contribute code, you have to have this protection. And you should have it too for your open source software. We use the DCO, but again, a CLA is perfectly acceptable. So what about security? Obviously, security is critical for any software, not just open source software. We could do a whole conference about open source security, and there was one last month, actually. But I want to briefly emphasize some things that might be different for open source, and those are vulnerability disclosures, SBOMs (software bills of materials), and authenticating software. I will say we have a great organization in the Linux Foundation called the OpenSSF, which has all of your open source software security best practices. Go check it out if you have any questions on security; it's a great resource just to make sure you're doing everything you need to be doing.
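Before going deeper into security, a sketch to make the licensing and DCO mechanics above concrete. The SPDX header line is a real, widely used convention, and the DCO sign-off trailer is what `git commit -s` produces; the developer name and email are placeholders:

```ts
// SPDX-License-Identifier: Apache-2.0
//
// The machine-readable SPDX header above is the conventional one-line way to
// mark a source file with its license.
//
// A DCO sign-off, by contrast, lives in the commit message, not the source:
// committing with `git commit -s` appends a trailer of the form
//
//   Signed-off-by: Jane Developer <jane@example.com>
//
// and tooling such as the DCO GitHub app can then verify that every commit
// in a pull request carries one.
export {}; // placeholder so this example file is a valid TypeScript module
```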
So what about security vulnerability disclosures and pipelines? An obvious advantage of open source software is that community members can find and report bugs. And the key point is that you want to make this as easy as possible for them; you don't want contributor friction to be an issue. The exact method in which you set things up doesn't really matter, as long as it's easy to follow. I'll say that in the LF, many of our projects have been using the GitHub tooling, though not all of them. Besu, for instance, has seven reporting channels, which can be a challenge for us to handle, but we do. Now, moving on. Recall that I said modern software was built like this. This is very nice and efficient, but it introduces a number of problems: if you build on software with security bugs, you might have a real problem. And it turns out that a lot of the big attacks that have been publicized and caused a lot of problems recently have been due to these so-called software supply chain attacks. The solution to stopping these attacks is generally what's called a software bill of materials, or SBOM. Has everybody here heard of SBOMs before? Some people? Great. If you haven't, you should definitely check this out. What SBOMs let you do is keep track of all the code you pulled into your open source software project. If there are bugs, you can quickly find them and update them. And I also encourage you to carefully examine the code you use to make sure it's well maintained. Don't pull in code from some random developer who hasn't updated it in four years. Well-run open source projects are very careful with the dependencies they use, and there are a lot of good tools to help with this. GUAC is excellent, but there are many others as well. Right. So I'll briefly touch on software and artifact authentication. If you build a popular open source project, people will attempt to impersonate you. This constantly happens for us at the LF: just last month, we had people create a bunch of fake npm packages to try to impersonate us. But there are plenty of tools that you can use to sign and authenticate code. Sigstore, I think, is an excellent example of this, although I do wish, and think, it should be run on the blockchain. All right, so we've covered some basic stuff. What about building an open community? If we go back to our definition of open source software, one key phrase here is "collaborative public manner". And this is important because the freedoms provided particularly by a permissive license allow large and diverse communities to form around popular software, and this allows for those economic efficiencies of open source. So when we talk about open development and open collaboration, we don't just mean open source. We also mean open development, which means there's a community actively building the open source code in the open, and open governance, which means that procedures and roles for the community, how decisions and priorities are made for the project, and the roadmap are openly defined and managed. So I'm going to go through a few open source models of governance here. And this is a continuum; it's not only these four things. It's just to give you a flavor of how you should set up your open source project.
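Before getting into the governance models, one concrete note on the SBOM discussion above: a minimal sketch of reading an SBOM, assuming the CycloneDX JSON format, that lists each recorded component so the inventory can be checked against a vulnerability feed. The file path is a placeholder:

```ts
// Sketch: listing the dependencies recorded in a CycloneDX JSON SBOM.
// Field names follow the CycloneDX spec; "sbom.cdx.json" is a placeholder path.
import { readFileSync } from "node:fs";

interface CycloneDxComponent {
  name: string;
  version?: string;
  purl?: string; // package URL, useful as a lookup key against vuln feeds
}

interface CycloneDxBom {
  bomFormat: string;
  components?: CycloneDxComponent[];
}

const bom = JSON.parse(readFileSync("sbom.cdx.json", "utf8")) as CycloneDxBom;
for (const c of bom.components ?? []) {
  console.log(`${c.name}@${c.version ?? "unknown"} (${c.purl ?? "no purl"})`);
}
```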
The least open form is what I like to call a public demonstration: the code is just open source, it's just there, it's not really updated, and there's not necessarily a roadmap. You could be less polite and call this a code dump. Then we move on to open company products. This is when a company or entity open sources a piece of software that constitutes a product. It doesn't allow outside contributions, or makes them very difficult, but it keeps a roadmap, has regular releases, and follows best security practices. As we get more open, we have benevolent dictator projects. This is when an organization or individual open sources their code and allows contributions from external contributors, but ultimate authority over the project still rests with that one benevolent organization. And finally, we have true open governance: code with transparent documentation and decentralized, meritocratic governance, hosted under a neutral entity. Anyone can join, there are clear processes for leadership, and there are no restrictions on who can join and become a leader. So we can go into a little more detail on these models. I certainly believe that opening code as a public demonstration is better than not opening code at all: third parties can audit your code and see what you're doing, and people will trust you. The biggest pushback we get on this, particularly from big companies, is that there's reputational risk. And my response is usually that if you're trying to build a production system, you shouldn't be embarrassed to let others see your code; if that's the case, that's a problem. Once we move on to a more open product, it can be a little easier than a public demonstration, but it's still hard to get others to use your code, because the model requires a very high level of trust in the company. What happens if the company changes direction and abandons the software, or goes out of business? That might be a problem for you and your business if you're building on this code. And then there's the benevolent dictator model. This is increasing in openness, but it still means that competitors in your space, or those who don't trust your company or entity, are unlikely to participate. It's easier to get contributions and participation than with just an open product, because people can typically make small changes: they can fix bugs, they can work on their pain points. But ultimately, it's not open governance. And finally, we have fully openly governed projects. This strongly incentivizes companies, other entities, or people to contribute, because they can play a role in governance proportional to their contributions. A diverse set of contributors means that potential users can easily be convinced of the long-term stability and viability of the project, and this will increase overall use of the project. The main drawback of this is that it's the community's project now, not just your project, and people that participate should receive governance powers proportional to their contributions. Right. So about governance, I will encourage you to carefully choose projects to use or contribute to based on which style of governance is utilized by the community. And whatever governance you do choose, make sure you document it clearly. If your main goal is growing a community, we strongly recommend openly governed projects, as we've talked about for the last two minutes.
So finally, I want to talk about some best practices for growing a community. One best practice is open source bureaucracy. And this is what I've said before, although I haven't really called it that: the most successful projects that have stood the test of time have neutral, open governance models where those who do the work make the decisions. Again, not very cypherpunk, but the longest-lasting, most successful open source projects also have a good, diverse commercial support ecosystem, because the people that make money off the code can put that back into contributing to the code. It's difficult to attract investment from potential competitors if one entity owns or controls the code base, so we recommend a neutral home, or setting up some kind of neutrality around the code base. And finally, it's very important to have transparent, documented governance. People looking to contribute are going to want to know exactly how they can participate, and they don't want to be surprised. So right, this seems hard. What should I do? Well, take a look at the Linux Foundation or another open source software foundation, something like the Apache Software Foundation, as a home. The LF's role, again, is to serve all of the big companies in technology. It's hard to put an open source community together, and we have experience there. And with us and all these other software foundations, the same legal structure that, for us at least, protects the kernel and Kubernetes can also protect your project. And finally, I do want to emphasize that contributing to code or a project is completely free and open to anyone. Any Linux Foundation project, you can come join, you can use the code, you can contribute to the code. It's all free and completely open. So yeah, come find us at Linux Foundation Decentralized Trust if you have any questions. And thank you very much for your time. All right. Thank you once again, Hart, for that. We do have a few questions here. Can you explain the differences between the MIT and Apache 2.0 licenses? Great question. This is a hard question, and the true answer may not even be legally known, because we have not had court cases that have separated these licenses. Again, I'm not a lawyer; this is not legal advice. The main difference is that the Apache 2.0 license has explicit patent protections. So, and this has not happened yet, there might be a case where there's a successful patent lawsuit against an MIT code base but not against an Apache 2 code base. For now, that hasn't existed. For now, they're mostly identical; they're compatible licenses, and you can use them interchangeably. But if you're extra paranoid, you might want to use Apache 2. Right. What are the most effective revenue strategies to ensure the financial sustainability of an open source project, and how can you build enough community interest to reach that sustainability? This is also a great question; we could spend an entire talk on this. It really depends on your business model. There are a number of successful open source business models, and I'm sure many people in the room have them. But the big thing is that people should be successfully using the open source software in commercial businesses. Whether it's selling services around the open source project; that's a very popular one.
Selling premium products or software on top of the open source model is another. Those are two of the main models; there are many more. But again, the key is building a commercial ecosystem around the project. If I knew the answer to that for every kind of application, I would be a billionaire software executive, and I am not. So I'll say it does depend on the project. Maybe in another life. Okay, on to the next question. Fascinating talk, but how do we get average people to actually care about open source software? That's a great question. At the Linux Foundation we're always looking to do more of that. I think it's a much easier conversation to have with developers. I don't know that I have a good answer to that question; I think it's a difficult problem. People have to understand software and software development first. So it's a hard question. The next one says: you talk a lot about governance. Can on-chain governance help with this? Potentially, in the long run, on-chain governance can help. Right now, lawyers are very uncomfortable with it, and the precedent really hasn't been set. I'm optimistic that in the long run we can have at least some governance on-chain. Yes. All right. How many of these best practices do you wish were not needed in a world with better legal frameworks and/or less frivolous lawfare? Well, I certainly wish we had a less crazy software patent system, where we didn't have to constantly engage in mutually-assured-destruction patent practices and things like that. So I would love to see that cleaned up. Some of the licensing exists because companies do need to make money off of open source software, but I certainly think the patent system is a mess. Oh, sure, yeah: how do you envision open source software evolving in a world that's increasingly dependent on decentralized and privacy-focused solutions? Well, decentralized solutions inherently mean more people have to come together to solve problems, and that's going to mean a bigger role for open source, because the only way people can build software together effectively is really in a neutral home. We're going to have to have decentralized, neutral homes for software. So I think decentralized technology is going to really accelerate the development and use of open source. We're already seeing a lot of this in the banking industry. All right. Are there any resources to help transition from a benevolent dictatorship to an open governance model? Absolutely. We have a ton of this at the Linux Foundation; go look at our website, come talk to us. This is a very common question we get asked quite a lot, and we have a lot of experience doing this. All right, on to the last question. Does the LF look at public goods funding experiments in the Ethereum ecosystem? We periodically look at funding. We're a non-profit, so we don't have a ton of money to give away, and we obviously don't have a token. But certainly our maintainers and projects look at funding opportunities for themselves. All right. Thank you so much for that. And thank you to every single one of you. Can we please give a hearty round of applause?
Thank you.", + "speakers": [], "eventId": "devcon-7", "slot_start": 1731653700000, - "slot_end": 1731654900000, + "slot_end": 1731655200000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1kqglc9q5GnXKZpzbgqVtlSzsWyTWh7CppeW0EvBT9mc", - "resources_slides": null, - "speakers": [] + "resources_presentation": "https://docs.google.com/presentation/d/1kqglc9q5GnXKZpzbgqVtlSzsWyTWh7CppeW0EvBT9mc" }, "vector": [ 0, @@ -236634,11 +235969,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -236660,11 +235990,11 @@ }, { "session": { - "id": "deva-awards-sea", - "sourceId": "KGA9ZA", - "title": "DEVA Awards", - "description": "The DEVA Awards at Devcon, are lighthearted accolades designed to celebrate and acknowledge outstanding contributions within the Ethereum ecosystem. These awards allow the community to express appreciation for projects and individuals who have significantly enhanced the utility and usability of Web3 technologies since the previous Devcon. It's important to note that the DEVA Awards are intended to be fun and not taken too seriously.", - "track": "Entertainment", + "id": "devcon-sea-overview", + "sourceId": "HXNYDR", + "title": "Devcon SEA Overview", + "description": "Don’t miss the Devcon Opening Ceremony, where we’ll set the stage for an incredible event ahead, with talks from Vitalik Buterin (Founder of Ethereum), Aya Miyaguchi (Executive Director of the Ethereum Foundation), Josh Stark (Ethereum Foundation Leadership), Skylar Weaver (Devcon Team Lead), and more surprise guests.", + "track": "Real World Ethereum", "type": "Talk", "expertise": "", "audience": "Engineering", @@ -236673,12 +236003,18 @@ "keywords": [], "tags": [], "language": "en", - "speakers": [], + "speakers": [ + "skylar-weaver", + "nathan-sexer" + ], "eventId": "devcon-7", - "slot_start": 1731653700000, - "slot_end": 1731655200000, + "slot_start": 1731385800000, + "slot_end": 1731388500000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1kqglc9q5GnXKZpzbgqVtlSzsWyTWh7CppeW0EvBT9mc" + "sources_youtubeId": "c8suX-_PTo8", + "sources_swarmHash": "6579559384c3752ff4c849eff1d48dbe1dfc815f78de54dc6debbd2a36b5e991", + "resources_presentation": "https://docs.google.com/presentation/d/1Qo0Nhlnmak6ecCzF_nhTStunc63frayP5RYA5bLD3TQ", + "resources_slides": "" }, "vector": [ 0, @@ -236687,9 +236023,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -236864,6 +236197,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -236951,6 +236285,55 @@ 0, 0, 0, + 6, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -237944,58 +237327,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -238017,30 +237348,49 @@ }, { "session": { - "id": "devcon-sea-overview", - "sourceId": "HXNYDR", - "title": "Devcon SEA Overview", - "description": "Don’t miss the Devcon Opening Ceremony, where we’ll set the stage for an incredible event ahead, with talks from Vitalik Buterin (Founder of Ethereum), Aya Miyaguchi (Executive Director of the Ethereum Foundation), Josh Stark (Ethereum Foundation Leadership), Skylar Weaver (Devcon Team Lead), and more surprise 
guests.", - "track": "Real World Ethereum", + "id": "developing-and-using-a-modular-folding-schemes-library", + "sourceId": "PPFPQY", + "title": "Developing and using a modular folding schemes library", + "description": "We will present Sonobe, a modular folding-schemes library. It currently features implementations of Nova, CycleFold, Hypernova and ProtoGalaxy schemes and is compatible with a wide range of R1CS arithmetization libraries. we will briefly discuss what folding schemes are and how they fit into IVC-style proof systems. Next, we will explain how Sonobe was built and what features it supports. Finally, we will cover what has been built with Sonobe and how developers can start using it today.", + "track": "Applied Cryptography", "type": "Talk", - "expertise": "", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], - "language": "en", - "speakers": [ - "skylar-weaver", - "nathan-sexer" + "tags": [ + "Libraries", + "Zero-Knowledge", + "Cryptography", + "nova", + "Cryptography", + "Libraries", + "Zero-Knowledge" + ], + "keywords": [ + "Folding schemes", + "IVC", + "Nova" ], + "duration": 1574, + "language": "en", + "sources_swarmHash": "2e331d376e1fc71c8dba0d0ecc80ce2ae55d376fbce4fa2edba77aa3c2fca3ae", + "sources_youtubeId": "biK_NKwdBk4", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673437c69dbb7a90e12bca3e", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731385800000, - "slot_end": 1731388500000, - "slot_roomId": "main-stage", - "sources_youtubeId": "c8suX-_PTo8", - "sources_swarmHash": "6579559384c3752ff4c849eff1d48dbe1dfc815f78de54dc6debbd2a36b5e991", - "resources_presentation": "https://docs.google.com/presentation/d/1Qo0Nhlnmak6ecCzF_nhTStunc63frayP5RYA5bLD3TQ" + "slot_start": 1731472200000, + "slot_end": 1731474000000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1IOfjp_pKz83JTceKqk5Rve7U1YRQJSc4MA5OPmnj6oE", + "resources_slides": "https://drive.google.com/file/d/14WvRoVTFSbrWP5sjs0vDsuhAPKoroIv4/view", + "speakers": [ + "arnaucube", + "pierre-daix-moreux" + ] }, "vector": [ 0, @@ -238049,11 +237399,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -238092,6 +237442,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -238227,7 +237578,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -238802,6 +238152,9 @@ 0, 0, 0, + 6, + 6, + 6, 0, 0, 0, @@ -239059,16 +238412,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -239361,6 +238705,7 @@ 2, 0, 0, + 0, 2, 0, 0, @@ -239379,48 +238724,37 @@ }, { "session": { - "id": "developing-and-using-a-modular-folding-schemes-library", - "sourceId": "PPFPQY", - "title": "Developing and using a modular folding schemes library", - "description": "We will present Sonobe, a modular folding-schemes library. It currently features implementations of Nova, CycleFold, Hypernova and ProtoGalaxy schemes and is compatible with a wide range of R1CS arithmetization libraries. we will briefly discuss what folding schemes are and how they fit into IVC-style proof systems. Next, we will explain how Sonobe was built and what features it supports. 
Finally, we will cover what has been built with Sonobe and how developers can start using it today.", + "id": "digital-pheromones-mpc-for-human-connection-and-coordination", + "sourceId": "LMCG3V", + "title": "Digital pheromones: MPC for human connection & coordination", + "description": "Recent MPC research from Cursive and PSE enables a new concept called \"digital pheromones\": the ability to produce lightweight, privacy-preserving signals that people can use to coordinate safely and efficiently.\r\n\r\nThe primary result we will cover is Trinity, a new 2PC scheme with nearly ideal UX/DevX, built on the trio of PLONK, Garbled Circuits, and KZG Witness Encryption. We will do a live demo with attendees and explore what a future filled with digital pheromones will enable!", "track": "Applied Cryptography", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Libraries", - "Zero-Knowledge", - "Cryptography", - "nova", - "Cryptography", - "Libraries", - "Zero-Knowledge" - ], - "keywords": [ - "Folding schemes", - "IVC", - "Nova" + "MPC", + "Privacy", + "Use cases of cryptography" ], - "duration": 1574, + "keywords": [], + "duration": 1517, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "1907282ffb309a0d8b977413439d8bf99c1a3372f0b59ab6607b011a0491ebce", + "sources_youtubeId": "TY2ZWmR_UqM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673437c69dbb7a90e12bca3e", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731472200000, - "slot_end": 1731474000000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1IOfjp_pKz83JTceKqk5Rve7U1YRQJSc4MA5OPmnj6oE", - "resources_slides": null, + "slot_start": 1731486000000, + "slot_end": 1731487800000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1VlzRulp0j62UZdPbUEc2y_6-IxSsimLBL_t3kn0xprA", + "resources_slides": "https://drive.google.com/file/d/1mxbzO7KuAwRpyJnV6QwNnfPLzsODzRRo/view", "speakers": [ - "arnaucube", - "pierre-daix-moreux" + "vivek-bhupatiraju" ] }, "vector": [ @@ -239473,10 +238807,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -239694,11 +239024,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -240186,9 +239516,6 @@ 0, 0, 0, - 6, - 6, - 6, 0, 0, 0, @@ -240207,6 +239534,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -240267,6 +239595,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -240293,6 +239622,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -240447,7 +239777,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -240742,8 +240071,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -240760,47 +240089,50 @@ }, { "session": { - "id": "digital-pheromones-mpc-for-human-connection-and-coordination", - "sourceId": "LMCG3V", - "title": "Digital pheromones: MPC for human connection & coordination", - "description": "Recent MPC research from Cursive and PSE enables a new concept called \"digital pheromones\": the ability to produce lightweight, privacy-preserving signals that people can use to coordinate safely and efficiently.\r\n\r\nThe primary result we will cover is Trinity, a new 2PC scheme with nearly ideal UX/DevX, built on the trio of PLONK, Garbled Circuits, and KZG Witness Encryption. 
We will do a live demo with attendees and explore what a future filled with digital pheromones will enable!", - "track": "Applied Cryptography", - "type": "Talk", + "id": "discovery-the-tool-at-the-core-of-l2beat", + "sourceId": "G9ESC7", + "title": "Discovery - the tool at the core of L2BEAT", + "description": "Hands on workshop about how to use an L2BEAT tool called discovery for mapping, researching and monitoring all the contracts involved in a project. We'll start by introducing the problem that discovery tries to solve and after that we'll get into trying to understand the architecture of a real world project by using the avenues this tool gives us. After this session the participant should feel empowered to use discovery to deepen his knowledge about all on-chain deployments.", + "track": "Developer Experience", + "type": "Workshop", "expertise": "Intermediate", - "audience": "Research", + "audience": "Developper", "featured": false, "doNotRecord": false, "tags": [ - "MPC", - "Privacy", - "Use cases of cryptography" + "Architecture", + "Tooling", + "DevEx", + "Event monitoring", + "research", + "DevEx", + "Event monitoring", + "Tooling" ], - "keywords": [], - "duration": 1517, + "keywords": [ + "Holistic monitoring", + "Architecture research" + ], + "duration": 4792, "language": "en", - "sources_swarmHash": "1907282ffb309a0d8b977413439d8bf99c1a3372f0b59ab6607b011a0491ebce", - "sources_youtubeId": "TY2ZWmR_UqM", + "sources_swarmHash": "2446734d4de4327fb1de791a429823052d3a3669c0f5d37607ec93e2160ffec5", + "sources_youtubeId": "azowA66W5UY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6738656320d1f9ac48c07177", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6738656320d1f9ac48c07177.vtt", + "transcript_text": " Hi, hello. So I'm Mateusz, or you can call me Matt, like the short for Matthew. I'm from L2Beat. I'm half of the tooling team in the L2Beat. And my workshop is on the discovery. It's the tool we use, we build, to help us research projects, and it turned out that it is really useful at solving all of our problems. I mean, not all, but like majority of problems I'm going to later talk about. So, a funny story is that I almost lost the demo part of this presentation. And the only surviving copy of it was like some random Vim buffer. So I just wanted to point it out. Okay. So there is a lot of projects on L2Beat. You know, the amount of projects is growing. And we internally expect there to be at least 1,000 L2s. So currently you can see that there is 51 rollups and 66 validium and optimiums. So around let's call it 110 projects of only L2s. We also track bridges. Not to the same standard, but for all of those old tools, we have a minimum bar of data we want to show, and that would be the risk rosette. We want the risk rosette to be obviously correct, and the standard for this data is actually quite high. We want to show data that is correct correct so you don't lose your funds, you don't make a decision based on our data that is incorrect. So how to even maintain all of this, right? So just like a quick math lesson, right? So imagine that we have 110 projects, and every project has a single update every two months, right? So it's actually quite sparse. We see projects that are updating way more often. 
And even if every project had a single update every two months, we would have an 84% chance of seeing at least a single update in a day. So it's basically guaranteed that every single day something new will change on chain. We need to be able to detect it, see it, see what's changed, and act upon it. And also, like, even if we assume that an update happens every six months, the chance is still 45%, right? So it's like a coin toss, basically. And these chances are basically always working against us. The more projects are added, the closer those chances get to 100%. So what are our needs, right? So like I said, there is a high chance of an update happening, and we need to be able to quickly see that it happened. And we need to be fast in reacting to it. If, for example, Optimism, right, any of the top dogs, has an update and we take like three weeks to process that update or to even see that an update happened, then we are showing incorrect data for three weeks, which is actually a quite long amount of time when it comes to web3. When an update happens, we need to know what changed. So it's not enough to see that something has changed. We also need to know what has changed, right? So are any of the risk assumptions that we had before now different? So also, we need to be able to look inside the project and compare it to the previous version. And as always, we want to automate as much as possible. You cannot get to 110 projects listed on a website and do all of them manually. It is possible with human effort, but we don't have the amount of resources to do it. So right now we have four researchers, and we maintain all of the 110 projects with only four people, and we also manage to do stuff that's additional. So discovery, essentially, I think, is at the core of solving all of those problems. Of course, discovery is not a panacea. It does not solve all of the problems by itself. But of the problems that I listed, discovery is always at the core of the solution. So the mental model of discovery is like this. I like to think about discovery this way. And also I like understanding things from the low level. So let's try to understand the inputs and outputs of discovery. The input, of course, is the Ethereum state. We want to see what the state of each project is at a given, let's say, block number. So the input to discovery is that Ethereum state. Of course, we are not sending, like, a snapshot.zip to discovery. We use RPC providers, which is essentially having the entire Ethereum state at your fingertips. You can do any call you would like. And to facilitate querying the data from the state, we have a config JSON, which instructs the discovery program which data we are interested in and how to process it. And after passing those two inputs to discovery, an output is generated, which is a discovery JSON file. Of course, it's all a simplification. The actual mental model, which I don't even think is correct, and which has some missing parts, is more something like this. So that's what I mean when I say discovery is at the core, right? All roads lead back to discovery. During this workshop, I'll try to touch upon all of the things that are listed here. So you'll be able to understand at least how this flowchart happened.
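The 84% and 45% figures above follow from a simple back-of-the-envelope calculation, sketched here under the assumption that the 110 projects update independently and on uniformly random days:

```latex
% One update per two months (~60 days) per project:
P(\text{at least one update today}) = 1 - \left(1 - \tfrac{1}{60}\right)^{110} \approx 0.84

% One update per six months (~180 days) per project:
P(\text{at least one update today}) = 1 - \left(1 - \tfrac{1}{180}\right)^{110} \approx 0.46
```

With more projects the exponent grows, which is why these odds drift towards 100%.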
So since I myself am a visual person, I want to create, I want to show you a demo of something nice to look at. So this is something we have been working on, which will enable us to move from command line to a graphical user interface. So right now we are calling it Discovery UI. And let's see, like like all of the projects that are listed here are projects that we are tracking with Discovery. So I have picked Zora for the project we are going to be using during this workshop. So let's see what kind of data we can expect to see in this tool for Zora. So of course there is like a lot of things to take in. But let's start from the left and keep going right. There are on the left, the thing is that they look like files, but they are actually contracts. They are kind of inspired by the file view in Visual Studio. And you can see, maybe, I don't know, the contrast is not the best, but I hope you'll see that. We have two contracts, which are the initial contracts. And by initial, I mean the contracts from which we are going to be starting and all of the contracts that are on the left have been found based on those two initial contracts. So it is, as you can see, like it is quite useful to be able to find two contracts used in a project and then basically find all other contracts used by that project and also this updates automatically so if anything new happens, new is added or is removed, they are automatically updated. Each of those contracts has some values, right? So each contract has an address, a name, a description given by a researcher. And let's focus on the fields for now. So fields are state variables variables that we have found in the ABI which are either public variables or functions which don't take any arguments and just return something so we are trying to build the state of the contract through these. There is also one more part which will be important later during the demo. I mean during the workshop part. Which is that we try to build arrays from functions which take a single argument which is of type integer. So we just assume that this function is like get something by index and we try to get all of those things. And you can see that there are actually addresses inside of those values. And those addresses lead to other addresses. And this is how discovery works. It gets all of those state variables, and if it finds an address, it assumes that this address is also connected and just keeps discovering on those addresses. And the third view here is like a graph view. So all of those projects here are used in Zora. And the way that it works is, for example, let's focus on the security council. Maybe that's not the best. But the system config, right? So you can see that there are some state variables and they point to other contracts. For this view, it is really easy to understand how the project is built, what contracts reference each other, and how the data flow inside the project happens. Of course, this is not the default view. Like the view allows you to select them, move them around, you can color these, whatever your heart desires to make it easier for you to understand. We have two layout algorithms since it's a graph view. You can use the D3. We called it slow because it is not that fast. It uses force simulation to lay out the graph. We also have more like a hierarchical view which just lays the graph from left to right. And the third thing is the code. So all contracts have source code. And let's go back to the L1 standard bridge example. 
And we want to show the code to the researchers because to understand what a contract actually does, you need to look at the source code. And you might be weirded out by the fact that there are only two files. I'm going to touch upon this a little bit later because it is actually quite important why there's only two files and not, like, more. So you can just view the code in here. The part that I want to also show is that we see that it is important to be able to switch between views, right? So I can click L2 output Oracle on the left in the list view and it is selected in the values and the nodes panel. Vice versa, I can select something in the values and it is selected in the list and the nodes view, and I can select something in the nodes view, and it is selected on every single other view. So this is something which is really graphical and nice to show. And it basically is only the look inside the Discover JSON. This is just a nice way to visualize what's inside the Discover JSON. But it doesn't touch upon the way of how we even got that Discover JSON. So it is something we are working towards. It's not yet ready. You can only view things. It's basically read-only at this time. But in the future, we hope to make Discovery like this, so you'll be able to do your research in a nice graphical environment and do anything you need. So yeah, let's get back to... yeah, go ahead. So you were showing the source code, but the source code is not on-chain, right? Yeah, so the question is, how do I get the source code since it's not on chain? So I kind of skipped one assumption. It's that by the Ethereum state, we also kind of consider the Etherscan source code database. Even if you go to L2Beat and look into the products that we list, if a product does not verify their source code on Etherscan, we give them a big red warning. We expect you to verify your source code to show transparency to the users and to the researchers so they can do their stuff. It is something that I omitted, but we do use Etherscan or Etherscan derivatives like BlockScout or stuff like that to get the source code, and we heavily rely on the source code, because if we didn't get the source code, we wouldn't be able to call anything, because we don't know what the ABI is, right? So, yeah. If you have any questions, do please shout them out during the workshop. But I'll get back to the presentation. Yeah. So if you run into something where you can't get access to the source, you don't try any kind of decompilation? We don't try that. You don't try to decompile? No. I mean, there was one case where we knew what the source was, it just wasn't verified, so we, like, forcefully verified the source code for the project because they didn't want to do it for some reason, so we just did it. And we are not trying to decompile the bytecode in any way. I mean, if it's something that we can't get the project to verify and we need to look inside, we might decompile it, but Discovery does not try to do anything like that. It just assumes the happy case where the source is on Etherscan or whatever explorer the chain is using and just goes from there. But yeah, if we hit a snag, like there is no source, we just either accept it as we cannot look inside, or we talk with the team to verify the source code. Okay, if there are no more questions, I'm going to keep moving forward. So, going back to the mental model, because I think it is really important: if you leave this place with a single thing, it's just that discovery is just a program.
It has some input, it has some output, right? So the output is this discover.json, and the input is the config.jsonc. And the way you can think about discovery, if it makes thinking about it easier for you, is that it's like a scraper for a website. So scraping a website looks like this: you put in some address of a website, it downloads the content of that website, it tries to find any links, and it follows those links recursively. Discovery essentially does the same thing, but it doesn't download websites, it downloads the state of contracts, and it doesn't follow links but follows addresses to other contracts. And of course, discovery is not just, like, a simple thing that is like a black box. There are things happening inside that we are going to be talking about a little bit. So discovery is able to detect whether a contract is a proxy. It does source code flattening. I'm going to be talking about it later, like I said. It does template matching. It is something that also we are going to be touching upon during the demo part. It has custom handlers, which it executes. We are also going to be doing that during the demo. And it has typecasting. I left typecasting out because it will take like 20 minutes or 25 minutes to explain all of it. So I just left it out. If we have time or you are interested, you can hit me up after the workshop and I'll be glad to talk about it with you. And of course there is, like, an engine that orchestrates everything. But yeah, there are things inside that black box. So I have a demo prepared. There's a QR code. If you want to follow along with me, please do. And if you get stuck, I'll be able to help you gladly. I'll be going over the same thing personally. So if you just want to sit and just listen, no problem. The website has instructions so you can do it now or later, whatever. And also I really hope that the website works for you because it's self-hosted. So if it doesn't work, try disabling your VPN, try a different country. I tried it like five times. It worked without any VPNs. So I'm going to give you like a minute to get to the website if you want to follow along. And yeah. All right. Okay. I expect everybody to be on the website right now.
If you really need to discover something like a single address, you put it there. But if you want to build like a project, you'll be writing a config JSON either way. So there is also invert. Invert is like we used it to build those graphs you saw. We used mermaid before we got the protocol bit I was showing. So don't worry about those. The only thing that you're interested in is this cover. So the most boring part of doing anything fun is setting it up and configuring it. Unfortunately, I'll have to leave the part of configuration to you. You need to configure two things. You need to configure the Etherscan API key. So, you can actually call Etherscan. And you need to configure an RPC URL. And the sad part is that only few RPCs work that well with discovery. I'll say that the free tier of Alchemy worked for me without problems. So if you have Alchemy, do use it. There is like an asterisk where RPCs that only support block ranges of 10,000 when you're querying for logs do not work with discovery. For simplicity, we are essentially expecting you to have an RPC where the block range for log querying is basically infinite. So you will have to set this all up. I will just copy my end from the previous tries of that. So I have an end and you should probably do the same. You can always exploit the same variables if you want. So now we can get to the actual fun part. And before we actually configure anything, you know, we need an initial address. The address in the website is already there. But like this is also a good question. Like how do you actually get a hold of any address that belongs to a project? And I'm going to show you a simple example that you can find addresses belonging to a project. So we are going to be doing it this way. So like I said, I chose Zora for this presentation. So let's just go to the best website and find Zora. And how I do it is just go to their website of any project and try to bridge something. And also it is important there are multiple bridges for Zora, for example. But what you want is the official bridge. I know it's a touchy subject, but in L2B, we assume that the official bridge is part of the rollup. I don't want to get into it, but make sure you have the official canonical bridge to use. And we just want to bridge the smallest amount of if. do please don't bridge anything. We just want to get to the part where it says sign and don't sign, please. We just want to get to the part where it shows us the address we are going to be interacting with. So let me just bridge some if. Yeah, whatever. Let me just bridge some ease. Yeah, whatever. Okay, so this is the address of the contract we are interested in. And we can just copy it and store it for later. So now how do we configure the discovery to start at that address? So we need to create a folder structure that discovery is able to understand. So this folder structure looks like this. So just... I'll type it out here. So Discovery is the folder where all of the projects that you will use live, basically. And they're like configurations, results, flat files, source code, anything that pertains to a particular project lives in Discovery. And the actual project is like that. So anything related to Zora lives in there. But there's also one more level, which is Ethereum. This is the actual chain you're going to be doing Discovery on. 
Because we have the ability to do discovery on multiple chains, like I showed, maybe I'll", "eventId": "devcon-7", - "slot_start": 1731486000000, - "slot_end": 1731487800000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1VlzRulp0j62UZdPbUEc2y_6-IxSsimLBL_t3kn0xprA", - "resources_slides": null, + "slot_start": 1731638700000, + "slot_end": 1731645900000, + "slot_roomId": "classroom-c", + "resources_presentation": "https://docs.google.com/presentation/d/1T24SoFUkubwO-ppCiYWJoisNwayKtozmAgEJYNPvVho", + "resources_slides": "", "speakers": [ - "vivek-bhupatiraju" + "mateusz-radomski" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -241065,10 +240397,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -241076,6 +240404,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -241582,6 +240911,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -241613,6 +240943,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -241634,18 +240965,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -241661,7 +240980,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -241818,6 +241136,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -241834,6 +241153,17 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -242108,7 +241438,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -242125,65 +241454,49 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, 0 ] }, { "session": { - "id": "discovery-the-tool-at-the-core-of-l2beat", - "sourceId": "G9ESC7", - "title": "Discovery - the tool at the core of L2BEAT", - "description": "Hands on workshop about how to use an L2BEAT tool called discovery for mapping, researching and monitoring all the contracts involved in a project. We'll start by introducing the problem that discovery tries to solve and after that we'll get into trying to understand the architecture of a real world project by using the avenues this tool gives us. After this session the participant should feel empowered to use discovery to deepen his knowledge about all on-chain deployments.", - "track": "Developer Experience", - "type": "Workshop", - "expertise": "Intermediate", - "audience": "Developper", + "id": "dj-and-after-party", + "sourceId": "Z8UXRG", + "title": "DJ and After Party", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Architecture", - "Tooling", - "DevEx", - "Event monitoring", - "research", - "DevEx", - "Event monitoring", - "Tooling" - ], - "keywords": [ - "Holistic monitoring", - "Architecture research" - ], - "duration": 4792, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "2446734d4de4327fb1de791a429823052d3a3669c0f5d37607ec93e2160ffec5", - "sources_youtubeId": "azowA66W5UY", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6738656320d1f9ac48c07177", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6738656320d1f9ac48c07177.vtt", - "transcript_text": " Hi, hello. So I'm Mateusz, or you can call me Matt, like the short for Matthew. I'm from L2Beat. I'm half of the tooling team in the L2Beat. And my workshop is on the discovery. 
It's the tool we use, we build, to help us research projects, and it turned out that it is really useful at solving all of our problems. I mean, not all, but like majority of problems I'm going to later talk about. So, a funny story is that I almost lost the demo part of this presentation. And the only surviving copy of it was like some random Vim buffer. So I just wanted to point it out. Okay. So there is a lot of projects on L2Beat. You know, the amount of projects is growing. And we internally expect there to be at least 1,000 L2s. So currently you can see that there is 51 rollups and 66 validium and optimiums. So around let's call it 110 projects of only L2s. We also track bridges. Not to the same standard, but for all of those old tools, we have a minimum bar of data we want to show, and that would be the risk rosette. We want the risk rosette to be obviously correct, and the standard for this data is actually quite high. We want to show data that is correct correct so you don't lose your funds, you don't make a decision based on our data that is incorrect. So how to even maintain all of this, right? So just like a quick math lesson, right? So imagine that we have 110 projects, and every project has a single update every two months, right? So it's actually quite sparse. We see projects that are updating way more often. And even if every project had a single update every two months, we would have an 84% chance of seeing at least a single update every two months, we would have an 84% chance of seeing at least a single update in a day. So it's basically guaranteed that every single day something new will change on chain. We need to be able to detect it, see it, see what's changed and act upon it. And also, like, even if we assume that an update happens every six months, the chance is still 45%, right? So it's like a coin toss, basically. And these chances are basically always working against us. The more and more projects are going to be added, the higher those chances are basically to be 100%. So what is our needs, right? So like I said, there is a high chance of an update happening. And we need to be able to fastly see that it happened. And we need to be fast in reacting to it. If, for example, our optimism, right, any of the top docs has an update and we take like three weeks to process that update or to even see that an update happened, then we are showing incorrect data for three weeks, which is actually a quite long amount of time when it comes to Web 3.0. When an update happens, we need to know what changed. So it's not enough to see that something has changed. We also need to know what has changed, right? So did any of the risks, assumption that we had before, are now different? So also, we need to be able to look inside the project and compare it to the previous version. And as always, we want to automate as much as possible. You cannot get to 110 projects listed on a website and do all of them manually. It is possible with human effort, but we don't have the amount of resources to do it. So right now we have four researchers and we maintain all of the 110 projects with only four people and we also manage to do stuff that's additional. So discovery essentially, I think, is at the core of solving all of those problems. Of course, discovery is not a panacea. It does not solve all of the problems by itself. But of the problems that I listed, discovery is always at the core of the solution. So the mental model of discovery is like this. I like to think about discovery this way. 
And also I like understanding things from the low level. So let's try to understand the inputs and outputs of discovery. The input, of course, is the Ethereum state. We want to see what is the state of each project on a given, let's say, block number. So the input to discovery is that Ethereum state. Of course, we are not sending, like, a snapshot.zip to discovery. We are doing something like... I mean, we use RPC providers, which is essentially having the entire Ethereum state at your fingertips. You can do any call you would like. And to facilitate the querying the data from the state, we have a config JSON, which instructs the discovery program, which data we are interested in and how to process it. And after passing those two inputs to discovery, an output is generated, which is a discovery JSON file. Of course, it's all a simplification. The more, like, the actual mental model, which I don't even think is correct, and there's missing some parts. It's more something like this. So that's what I mean when I say discovery is at the core, right? All roads lead back to discovery. I'll try to, during this workshop, I'll try to touch upon all of the things that are listed here. So you'll be able to understand at least how this flowchart happened. So since I myself am a visual person, I want to create, I want to show you a demo of something nice to look at. So this is something we have been working on, which will enable us to move from command line to a graphical user interface. So right now we are calling it Discovery UI. And let's see, like like all of the projects that are listed here are projects that we are tracking with Discovery. So I have picked Zora for the project we are going to be using during this workshop. So let's see what kind of data we can expect to see in this tool for Zora. So of course there is like a lot of things to take in. But let's start from the left and keep going right. There are on the left, the thing is that they look like files, but they are actually contracts. They are kind of inspired by the file view in Visual Studio. And you can see, maybe, I don't know, the contrast is not the best, but I hope you'll see that. We have two contracts, which are the initial contracts. And by initial, I mean the contracts from which we are going to be starting and all of the contracts that are on the left have been found based on those two initial contracts. So it is, as you can see, like it is quite useful to be able to find two contracts used in a project and then basically find all other contracts used by that project and also this updates automatically so if anything new happens, new is added or is removed, they are automatically updated. Each of those contracts has some values, right? So each contract has an address, a name, a description given by a researcher. And let's focus on the fields for now. So fields are state variables variables that we have found in the ABI which are either public variables or functions which don't take any arguments and just return something so we are trying to build the state of the contract through these. There is also one more part which will be important later during the demo. I mean during the workshop part. Which is that we try to build arrays from functions which take a single argument which is of type integer. So we just assume that this function is like get something by index and we try to get all of those things. And you can see that there are actually addresses inside of those values. And those addresses lead to other addresses. 
And this is how discovery works. It gets all of those state variables, and if it finds an address, it assumes that this address is also connected and just keeps discovering on those addresses. And the third view here is like a graph view. So all of those projects here are used in Zora. And the way that it works is, for example, let's focus on the security council. Maybe that's not the best. But the system config, right? So you can see that there are some state variables and they point to other contracts. For this view, it is really easy to understand how the project is built, what contracts reference each other, and how the data flow inside the project happens. Of course, this is not the default view. Like the view allows you to select them, move them around, you can color these, whatever your heart desires to make it easier for you to understand. We have two layout algorithms since it's a graph view. You can use the D3. We called it slow because it is not that fast. It uses force simulation to lay out the graph. We also have more like a hierarchical view which just lays the graph from left to right. And the third thing is the code. So all contracts have source code. And let's go back to the L1 standard bridge example. And we want to show the code to the researchers because to understand what a contract actually does, you need to look at the source code. And you might be weirded out by the fact that there are only two files. I'm going to touch upon this a little bit later because it is actually quite important why there's only two files and not like more. So you can just view the code in here. The part that I want to also show is that we see that it is important to be able to switch between views, right? So I can click L2 output Oracle on the left in the list view and it is selected in the values and the nodes panel. Vice versa, I can select something in the values and it is selected in the list and the nodes view and I can select something in the nodes view, and it is selected on every single audit review. So this is something which is really graphical and nice to show. And it basically is only the look inside the Discover JSON. This is just a nice way to visualize what's inside the Discover JSON. But it doesn't touch upon the way of how we even got that Discover JSON. So it is something we are working towards. It's not yet ready. You can only view things. It's basically read only at this time. But in the future, we hope to make Discovery this, so you'll be able to do your research in a nice graphical environment and do anything you need. So yeah, let's get back to, yeah, go ahead. So you were showing the source code, but the source code is not on-chain, right? Yeah, so the question is, how do I get the source code since it's not on chain? So I kind of skipped one assumption. It's that by the Ethereum state, we also kind of consider the EtherScan source code database. Even if you go to L2Beat and look into the products that we list, if a product does not verify their source code on Etherscan, we give them a big red warning. We expect you to verify your source code to show transparency to the users and to the researchers so they can do their stuff. It is something that I omitted, but we do use Etherscan or Etherscan derivatives like BlockScout or stuff like that to get the source code, and we heavily rely on the source code because if we didn't get the source code, we wouldn't be able to call anything because we don't know what the ADBI is, right? So, yeah. 
If you have any questions, do please shut them up during the workshop. But I'll get back to the presentation. Yeah. So if you run into something where you can't get access to the source, you don't try any kind of decompilation? We don't try that. If you don't try decompile? No. I mean, there was one case where we knew what the source was, it just wasn't verified, so we like forcefully, like we verified the source code for the project because they didn't want to do it for some reason, so we just did it. And we are not trying to decompile the bytecode in any way. I mean, if it's something that we can't get the project to verify and we need to look inside, we might decompile it, but Discovery does not try to do anything like that. It just assumes the happy case where the source is on ETHESCAN or whatever explorer the chain is using and just goes from there. But yeah, if we hit snag, like there is no source, we just either accept it as we cannot look inside, or we talk with the team to verify the source code. Okay, if there are no more questions, I'm going to keep moving forward. So, going back to the mental model, because I think it is really important, like, if you leave this place with a single thing, it's just that discovery is just a program. It has some input, it has some output, right? So the output is this discover.json, and the input is the config.json.c. And the way you can think about discovery, if it makes thinking about it easier for you, is that it's like a scraper for a website. So scraping a website looks like you put some address of a website, it downloads the content of that website, it tries to find any links, and it follows those links recursively. Discovery essentially does the same thing, but it doesn't download websites, it downloads the state of contracts and doesn't follow links but follows addresses to other contracts. And of course, discovery is not just like a simple thing that is like a black box. There are things happening inside that we are going to be talking about a little bit. So discovery is able to detect whether a contract is a proxy. It does the source code flattening. I'm going to be talking about it later, like I said. It does template matching. It is something that also we are going to be touching upon during the demo part. It has custom handlers, which it executes. We are also going to be doing that during the demo. And it has typecasting. I left typecasting out because it is really, it will take like 20 minutes or 25 minutes to explain all of it. So I just left it out. If we have time or you are interested, you can hit me up after the workshop and I'll be glad to talk about it with you. And of course there is like an engine that orchestrates everything. But yeah, it's like there are things inside that black box. So I have a demo prepared. There's a QR code. If you want to follow along with me, please do. And if you get stuck, I'll be able to help you gladly. I'll be going over the same thing personally. So if you just want to sit and just listen, no problem. The website has instructions so you can do it now or later, whatever. And also I really hope that the website works for you because it's self-hosted. So if it doesn't work, try disabling your VPN, try a different country. I tried it like five times. It worked without any VPNs. So I'm going to give you like a minute to get to the website if you want to follow along. And yeah. All right. Okay. I expect everybody to be on the website right now. 
If you didn't manage to scan the QR code or type it through, just ask your neighbor for a nice deed so they will be able to show you the URL. So like I said, I will be going over the same thing. Personally, I have it on my iPad so I will be going over all of the things that are written in here and also I will be like adding some additional context for it. And also, this is the important part. I forked discovery from the L2B repository just for the purposes of this demonstration. If you actually want to use discovery yourself, please use the original repository because I'm not going to be maintaining this fork and it's going to get really stale really quick. And why did I fork it? It's just because we are not, no, it's an internal tool. We will try to make it more available to the public. It's just, it is in a rough stage right now. But it is still cool to show what this tool is able to do. OK. So is this visible? Like the contrast is okay? Okay. So I have downloaded the, the only thing that I have done is just npm install. And if you, if it worked, you should be able to just do npx discovery. And something along the lines of this should appear. So there are two subcommands. You don't really need to worry about them. So there is the single discovery. It's just for convenience. If you really need to discover something like a single address, you put it there. But if you want to build like a project, you'll be writing a config JSON either way. So there is also invert. Invert is like we used it to build those graphs you saw. We used mermaid before we got the protocol bit I was showing. So don't worry about those. The only thing that you're interested in is this cover. So the most boring part of doing anything fun is setting it up and configuring it. Unfortunately, I'll have to leave the part of configuration to you. You need to configure two things. You need to configure the Etherscan API key. So, you can actually call Etherscan. And you need to configure an RPC URL. And the sad part is that only few RPCs work that well with discovery. I'll say that the free tier of Alchemy worked for me without problems. So if you have Alchemy, do use it. There is like an asterisk where RPCs that only support block ranges of 10,000 when you're querying for logs do not work with discovery. For simplicity, we are essentially expecting you to have an RPC where the block range for log querying is basically infinite. So you will have to set this all up. I will just copy my end from the previous tries of that. So I have an end and you should probably do the same. You can always exploit the same variables if you want. So now we can get to the actual fun part. And before we actually configure anything, you know, we need an initial address. The address in the website is already there. But like this is also a good question. Like how do you actually get a hold of any address that belongs to a project? And I'm going to show you a simple example that you can find addresses belonging to a project. So we are going to be doing it this way. So like I said, I chose Zora for this presentation. So let's just go to the best website and find Zora. And how I do it is just go to their website of any project and try to bridge something. And also it is important there are multiple bridges for Zora, for example. But what you want is the official bridge. I know it's a touchy subject, but in L2B, we assume that the official bridge is part of the rollup. 
I don't want to get into it, but make sure you have the official canonical bridge to use. And we just want to bridge the smallest amount of if. do please don't bridge anything. We just want to get to the part where it says sign and don't sign, please. We just want to get to the part where it shows us the address we are going to be interacting with. So let me just bridge some if. Yeah, whatever. Let me just bridge some ease. Yeah, whatever. Okay, so this is the address of the contract we are interested in. And we can just copy it and store it for later. So now how do we configure the discovery to start at that address? So we need to create a folder structure that discovery is able to understand. So this folder structure looks like this. So just... I'll type it out here. So Discovery is the folder where all of the projects that you will use live, basically. And they're like configurations, results, flat files, source code, anything that pertains to a particular project lives in Discovery. And the actual project is like that. So anything related to Zora lives in there. But there's also one more level, which is Ethereum. This is the actual chain you're going to be doing Discovery on. Because we have the ability to do discovery on multiple chains, like I showed, maybe I'll", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731638700000, - "slot_end": 1731645900000, - "slot_roomId": "classroom-c", - "resources_presentation": "https://docs.google.com/presentation/d/1T24SoFUkubwO-ppCiYWJoisNwayKtozmAgEJYNPvVho", - "resources_slides": null, - "speakers": [ - "mateusz-radomski" - ] + "slot_start": 1731668400000, + "slot_end": 1731675600000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1TSMgbtSJLOzOAEiyPEoXuekpCOjQiJ2mYcLOPgFaF3E", + "resources_slides": "" }, "vector": [ 0, 0, 0, - 6, 0, 0, 0, 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -242446,7 +241759,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -242946,7 +242258,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -242955,7 +242266,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -242987,7 +242297,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -243180,7 +242489,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -243198,7 +242506,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -243488,10 +242795,11 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -243500,7 +242808,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -243510,9 +242817,9 @@ }, { "session": { - "id": "dj-and-after-party", - "sourceId": "Z8UXRG", - "title": "DJ and After Party", + "id": "dj-anderson", + "sourceId": "V393ZX", + "title": "DJ Anderson", "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", "track": "Entertainment", "type": "Music", @@ -243525,10 +242832,11 @@ "language": "en", "speakers": [], "eventId": "devcon-7", - "slot_start": 1731668400000, - "slot_end": 1731675600000, + "slot_start": 1731567600000, + "slot_end": 1731571200000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1TSMgbtSJLOzOAEiyPEoXuekpCOjQiJ2mYcLOPgFaF3E" + "resources_presentation": "https://docs.google.com/presentation/d/11UdQ5iBzKBx_FS4T0nj0XPX9C1X0bSm-aP2bwq7jrOI", + "resources_slides": "" }, "vector": [ 0, @@ -244841,11 +244149,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -244867,9 +244170,9 @@ }, { "session": { - "id": "dj-anderson", - "sourceId": "V393ZX", - "title": "DJ Anderson", + "id": "dj-i34r7h", + "sourceId": "QTHGFE", + "title": "DJ @i34r7h", "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", "track": "Entertainment", "type": "Music", @@ -244882,10 +244185,11 @@ "language": "en", "speakers": [], "eventId": "devcon-7", - "slot_start": 1731567600000, - "slot_end": 1731571200000, + "slot_start": 1731639600000, + "slot_end": 1731643200000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/11UdQ5iBzKBx_FS4T0nj0XPX9C1X0bSm-aP2bwq7jrOI" + "resources_presentation": "https://docs.google.com/presentation/d/1f64FrhWEvOeHjw8XlNFarHOwwNkBaofQKZdOavm-Zq4", + "resources_slides": "" }, "vector": [ 0, @@ -246198,11 +245502,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -246224,9 +245523,9 @@ }, { "session": { - "id": "dj-i34r7h", - "sourceId": "QTHGFE", - "title": "DJ @i34r7h", + "id": "dj-mayu", + "sourceId": "XV779L", + "title": "DJ MAYU", "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", "track": "Entertainment", "type": "Music", @@ -246239,10 +245538,11 @@ "language": "en", "speakers": [], "eventId": "devcon-7", - "slot_start": 1731639600000, - "slot_end": 1731643200000, + "slot_start": 1731646800000, + "slot_end": 1731650400000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1f64FrhWEvOeHjw8XlNFarHOwwNkBaofQKZdOavm-Zq4" + "resources_presentation": "https://docs.google.com/presentation/d/1t2zQdmj0AJUDWbkdwI8GyR90vs3nQ_7TKvydOwUZYYk", + "resources_slides": "" }, "vector": [ 0, @@ -247555,11 +246855,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -247581,25 +246876,28 @@ }, { "session": { - "id": "dj-mayu", - "sourceId": "XV779L", - "title": "DJ MAYU", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", + "id": "djing-pino7", + "sourceId": "SPWJHX", + "title": "DJing - pino7", + "description": "I am a builder and a volunteer in Devcon SEA. Back in the days I've decided that I wanted to become awesome, and here I am in my journey. I am UX/UI Designer and I am becoming a React Developer. I have always being passionate about music. And there's always space for it during my life journey. I love communities, people, organizing events and playing some good music.", "track": "Entertainment", "type": "Music", - "expertise": "", - "audience": "Engineering", + "expertise": "Intermediate", + "audience": "Community", "featured": false, "doNotRecord": false, "keywords": [], "tags": [], "language": "en", - "speakers": [], + "speakers": [ + "pino7" + ], "eventId": "devcon-7", - "slot_start": 1731646800000, - "slot_end": 1731650400000, + "slot_start": 1731564000000, + "slot_end": 1731567600000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1t2zQdmj0AJUDWbkdwI8GyR90vs3nQ_7TKvydOwUZYYk" + "resources_presentation": "https://docs.google.com/presentation/d/1FZiG2A1-zzZBVPF6IvnlZPydiJX9JFyp4ngPzFzJTEo", + "resources_slides": "" }, "vector": [ 0, @@ -247874,6 +247172,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -248910,6 +248209,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -248917,8 +248217,6 @@ 0, 0, 0, - 2, - 0, 0, 2, 0, @@ -248928,37 +248226,57 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "djing-pino7", - "sourceId": "SPWJHX", - "title": "DJing - pino7", - "description": "I am a builder and a volunteer in Devcon SEA. Back in the days I've decided that I wanted to become awesome, and here I am in my journey. I am UX/UI Designer and I am becoming a React Developer. I have always being passionate about music. And there's always space for it during my life journey. I love communities, people, organizing events and playing some good music.", - "track": "Entertainment", - "type": "Music", - "expertise": "Intermediate", - "audience": "Community", + "id": "do-you-really-know-your-web3-users", + "sourceId": "YRDFDY", + "title": "Do you really know your web3 users?", + "description": "Product discovery is to understand users' problems and using that knowledge to build a product. In the world of Web3, where anonymity & privacy prevail, how can teams identify user segments & collect relevant data to understand behaviours behind accounts? As we aim to onboard the next billion web3 users, how should we approach activation & growth, considering best practices and emerging trends? 
This panel will explore strategies for effective product discovery in a privacy-centric ecosystem.", + "track": "Usability", + "type": "Panel", + "expertise": "Beginner", + "audience": "Product", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], - "language": "en", - "speakers": [ - "pino7" + "tags": [ + "Product-market fit", + "User Experience", + "UI/UX", + "User Research", + "product", + "discovery", + "Product-market fit", + "UI/UX", + "User Experience", + "User Research" + ], + "keywords": [ + "Product Management", + "Strategy", + "Product Discovery" ], + "duration": 3425, + "language": "en", + "sources_swarmHash": "ae5d589708b7deb49fa418329e2b03c6b8b14d698f9e4c29bd4e4a97b2b285b0", + "sources_youtubeId": "KSfkX-dGskg", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673420ce9dbb7a90e172caa6", "eventId": "devcon-7", - "slot_start": 1731564000000, - "slot_end": 1731567600000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1FZiG2A1-zzZBVPF6IvnlZPydiJX9JFyp4ngPzFzJTEo" + "slot_start": 1731394800000, + "slot_end": 1731398400000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1NT9-QOOV4dbn06g_FMOVREI8em-zEVjMVNnJ2DBkCuc", + "resources_slides": "", + "speakers": [ + "rahul-rumalla", + "alice-chaverot", + "austin-keeble", + "romina-bungert" + ] }, "vector": [ 0, @@ -248969,7 +248287,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -249234,47 +248551,11 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, + 6, + 6, + 6, + 6, 0, 0, 0, @@ -249762,6 +249043,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -249803,6 +249085,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -249830,6 +249114,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -250015,6 +249300,22 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -250275,7 +249576,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -250284,6 +249584,18 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, 2, 0, 0, @@ -250292,68 +249604,60 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "do-you-really-know-your-web3-users", - "sourceId": "YRDFDY", - "title": "Do you really know your web3 users?", - "description": "Product discovery is to understand users' problems and using that knowledge to build a product. In the world of Web3, where anonymity & privacy prevail, how can teams identify user segments & collect relevant data to understand behaviours behind accounts? As we aim to onboard the next billion web3 users, how should we approach activation & growth, considering best practices and emerging trends? This panel will explore strategies for effective product discovery in a privacy-centric ecosystem.", - "track": "Usability", - "type": "Panel", - "expertise": "Beginner", - "audience": "Product", + "id": "does-ethereum-really-need-pbs-solving-mev-at-the-app-vs-the-infrastructure-layer", + "sourceId": "TNKFPP", + "title": "Does Ethereum Really Need PBS? Solving MEV at the app vs the infrastructure layer", + "description": "In this talk, we will give a brief history of MEV (Maximal Extractable Value) and its influence on enshrining PBS (Proposer Builder Separation) into Ethereum. We will explore the Ethereum community’s evolving perspectives on PBS while looking at successful outcomes, unexpected consequences, and alternate solutions. 
\r\n\r\nUltimately, the talk will provocatively ask: does Ethereum really need PBS at all?", "track": "Cryptoeconomics", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "redistribution" ], "keywords": [ "Intents", "MEV", "PBS", "Redistribution" ], "duration": 1475, "language": "en", "sources_swarmHash": "81761a34dfd0fb923c0c06c5a197e014fbd7fbeb3e32c50fc0fb7f8b110f9696", "sources_youtubeId": "4-QVxNAeIsw", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736f4bf1b0f83434d47cb02", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735aeab9dbb7a90e1bde09b.vtt", "transcript_text": " Hello, welcome back. So, today I will be talking about Phantom Zone. But before I dive deep into Phantom Zone and talk about the rest of the things, I would walk you through the motivation behind Phantom Zone. So, if you guys are not familiar with a globally mutually trusted third party, I would want to introduce you to this idea of a globally mutually trusted third party. What is this globally mutually trusted third party? Well, it provides you three guarantees. The first thing that it says is that whatever information you send to this third party, it will always keep that private. It would not leak it to anyone. The second guarantee that it provides is that all the information that it has collected over time from different people, you know, we have been sending this globally mutually trusted third party all our information, let's say for a year, all the information that it has collected for all these years, it will always keep it private and would not allow anyone to poke inside its memory. And the third guarantee that it provides, which makes this particular party very magical, is that it can compute any arbitrary function we want it to compute, as long as we provide enough authorization to be able to compute that function. And it will only output the necessary outputs. Usually, I sort of refer to this mutually trusted third party as a mutually shared computer. And if you guys are familiar with something called the God Protocol, this is the God Protocol. This is a picture from an essay back in 1997. So the first observation to make is that if we really want this party to be globally mutually trusted, we want this party to be able to prove these three guarantees to any individual without requiring any additional interaction, which is why we require cryptography. We cannot just base it on certain legal arguments or something like that; we require cryptography for building this globally mutually trusted shared computer. And we started to build Phantom Zone to eventually build the God Protocol, but to stick within the realms of practicality, we could only build an abridged version of it.
So for the rest of the talk, I will be talking about, A, what is Phantom Zone, and why it is an abridged version of this God Protocol. And the second important thing that I'll be talking about is, how can we push the frontiers to eventually build the God Protocol? Okay, so Phantom Zone, the abridged version. The key idea in Phantom Zone is something called multi-party fully homomorphic encryption. And for me to describe to you multi-party fully homomorphic encryption, I have to first describe to you what single-party encryption is. In single-party encryption, you have a single client, this guy over here. They hold a secret. They keep the secret private to them. They can encrypt their information, which is A here, with their secret and produce an FHE ciphertext. And then they can send this FHE ciphertext to any server. And the server can evaluate any arbitrary function on their private input, which is A, and produce an output ciphertext, and then the client can receive the output ciphertext and decrypt it. So this is single-party FHE. Coming to multi-party FHE: well, the key idea in multi-party FHE is that you split this secret, which is held private by a single client in single-party FHE, among many clients. So you have S0, S1, S2 as secret shards split among these three people over here. The first step in multi-party FHE is something called collective public key generation. So all these three parties come together and they generate the collective public key. And then all these three parties, using the collective public key, encrypt their private inputs and produce FHE ciphertexts. And then they send their FHE ciphertexts to the server. The server executes an arbitrary function on the FHE ciphertexts and produces an FHE output. The key thing to notice here is that all these parties would have to produce a decryption share to eventually decrypt the output ciphertext here. So they produce the decryption share using the secret shards, and then they send it to each other, and only then are they able to decrypt the output ciphertext. Because in this case, the secret was split among all these parties. So why is Phantom Zone an abridged version? Well, because Phantom Zone, assuming that in the future we're able to add publicly verifiable FHE to it, can only guarantee the three guarantees that I talked about in the God Protocol to the holders of the secret shards. It cannot guarantee these three guarantees to everyone around the globe. Which is why Phantom Zone is just an abridged version of it. Okay. So you might wonder, how do we build towards the God protocol? How do we even do it? Well, I would have loved to say that after a lot of research, like five years of research, we have figured out the solution to build the God protocol. But no, there are no enlightening thoughts here. And there's one obvious answer to eventually building the God protocol, which is program obfuscation. What's program obfuscation? Well, to simply describe program obfuscation, let's just assume that you have a function f, right? What you can do with program obfuscation is you take this function f and perform some transformations on this function f and produce an obfuscated circuit.
So you might wonder, how do we build towards the God protocol? How do we even do it? Well, what I would like to say at the moment is, I would have loved to say that after five years of research we have figured out the solution to build the God protocol. But no, there are no enlightening thoughts here. And there's one obvious answer to eventually building the God protocol, which is program obfuscation. What's program obfuscation? Well, to simply describe program obfuscation, let's just assume that you have a function f, right? What you can do with program obfuscation is you take this function f and perform some transformations on this function f and produce an obfuscated circuit. You can give this obfuscated circuit to someone else, and program obfuscation guarantees that the only thing that you can learn from that obfuscated circuit is the input-to-output map and nothing else. Now you might be wondering why this is useful, because if the function is trivial, then you can easily learn it from the input-to-output map. Program obfuscation becomes very interesting when you obfuscate a program that is a cryptographic function. For example, let's just say that I take a function that decrypts any ciphertext that is encrypted to my public key. So I take a function, and this function has my secret key, and it decrypts any ciphertext that was encrypted to me using my public key. And I perform certain transformations using program obfuscation on this function and produce an obfuscated circuit. I give this obfuscated circuit to someone else. What they can do is that they can decrypt any ciphertext that was encrypted to me using this obfuscated circuit. But they can never, ever learn what the secret is inside that circuit. They can never learn my secret key. And these are the class of functions where program obfuscation becomes useful. And I'll tie it to building the God protocol later in the slides. So now, assume that we can only build program obfuscation for some limited class of functions, not for a general class of functions. I'll tell you one way of building the God protocol using program obfuscation. Step one: modify the FHE scheme that we were using before to become publicly verifiable. What do I mean by that? Well, a publicly verifiable FHE scheme does two things. It evaluates the FHE function, which, you know, a normal FHE scheme does. In addition to evaluating the function, it also produces a proof of correct evaluation, so that anyone can verify this proof against the output ciphertext and be assured that the server that executed this FHE function executed it correctly, which I usually refer to as the proof pi of correct evaluation. Step two: replace the collective key generation operation that we did in the multi-party FHE with a trusted setup. In the trusted setup, you have an arbitrary number of people here. They perform some MPC protocol to produce FHE keys. There are two types of FHE keys which are very important: the public key and the bootstrapping key. The bootstrapping key is usually used for some sort of FHE operations that you can completely black-box. The key thing here is that no one knows the ideal secret key, because we're doing a trusted setup in MPC to generate these two keys. The third step is: modify the trusted setup to also output an obfuscated conditional decryption oracle. Okay, that's a mouthful. I'll go into it one level deeper. What is an obfuscated conditional decryptor? This particular conditional decryptor is an obfuscated program of the following functionality. What it does is, it takes an output ciphertext and a proof of correct evaluation of the FHE circuit. It verifies whether the proof is valid and decrypts the output ciphertext if and only if the proof is valid. And this sort of tells you why we assumed in the first place that program obfuscation may be feasible only for a limited class of functions: because to build the God protocol using the FHE route, we only need program obfuscation to be practical for this obfuscated conditional decryptor. So we modify the trusted setup to also output this obfuscated conditional decryptor, and that's it. 
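A minimal sketch of that conditional-decryptor logic, assuming hypothetical verify() and decrypt() stand-ins for a publicly verifiable FHE scheme's proof check and decryption. In the actual construction the whole closure would be program-obfuscated, so the embedded secret key could not be read out.

```python
from typing import Callable

def make_conditional_decryptor(
    secret_key: bytes,
    verify: Callable[[bytes, bytes], bool],    # (output_ct, proof) -> bool
    decrypt: Callable[[bytes, bytes], bytes],  # (secret_key, output_ct) -> plaintext
) -> Callable[[bytes, bytes], bytes]:
    # The returned closure embeds secret_key; obfuscation is what would
    # keep it hidden from whoever runs the circuit.
    def conditional_decrypt(output_ct: bytes, proof: bytes) -> bytes:
        # Decrypt if and only if the proof of correct FHE evaluation holds.
        if not verify(output_ct, proof):
            raise ValueError("invalid evaluation proof; refusing to decrypt")
        return decrypt(secret_key, output_ct)
    return conditional_decrypt
```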
And another thing to note is that this conditional decryptor also has the secret key, the ideal secret key that no one knows, embedded inside it. Okay. So the end-to-end flow is: you do MPC to generate three things. Public key, bootstrapping key, and the obfuscated conditional decryptor, which I now realize is somewhat of a mouthful. I should have chosen some other term. Anyways, the second flow is: now anyone can encrypt their private inputs using the public key that is the output of the MPC protocol. So you have multiple ciphertexts here. And then they can send them to the FHE server. The FHE server evaluates the FHE function and outputs the encrypted output. In addition, it produces a proof, because the FHE server is evaluating a publicly verifiable FHE scheme. And then we plug in the proof as well as the output to the obfuscated conditional decryptor, and the conditional decryptor would only decrypt the encrypted output if and only if the proof is valid. So this is one way of building the God protocol, using publicly verifiable FHE and program obfuscation for the obfuscated conditional decryptor. So there's one way, which I've just shown you, but we need new ideas to push the frontiers and to finally build program obfuscation, or indistinguishability obfuscation, if you're familiar with that. Here, I've shown you just one way. But if you are able to come up with new ideas, then probably we can make program obfuscation more practical for general circuits, not just for the limited class of functions that we used before. And probably, we can directly build the God protocol from program obfuscation. So while I was exploring this field of program obfuscation and iO, one key observation that I made was that it's really hard to get efficient program obfuscation from standard assumptions, and we would inevitably require exotic assumptions. And I'll tell you what are standard assumptions and what are exotic assumptions. Well, a standard assumption is an assumption that has been there for a while, for example dlog, the discrete log problem. There also exists additional incentive for people to break these standard assumptions. And exotic assumptions are somewhat newer assumptions. Like, they have only been there for like five years, or not even five, it was like two to three years. What we can do as a community, realizing that we might inevitably need newer assumptions to build practical program obfuscation, is we can start examining these newer assumptions, start breaking them, start testing them. Or we can build applications using these assumptions so that we can incentivize people to break them and tell us whether they're broken or not. And then eventually, in a few years, we would have candidate assumptions that are newer assumptions, but they have by then become standard, using which we can build practical program obfuscation. And taking a first step towards this, we are launching a bounty program to break one of the candidate assumptions, which is called program obfuscation by local mixing. The way I think about this particular assumption is that they're taking a more computational-complexity approach than taking the traditional approach of using algebraic structures to build program obfuscation. The goal of the bounty is that we provided an obfuscated circuit with roughly 240,000 gates, which was obfuscated from an original circuit with roughly 1,000 gates. And you had to find the original circuit. You can learn more about the bounty at Obfustopia.io. 
If you know what Obfustopia is, Obfustopia means that we're living in a world where obfuscation is practical, and the bounty amount is 10K. And this bounty is launched in collaboration with the Ethereum Foundation and 0xPARC. Okay. So before I break, and I think that I have a bunch of time, I would want to make one conjecture. And the conjecture goes as follows. I think the God protocol is the convergence of cryptography. Probably building the God protocol would require a certain sort of FHE, that is just one route, like publicly verifiable FHE and other things like MPC for the trusted setup and so on and so forth. But once you build the God protocol, I think it encompasses everything. It gives us everything that we have been wanting for a while. It gives us witness encryption. It gives us zero-knowledge proofs via signatures. It gives us MPC, multi-party computation. It gives us FE, functional encryption, all of these things that we've been demanding for a while. And this is also one of the major reasons that we should start investigating much more seriously how to get practical program obfuscation and finally build the God protocol. And that's it. Thank you. All right. Thank you for the talk. We do have some questions rolling in. Yeah, let's go through some of the questions. Let's start with the first one. Can we implement threshold ECDSA with Phantom Zone? At the moment, yes, because you can express everything. Like, theoretically, yes, but it would be very impractical to implement ECDSA with Phantom Zone at the moment, because with ECDSA you're doing elliptic curve operations, which is a lot of operations. As far as I understand, threshold ECDSA is possible. It takes two days to generate one single signature. All right, so next question. Can you tell us a little bit more about the definition of obfuscation as a virtual black box? That's the first question over here. Isn't the definition of obfuscation as a virtual black box impossible? I am not posing obfuscation as a virtual black box. I did not mean to say obfuscation is a virtual black box. By the way, the impossibility result for virtual black box is only for certain very restricted classes of programs. It's not for general classes of programs. Eventually you can aim for virtual black box with certain caveats. But again, I'm saying that my definition of obfuscation is not virtual black box. All right, and what can be done today with Phantom Zone? At the moment, as I said, Phantom Zone is an abridged version of the God protocol. It does not even have a publicly verifiable FHE scheme, so it does not give you all the three guarantees. The only guarantee that it gives you is that it will execute the function that you ask it to execute, while private information can be coming from multiple people. It'll keep the information private, but you'll have to trust it for that. So you'll have to trust this particular server to always keep the information private and not send it to anyone else. Perfect. And we do have one last question. Oh, cool. More questions rolling in. Can obfuscating programs undermine open source transparency and make it harder to verify the absence of malicious code? I see. Make it harder to verify the absence of malicious code. 
Well, that is assuming that the entire program is obfuscated. When I say obfuscation, we require obfuscation for certain parts of the program, which can interact with a public program and a private program which is obfuscated. I understand that obfuscation can be used for many malicious purposes as well, like for example, you know, like, there are several reasons why people might be interested in obfuscation, but we can, as a community, make sure that there's interaction between the public interfaces and the private interfaces which are obfuscated. All right. And why do you call the publicly verifiable FHE circuit obfuscated? Doesn't the require solidity verifier or something which is public? No, I think once I give you obfuscated circuit, there are certain guarantees that you can learn from the obfuscated circuit itself, that it does not reveal anything, as long as you've done the obfuscation correctly. Alright, and do you have evidence that the conditional decryption functionality is possible using I.O.? Yes. There are theoretical results and we're trying to make it practical as well. All right. Can you give one example each on how I.O. can replace ZK, MPC, FHE? Okay. So for ZK, what you can do is like you can embed a secret key inside this off-secreted circuit, the God protocol, and a zero-knowledge proof is just a signature from this God protocol. Whatever secret exists inside this particular server or this God protocol, or this FHC circuit, off-secreted circuit, a signature by that thing becomes a zero-knowledge proof. So you do not require zero- zero knowledge on the client side anymore. For MPC, again, it's a globally mutually trusted third party. All of us encrypt our private inputs with the public key corresponding to the secret key that lives inside this off-site circuit. And we send our private inputs to this. It decrypts that, performs some function, and produces the output. So that's one way of replacing MPC, and the same applies for FG. Cool. We can stay here for maybe another 10 seconds if there are any new questions rolling in. All right, cool.", "eventId": "devcon-7", - "slot_start": 1731394800000, - "slot_end": 1731398400000, + "slot_start": 1731639900000, + "slot_end": 1731640500000, "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1NT9-QOOV4dbn06g_FMOVREI8em-zEVjMVNnJ2DBkCuc", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1y6tAISW_K9exOHiT-8JDt3qSFgyDYP0v5Zkc3T7JIdw", + "resources_slides": "https://drive.google.com/file/d/1ajfbPhbuj_WS7D8L0JFilnwZ1mPLSntT/view", "speakers": [ - "rahul-rumalla", - "alice-chaverot", - "austin-keeble", - "romina-bungert" + "felix-leupold" ] }, "vector": [ 0, 0, + 6, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -250619,13 +249923,11 @@ 0, 0, 0, - 6, - 6, - 6, - 6, 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -251112,7 +250414,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -251154,8 +250455,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -251183,7 +250482,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -251370,12 +250668,11 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -251665,9 +250962,11 @@ 0, 0, 0, + 2, + 0, + 0, 0, 0, - 2, 0, 0, 0, @@ -251681,47 +250980,41 @@ }, { "session": { - "id": "does-ethereum-really-need-pbs-solving-mev-at-the-app-vs-the-infrastructure-layer", - "sourceId": "TNKFPP", - "title": "Does Ethereum Really Need PBS? 
Solving MEV at the app vs the infrastructure layer", - "description": "In this talk, we will give a brief history of MEV (Maximal Extractable Value) and its influence on enshrining PBS (Proposer Builder Separation) into Ethereum. We will explore the Ethereum community’s evolving perspectives on PBS while looking at successful outcomes, unexpected consequences, and alternate solutions. \r\n\r\nUltimately, the talk will provocatively ask: does Ethereum really need PBS at all?", - "track": "Cryptoeconomics", - "type": "Lightning Talk", + "id": "dont-get-rekt-practical-threat-detection-for-users-and-devs", + "sourceId": "Y7QGNQ", + "title": "Don’t get rekt: practical threat detection for users and devs", + "description": "Learn to uncover, and protect against, weaponized repositories, sites and tools targeting web3 users, devs & researchers. With examples and hands-on exercises, the session begins with topics like detecting suspicious activity in sites, handling wallet secrets & signatures, decoding calldata of malicious txs, and simulating them to avoid attacks. To then cover more advanced techniques to spot harmful backdoors in code repositories and services that can impact on devs & users’ safety.", + "track": "Security", + "type": "Workshop", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Developer", "featured": false, - "doNotRecord": false, - "tags": [ - "redistribution" - ], + "doNotRecord": true, "keywords": [ - "Intents", - "MEV", - "PBS", - "Redistribution" + "user safety", + "developer safety", + "phishing" + ], + "tags": [ + "Tooling", + "Security", + "phishing", + "Security", + "Tooling" ], - "duration": 1475, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6736f4bf1b0f83434d47cb02", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735aeab9dbb7a90e1bde09b.vtt", - "transcript_text": " Tanya Cushman Reviewer:\"Presenter\": Hello, welcome back. So, today I will be talking about Phantom Zone. But before I dive deep into Phantom Zone and talk about the rest of the things, I would walk you through the motivation behind Phantom Zone. So, if you guys are familiar with globally mutually trusted third party, I would want to introduce you to this idea of globally mutually trusted third party. What is this globally mutually trusted third party? Well, it provides you three guarantees. The first thing that it says is that whatever information you send to this third party, it will always keep that private. It would not leak it to anyone. The second guarantee that it provides is that all the information that it has collected over time from different people, you know, we have been sending this globally mutually trusted third party all our information let's say for a year. All the information that it has collected for all these years, it will always keep it private and would not allow anyone to poke inside its memory. And the third guarantee that it provides, which makes this particular party very magical, is that it can compute any arbitrary function we want it to compute, as long as we provide enough authorization to be able to compute that function. And it will only output the necessary outputs. Usually, I sort of refer to this mutually trusted third party as a mutually shared computer. And if you guys are familiar with something called the God Protocol, this is the God Protocol. This is a picture from an example back in 1987. 
So first observation to make is that if you really want these three guarantees to be, if you really want this party to be globally mutually trusted, we want this party to be able to prove these three guarantees to any individual without requiring any additional interaction, which is why we require cryptography. to prove these three guarantees to any individual without requiring any additional interaction which is why we require cryptography we cannot just make it on a certain legal arguments or something like that we require cryptography for building this the building this globally mutually trusted shared computer and we started to build phantom zone to eventually build the god protocol but to stick within the realms of practicality, we could only build an abridged version of it. So for the rest of the talk, I will be talking about, A, what is Phantom Zone? Why it is an abridged version of this God Protocol? And the second important thing that I'll be talking about is, how can we push the frontiers to eventually build the God Protocol? Okay, so Phantom Zone, the abridged version. The key idea in Phantom Zone is something called multi-party fully homomorphic encryption. And for me to describe you multi-party fully homomorphic encryption, I have to eventually describe you what is single-party encryption. In single-party encryption, you have a single client, this guy over here. They hold a secret. They keep the secret private to them. They can encrypt their information, which is A here, with their secret and produce an FHC ciphertext. And then they can send this FHC ciphertext to any server. And the can evaluate and any arbitrary function on their private input which is a and produce an output ciphertext and that the and the client can receive the output ciphertext and decrypt it. So this is single-party FIG coming to multi-party FIG. Well the key idea in multi-party FIG is that you split this secret which is held private by a single client in single-party FIG. Well the key idea in multi-party FIG is that you split this secret which is held private by a single client in single party FIG among many clients. So you have S0, S1, S2 as secret shots split among these three people over here. The first step in multi-party FIG is something called collective public key generation. So all these three parties come together and they generate the collective public key. And then all these three parties, using the collective public key, encrypt their private inputs and produce FHC ciphertext. And then they send their FHC ciphertext to the server. Server executes a marble refunction on the FHC ciphertext and produce an FHC output. The key thing to notice here is that all these parties would have to produce a decryption share to eventually decrypt the output ciphertext here. So they produce the decryption share using the secret shards and then they send it to each other and then only they're able to decrypt the output ciphertext. Because in this case, the secret was split among all these parties. So why is Phantom Zone an abridged version? Well, because Phantom Zone, assuming that in the future we're able to add publicly-verifiable FHE to a Phantom Zone can only guarantee the three guarantees that I talk about in the God Protocol to only the holders of the secret shots. It cannot guarantee these three guarantees to everyone around the globe. Which is why Phantom Zone is just an abridged version of it. Okay. So you might wonder, how do we build towards the God protocol? How do we even do it? 
Well, what I would like to say at the moment is I would have loved to say that after a lot of research and a lot of five years of research, we have figured out the solution to build the God protocol. But no, there are no enlightening thoughts here. And there's one obvious answer to eventually building the God protocol, which is program of sophistication. What's program of sophistication? Well, to simply describe the program of sophistication, let's just assume that you have function f, right? What you can do with program obfuscation is you take this function f and perform some transformations on this function f and produce an obfuscated circuit. You can give this obfuscated circuit to someone else and program obfuscation guarantees that the only thing that you can learn from that obfuscated circuit is the input to output map and nothing else. Now you might be wondering why is this useful? Because if the function is trivial, then you can easily learn it from the input to output map. Program obfuscation becomes very interesting when you sort of like obfuscate a program that is a cryptographic function. For example, let's just say that I take a function that decrypts any ciphertext that is encrypted to my public key. So I take a function and this function has my secret key and it decrypts any ciphertext that was encrypted to me using my public key. And I perform certain transformations using program obfuscation to this function and produce an obfuscated circuit. I give this obfuscated circuit to someone else. What they can do is that they can decrypt any ciphertext that was encrypted to me using this obfuscated circuit. But they can never, ever learn what the secret is inside that circuit. They can never learn my secret key. And these are the class of functions where program obfuscation becomes useful. And I'll tie it to building the God protocol later in the slides. So now, assume that we can only build program obfuscation for some limited class of functions, not for general class of functions, but limited class of functions. I'll tell you one way of building the got protocol using program ob application. Step one, modify the FHT scheme that we're using before to become publicly verifiable. What do I mean by that? Well a publicly verifiable FHT schemes does those things. It evaluates the FHT function which you know a a normal FHE scheme does. In addition to evaluating the function, it also produces a proof of correct evaluation so that anyone can verify this proof with the output ciphertext and be assured that the server that sort of executed this FHE function executed it correctly, and which I usually refer to as proof pi of correct evaluation. Step two, replace the collective key generation operation that we did in the multi-party FHE with a trusted setup. In the trusted setup, you have arbitrary number of people here. They perform some MPC protocol to produce FHE keys. The two types of FHE keys which are very important. Public key and the bootstrapping key. Bootstrapping key is usually used for some sort of FHE operations that you can completely black box. The key thing here is that no one knows the ideal secret key because we're doing a trusted setup in MPC to generate these two keys. The third step is modify the trusted setup to also output an obfuscated conditional decryption oracle. Okay, that's a mouthful. I sort of like go into it one level deeper. What is an obfuscated conditional decryptor? 
This particular conditional decryptor is an obfuscated program of the following functionality. What it does is that takes an output ciphertext and a proof of correct evaluation of FIG circuit. It verifies whether the proof is valid and decrypts the output ciphertext if and only if the proof is valid. And this sort of like tells you why did we assume in the first place that program obfuscation may be feasible only for like limited class of functions because to build the GOT protocol like to build the got protocol using the FHERoute, we only need program obfuscation to be practical for this obfuscated conditional decryptor. So we modify the tracer setup to also output this obfuscated conditional decryptor, and that's it. And another thing to note is that this conditional decryptor also has the secret key, the ideal secret key that no one knows embedded inside it. Okay. So the end-to-end flow is, you do MPC to generate three things. Public key, bootstrapping key, and the offscored conditional decryptor, which I now realize is somewhat of a mouthful. I should have chosen some other term. Anyways, the second flow is, now anyone can encrypt their private inputs using the public key that is the output of the MPC protocol. So you have multiple ciphertexts here. And then they can send it to the FHC server. FHC server evaluates the FHC function, outputs the encrypted output. In addition, it produces a proof because the FIG server is evaluating a publicly verifiable FIG scheme. And then we plug in the proof as well as the output to the off-scaled conditional decryptor and the conditional decryptor would only decrypt the encrypted output if and only if the proof is valid. So this is one way of building the God protocol using publicly verifiable FHE and program obfuscation for obfuscated conditional decryptor. So there's one way, which I've just shown you, but we need new ideas to push the frontiers and to finally build the program obfuscation or and to finally build program obfuscation or indistinguishably obfuscation, if you're familiar with that. Here, I've showed you just one way. But if you are able to come up with new ideas, then probably we can make program obfuscation more practical for general circuits, not just for limited class of functions that we used before. And probably, we can directly build the God protocol from program obfuscation. So while I was exploring this field of program obfuscation and I.O., one key observation that I made was that it's really hard to get efficient program of specification from standard assumptions and we would inevitably require exotic assumptions. And I'll tell you what are standard assumptions and what are exotic assumptions. Well a standard assumption is an assumption that has been there for a while, for example D log, discrete log problem. There also exists additional incentive for people to break these standard assumptions. And exotic assumptions are somewhat newer assumptions. Like, they have been only there for like five years, or not even five, it was like two to three years. What we can do as a community to, you know, realizing that we might inevitably need newer assumptions to build practical program amplification is we can start examining these newer assumptions, start breaking them, start testing them. Or we can build applications using this assumption so that we can incentivize people to break them and tell us whether they're broken or not. 
And then eventually, in a few years, we would have candidate assumptions that are newer assumptions, but they have become then standard using which we can build practical program sophistication. And taking a first step towards this, we are launching a bounty program to break one of the candidate assumptions, which is called program obfuscation by local mixing. The way I think about this particular assumption is that they're taking more computational complexity approach than taking the traditional approach of using algebraic structures to build program obfuscation. The goal of the bounty is that we provided an obfuscated circuit with roughly 240,000 gates, which was obfuscated from an original circuit with roughly 1,000 gates. And you had to find the original circuit. You can learn more about the bounty at OfficeTopia.io. If you know what OfficeTopia is, OfficeTopia means that we're living in a world where authentication is practical, and the bounty amount is 10K. And this bounty is launched in collaboration with Ethereum Foundation and Zerix Spark. Okay. So before I break, and I think that I have a bunch of time, okay, before I break, and I think that I have a bunch of time. Okay, before I break, I would want to make one conjecture. And the conjecture goes as follows. I think the God protocol is the convergence of cryptography. Probably building the God protocol would require certain sort of like FHE. That is just one route, but like publicly viable FHE and other things like MPC for just setup and so on and so forth. But once you build the got protocol, I think it encompasses everything. It gives us everything that we have been wanting for for a while. It gives us witness encryption. It gives us zero knowledge proofs via signatures. It gives us MPC, multi-party computation. It gives us FE, functional encryption, all of these things that we've been demanding for a while. And this is also one of the major reasons that we should start investigating much more seriously how to get practical program application and finally build the God protocol. And that's it. Thank you. All right, thank you for All right. Thank you for the talk. We do have some questions rolling in. Yeah, let's go through some of the questions. Let's start with the first one. Can we implement threshold ECDSA with Phantom Zone? At the moment, yes, because you can express everything. Like, theoretically, yes, but it would be very impractical to implement ECDSA with PhantomZone at the moment because ECDSA is like you're doing elliptical operations, which is a lot of operations. As far as I understand, threshold ECDSA is possible. It takes two days to generate one single signature. All right, so next question. Can you tell us a little bit more about the definition of obfuscation as a virtual black box? That's the first question over here. Isn't the definition of obfuscation as a virtual black box impossible? I am not posing obfuscation as a virtual black box. I did not mean to say obfuscation is a virtual black box. By the way, the impossible result of a virtual black box is only for certain very restricted class of programs. It's not for general class of programs. Eventually you can aim for virtual black box with certain caveats. But again saying that my definition of sophistication is not virtual black box. All right and what can be done today with Phantom Zone? At the moment as I said Phantom Zone is an abridged version of the Scott protocol. 
It does not even have publicly verified FHE scheme, so it does not give you all the three guarantees. The only guarantee that it gives you is that it will execute the function that you ask it to execute while private information can be coming from multiple people. It'll keep the information private, but you'll have to trust it for it. So you'll have to trust this particular server to always keep the information private, but you'll have to trust it for it. So you'll have to trust this particular server to always keep the information private and not send it to anyone else. Perfect. And we do have one last question. Oh, cool. More questions rolling in. Can obfuscating programs undermine open source transparency and make it harder to verify the absence of malicious code? I see. Make it harder to verify absence of malicious code? I see. Make it harder to verify absence of malicious code. Well, that is assuming that the entire program is obfuscated. When I say obfuscation, we require obfuscation for certain parts of the program, which can interact with a public program and a private program which is obfuscated. I understand that obfuscation can be used for many malicious purposes as well, like for example, you know, like, there are several reasons why people might be interested in obfuscation, but we can, as a community, make sure that there's interaction between the public interfaces and the private interfaces which are obfuscated. All right. And why do you call the publicly verifiable FHE circuit obfuscated? Doesn't the require solidity verifier or something which is public? No, I think once I give you obfuscated circuit, there are certain guarantees that you can learn from the obfuscated circuit itself, that it does not reveal anything, as long as you've done the obfuscation correctly. Alright, and do you have evidence that the conditional decryption functionality is possible using I.O.? Yes. There are theoretical results and we're trying to make it practical as well. All right. Can you give one example each on how I.O. can replace ZK, MPC, FHE? Okay. So for ZK, what you can do is like you can embed a secret key inside this off-secreted circuit, the God protocol, and a zero-knowledge proof is just a signature from this God protocol. Whatever secret exists inside this particular server or this God protocol, or this FHC circuit, off-secreted circuit, a signature by that thing becomes a zero-knowledge proof. So you do not require zero- zero knowledge on the client side anymore. For MPC, again, it's a globally mutually trusted third party. All of us encrypt our private inputs with the public key corresponding to the secret key that lives inside this off-site circuit. And we send our private inputs to this. It decrypts that, performs some function, and produces the output. So that's one way of replacing MPC, and the same applies for FG. Cool. We can stay here for maybe another 10 seconds if there are any new questions rolling in. 
All right, cool.", - "eventId": "devcon-7", - "slot_start": 1731639900000, - "slot_end": 1731640500000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1y6tAISW_K9exOHiT-8JDt3qSFgyDYP0v5Zkc3T7JIdw", - "resources_slides": null, "speakers": [ - "felix-leupold" - ] + "tincho", + "matta-the-red-guild" + ], + "eventId": "devcon-7", + "slot_start": 1731488400000, + "slot_end": 1731495600000, + "slot_roomId": "classroom-b", + "resources_presentation": "https://docs.google.com/presentation/d/1iQKRk0GBHlEdWgzH2yQxE2MJqGiiPO9fQI4PkTbLKOk", + "resources_slides": "https://drive.google.com/file/d/14b5knPYTUrt1SokifYXSD0KHvLjLAU4m/view" }, "vector": [ - 0, - 0, 6, 0, 0, @@ -251998,13 +251291,10 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -252473,6 +251763,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -252493,6 +251784,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -253032,15 +252324,13 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -253056,38 +252346,47 @@ }, { "session": { - "id": "dont-get-rekt-practical-threat-detection-for-users-and-devs", - "sourceId": "Y7QGNQ", - "title": "Don’t get rekt: practical threat detection for users and devs", - "description": "Learn to uncover, and protect against, weaponized repositories, sites and tools targeting web3 users, devs & researchers. With examples and hands-on exercises, the session begins with topics like detecting suspicious activity in sites, handling wallet secrets & signatures, decoding calldata of malicious txs, and simulating them to avoid attacks. To then cover more advanced techniques to spot harmful backdoors in code repositories and services that can impact on devs & users’ safety.", + "id": "double-entry-point-issues-from-breaking-compound-to-uniswap-v4", + "sourceId": "N9ZSQW", + "title": "Double entry point issues - From breaking Compound to Uniswap v4", + "description": "A short explanation of a critical-severity vulnerability we found in the Uniswap V4 core contracts that would have caused a ~$15M loss in Uniswap's pools. The goal is to explain the risks of double entry points, from the $30M+ TUSD issue in Compound to the Uniswap V4-specific case where protocols use native tokens and operate on chains where the native token has a corresponding ERC-20 token, and how to prevent them.", "track": "Security", - "type": "Workshop", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Research", "featured": false, - "doNotRecord": true, - "keywords": [ - "user safety", - "developer safety", - "phishing" - ], + "doNotRecord": false, "tags": [ - "Tooling", - "Security", - "phishing", "Security", - "Tooling" + "Bug", + "Bounties", + "contest", + "Architecture", + "Auditing", + "Bug", + "Security" ], - "language": "en", - "speakers": [ - "tincho", - "matta-the-red-guild" + "keywords": [ + "Contest" ], + "duration": 549, + "language": "en", + "sources_swarmHash": "888e78d0f50c5ace9d1d159811094841d1c1a0069a4a1b04aa73f219f47692ac", + "sources_youtubeId": "aq0n0T0wAeQ", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6737497c1b0f83434d831b2b", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6737497c1b0f83434d831b2b.vtt", + "transcript_text": " My name is J. Carpanelli. As he already said, I'm Head of Security Services at Open Zeppelin. I'm here today to talk to you about double entry point issues. So, first things first, what is the double entry point? 
As we all know how Solidity works, users or protocols interact with contracts by knowing where the address is. So basically they know the address, and that address is pointing to a specific smart contract. A double entry point happens when there's not only one address that points to the storage of the contract, but many addresses. Could be two, could be n. And protocols can also talk to this double entry point, so not only users, but also protocols, and also tokens: basically anything that can interact with an address can interact with a contract that has more than one entry point. This can cause a couple of problems. It is unclear to end users which contract they should be talking to, and unclear to protocols which contracts they should be interacting with when they're integrating, based on the fact that basically all smart contracts are interacting with other smart contracts all the time. And that introduces security caveats that we're going to see now with some examples. So, first case: Compound, 2022. This was an issue that we found at the beginning of 2022 in a token named TUSD. This TUSD token had two entry points, so basically anyone would be able to, for example, if you want to transfer some of your TUSD tokens from one address to the other, you could use any of the two entry points that you had. And how it worked: the legacy address, they did some kind of migration. This legacy address would, when you call the transfer function, forward the transfer call to the new contract. And the same with all the functions defined in the contract. And again, as I said, not only with users, also with protocols. And the case was Compound: the cToken contract had a sweep token function that had a require statement, which you can see here. The typical sweep token function, where you want to remove certain tokens from a specific contract, will check that the address that you're sending wasn't the underlying. So if you have the cTUSD token and you want to sweep all the TUSD tokens, you shouldn't be able to do that. But if you want to sweep the DAI tokens, you should be able to do that and send them to the admin of the contract, which I think at the time was the timelock. The problem is that the underlying could be two addresses. So actually you can call this function, bypass the require statement, and actually move all the funds that were in the cToken, or the cTUSD contract, back to the admin, basically creating chaos. A couple of things that we saw could happen: the cToken-to-token price got completely screwed, so any mint or any burn would account wrongly. And also there was the possibility to steal funds from the pool, because all the calculations were made wrong. So if an attacker would send a transaction to the sweep token function to reduce the underlying token balance to zero, and then send other transactions to other functions, they would have been able to steal around $50 million. 
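A toy model of why that check fails once a token has two entry points (hypothetical names, Python rather than the actual Solidity): the guard compares addresses, but both addresses front the same balance store.

```python
balances = {"market": 1000}  # one shared underlying balance store

LEGACY, NEW = "0xLegacyTUSD", "0xNewTUSD"  # two entry points, same storage
UNDERLYING = NEW  # the only address the market's guard knows about

def sweep_token(token: str, to: str) -> int:
    # The flawed guard: it compares addresses, not the storage behind them.
    assert token != UNDERLYING, "can't sweep the underlying"
    swept = balances["market"]  # both entry points drain the same store
    balances["market"] = 0
    return swept  # 'to' would receive the swept amount in a real contract

print(sweep_token(LEGACY, "admin"))  # 1000: check bypassed via the other entry point
```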
Second case: 2024, Uniswap v4. We did an audit for the v4 core code, and we found that this case was another manifestation of double entry points, one that happens not when you have two addresses, but when you have a native token and an ERC-20 representation of that native token. And basically, users can interact with both at will, as well as protocols. We're not going to talk much about Uniswap internals because it's very complex, but there's a settle function. This settle function allows users to modify their account deltas, which is basically the amount of money that they hold in the pool manager, at will, by adding tokens or removing tokens. And it has this if statement that checks if the currency that we want to interact with in the pool manager is the native token. And if it's not the native token, it will enter the else. So basically, we can interact with another entry point. The issue was actually with the Celo token address. An attacker could call the sync function, sending the Celo address, basically to put the Celo address in transient storage, to then interact and do other operations. Then they could call the settle function, which would enter the if statement and account the value that you're sending as a parameter as the new account delta, so the new balance of the user. And then call the other settle function. I mean, it's the same settle function, but it would enter the else statement, and it will basically account for 20 tokens instead of 10. And in the end, the user would only deposit 10 tokens, but get a balance of 20. And then the same user could, without any problems, take that money from the pool. If they do this many times, or if they use higher values than 10, they could drain the pool. So in v3, Celo has like $15 million of TVL, I believe, and if they were to migrate things from v3 to v4, it would have caused around $15 million of losses. So some takeaways. First of all, this is an issue that we found in 2022, and also in 2024. It's an issue that can still happen, because it doesn't depend on our protocol. It depends on other protocols that we integrate with. So we should never trust any protocol or token that we interact with. Always double check that when we integrate with a protocol or a token, it doesn't have a double entry point. And actually, if it does, we have to also check and code in a defensive way, so that if they introduce a double entry point, we can account for that and we don't have issues. And that's all. Thank you very much. Thank you, Jota. And thank you again for all that you are doing for the security of our apps. Okay, we have the full room almost, so I expect some questions. Raise your hand, please. You can just say something nice to the speaker. That's also accepted. Are you going to say something nice, or do you have a question? Let's see. Yeah, hello. So with the rise of AI that can create code, how do you think AI will blend into development? Yeah, how are you going to use AI to basically security-check the system? It's a good question. You can write rules around the things that you want to check around double entry points and put it in an LLM to learn from different code bases and maybe check if they have a double entry point issue. You could also use AI to see if storage is modified by more than two addresses out there, I guess. Okay, thanks for the question. More? 
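Before the next question, a toy recap of the settle flow described above (hypothetical names, Python rather than the actual Solidity): one native payment is visible through both branches, because native Celo and its ERC-20 form are two entry points to the same balance, so it gets credited twice.

```python
NATIVE, CELO_ERC20 = "native", "0xCeloAsERC20"  # two views of one asset

pool_native_balance = 0  # the pool's single underlying balance
synced_balance = 0       # snapshot taken by sync() ("transient storage")
delta = 0                # the user's credited balance

def sync():
    global synced_balance
    synced_balance = pool_native_balance  # snapshot the "ERC-20" view

def settle(currency, msg_value=0):
    global pool_native_balance, delta
    if currency == NATIVE:
        pool_native_balance += msg_value
        delta += msg_value  # if-branch: credit the attached value
    else:
        paid = pool_native_balance - synced_balance
        delta += paid       # else-branch: credit the apparent ERC-20 inflow

sync()                        # snapshot: 0
settle(NATIVE, msg_value=10)  # pay 10 once; credited 10
settle(CELO_ERC20)            # the same 10 now looks like ERC-20 inflow: +10
assert delta == 20            # deposited 10, credited 20; repeat to drain
```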
Yes, there in the back. Thank you. I want to ask if there are many special tokens, like this double entry, some with hooks. So why is there not any framework that kind of specifies testing tokens like these ones? So like do fasting or this kind, or I know just standardize the testing for all the special tokens that we know that might have prevented, for instance, this issue in development. What do you think about this? I think that's a good idea, but it would be more like we have to come up with preventative measures rather than how to mitigate these sort of problems once they're introduced. The way of doing that is just like have good standards on things that we have to do and things that we shouldn't do. For example, never have two or more addresses pointing to the same storage layout of the same contract. I would always go through the preventative measurements, measures, rather than for trying to fix things when you're already wrong. Okay. Thank you for the question and for the answer. We are out of time. Big round of applause again, please, for Jota. I'm going to be outside in case anyone wants to talk about this topic. Thank you very much. Okay. Thanks again.", "eventId": "devcon-7", - "slot_start": 1731488400000, - "slot_end": 1731495600000, - "slot_roomId": "classroom-b", - "resources_presentation": "https://docs.google.com/presentation/d/1iQKRk0GBHlEdWgzH2yQxE2MJqGiiPO9fQI4PkTbLKOk" + "slot_start": 1731657000000, + "slot_end": 1731657600000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1nsS3htMgQANlE-F_Bcm9jAbdeixMwbjLd0u9GrwuCV0", + "resources_slides": "https://drive.google.com/file/d/1jkd7uvnsMVHpM2euyvMjM1ulV2zqdwPL/view", + "speakers": [ + "jota-carpanelli" + ] }, "vector": [ 6, @@ -253369,7 +252668,7 @@ 0, 0, 0, - 6, + 0, 6, 0, 0, @@ -253838,10 +253137,10 @@ 0, 0, 0, + 6, 0, 0, 0, - 6, 0, 0, 0, @@ -253862,7 +253161,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -253901,6 +253199,22 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -253975,6 +253289,14 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -254076,6 +253398,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -254086,42 +253412,10 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, + 0, + 0, + 2, + 2, 0, 0, 0, @@ -254409,7 +253703,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -254421,61 +253714,61 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "double-entry-point-issues-from-breaking-compound-to-uniswap-v4", - "sourceId": "N9ZSQW", - "title": "Double entry point issues - From breaking Compound to Uniswap v4", - "description": "A short explanation of a critical-severity vulnerability we found in the Uniswap V4 core contracts that would have caused a ~$15M loss in Uniswap's pools. The goal is to explain the risks of double entry points, from the $30M+ TUSD issue in Compound to the Uniswap V4-specific case where protocols use native tokens and operate on chains where the native token has a corresponding ERC-20 token, and how to prevent them.", - "track": "Security", + "id": "downtown-stimulus-public-goods-funding-for-main-st", + "sourceId": "VC9TDM", + "title": "Downtown Stimulus: Public Goods Funding for Main St", + "description": "Web3 Public Goods Funding has left web3, & successfully hit main st! 
💰🏦\r\n\r\nThe downtown stimulus team raised $43k for Boulder Colorado COVID economic recovery & proved QF works in mainstream USA. Learn about this experiment & lessons from it from Gitcoin founder Kevin Owocki.", + "track": "Real World Ethereum", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Research", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Security", - "Bug", - "Bounties", - "contest", - "Architecture", - "Auditing", - "Bug", - "Security" + "Quadratic Voting", + "Public good", + "Local Impact", + "UI/UX", + "mainstream", + "Public good", + "UI/UX" ], "keywords": [ - "Contest" + "mainstream" ], - "duration": 549, + "duration": 599, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "418e76d2a15887841cd6d00d34e67a8a770a3dd778739b1baf803c368b66748e", + "sources_youtubeId": "LmYVHyEPeSE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6737497c1b0f83434d831b2b", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6737497c1b0f83434d831b2b.vtt", - "transcript_text": " My name is J. Carpanelli. As he already said, I'm Head of Security Services at Open Zeppelin. I'm here today to talk to you about double entry point issues. So, first things first, what is the double entry point? As we all know how Solidity works, users or protocols interact with contracts by knowing where the address is. So basically they know the address and that address is pointing to a specific smart contract. A double entry point happens when actually there's not only one address that points to that storage of the contract, but actually there's many addresses. Could be two, could be two could be n and also protocols can talk to this double entry point so not only users but also protocols but also tokens basically anything that can interact with an address can interact with a contract that can have more than one entry point this can have a couple of problems, which are unclear to end users to which contract they should be talking to, unclear to protocols which contracts they should be interacting with when they're integrating, based on the fact that basically all smart contracts are interacting with other smart contracts all the time, and that introduces security caveats that we're going to see now with some examples. So, first case, compound 2022. This was an issue that we found at the beginning of 2022 in a token named TUSD. This TUSD token had two entry points, so basically anyone would be able to, for example, if you want to transfer some of your TUSD tokens from one address to the other, you could use any of the two entry points that you had. And how it worked, the legacy address, they did some kind of migration. This legacy address would, when you talk to the transfer function, would forward the transfer function to the new contract. And the same with all the functions defined in the contract. And again, as I said, not only with users, also with protocols. And the case was Compound, the ctoken contract had a sweep token function that had here a required statement that you can see that would check that the address that you want to sweep, the typical sweep token, the sweep token function that you want to remove certain tokens of a specific contract, will check that the address that you're sending wasn't the underlying. 
So if you have the CTUSD token and you want to sweep all the TUSD tokens, you shouldn't be able to do that. But if you want to sweep the DAI tokens, you should be able to do that and send it to the admin of the contract that I think it was the time lock button. The problem is that the underlying could be two addresses. So actually you can call this function, bypass the required statement, and actually move all the funds that were in the C token or the C to USC contract back to the admin, basically creating chaos. One of the, or a couple of things that we saw that happen is that the C token to token price got completely screwed, so any mint or any burn would account wrongly. And also there was the possibility to steal funds from the pool because all the calculations were made wrong. So if an attacker would send a transaction to the sweep token function to reduce to zero the underlying token balance and then touch a little bit what was happening in the, like, send other transactions to other functions would have been able to steal around $50 million. Second case, 2024, UNICEF v4. We did an audit for the v4 core code, and we found that the case was with another representation of double entry points that happens not when you have two addresses, but when you have a native token and an ERC20 representation of that ERC20 token. And basically, users can interact with both at will as well as protocols. Uniswap, we're not going to talk much about Uniswap because it's very complex, but there's a settle function. This settle function allows users basically to modify their account deltas, which is basically the amount of money that they hold in the pool manager at will by adding tokens or removing tokens. And it has this if statement that actually checks if the currency that we want to interact with in the pool manager is the native token. And if it's not the native token, it will enter to the else. So basically, we can interact with in the pool manager is the native token. And if it's not a native token, it will enter to the else. So basically, we can interact with another entry point. And an attacker could call the sync function, selling the Zillow address. The issue was actually with the, I didn't mention it, but it was with the Zillow token address. Basically, to put in transient storage the Zillow address to then interact and do other operations. Then they could call the settle function that would enter to the if statement and account the value that you're sending as a parameter as the new account delta, so the new balance of the user. And then call the other settle function. I mean, it's the same settle function, but would enter to the else statement and will basically account for 20 tokens instead of 10. It's the same set of functions, but we'll enter to the else statement. And we'll basically account for 20 tokens instead of 10. And in the end, the user would only deposit 10 tokens, but get a balance of 20. And then the same user could, without any problems, take that money from the pool. If they do this many times, or if they use higher values, that is not 10, they could drain the pool. So in v3, CLO has like $15 million of TBL, I believe, and if they were to migrate things from v3 to v4, it would have cost around $50 million of losses. So some takeaways. First of all, this is an issue that we found in 2022. Also in 2024. It's an issue that can still happen because it doesn't depend on our protocol. It depends on other protocols that we integrate with. 
So we should never trust any protocol or token that we interact with. Always double check that when we integrate with a protocol or a token, it doesn't have a double entry point. And actually, if we do, we have to also check and code in a defensive way so that if they introduce a double entry point, we can account for that and we don't have issues. And that's all. Thank you very much. Thank you, Jota. And thank you again for all that you are doing for the security of our apps. Okay, we have the full room almost, so I expect some questions. Raise your hand, please. You can just say something nice to the speaker. That's also accepted. Are you going to say something nice to the speaker. That's also accepted. Are you going to say something nice or you have a question? Let's see. Let's see. Yeah, hello. So with the race of AI that can create code, how do you think AI will blend into the development? How would you blend this into development with AI? Yeah, how are you going to use AI to basically secure check the system? It's a good question. You can write rules around the things that you want to check around double entry points and put it in an LLM to learn from different code bases and maybe check if they have a double entry point issue. You could also use AI to see if storage is modified by more than two addresses out there, I guess. Okay, thanks for the question. More? Yes, there in the back. In Arremeda. Thank you. Yes, there in the back. Thank you. I want to ask if there are many special tokens, like this double entry, some with hooks. So why is there not any framework that kind of specifies testing tokens like these ones? So like do fasting or this kind, or I know just standardize the testing for all the special tokens that we know that might have prevented, for instance, this issue in development. What do you think about this? I think that's a good idea, but it would be more like we have to come up with preventative measures rather than how to mitigate these sort of problems once they're introduced. The way of doing that is just like have good standards on things that we have to do and things that we shouldn't do. For example, never have two or more addresses pointing to the same storage layout of the same contract. I would always go through the preventative measurements, measures, rather than for trying to fix things when you're already wrong. Okay. Thank you for the question and for the answer. We are out of time. Big round of applause again, please, for Jota. I'm going to be outside in case anyone wants to talk about this topic. Thank you very much. Okay. Thanks again.", + "sources_streamethId": "6735da639dbb7a90e1300946", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735da639dbb7a90e1300946.vtt", + "transcript_text": " Let's welcome Kevin. Hello, friends. What's up? I'm Kevin Awocki. I'm one of the co-founders of Gitcoin. Has anyone in the audience used Gitcoin? All right. Can't see it on the live stream, but a bunch of hands went up. All right. Can't see it on the live stream, but a bunch of hands went up. I'm going to talk about bringing the Gitcoin model of quadratic funding to Main Street. So how do we exit our little crypto bubble and start to do more public goods funding downtown? So if you've been in the Ethereum ecosystem over the last couple of years, you know that quadratic funding is a democratic way to run a public goods funding campaign. 
If you are an ecosystem that wants to fund your ecosystem public goods, you put out a pool of funds, and then you allow grants or projects in your ecosystem to access those funds by raising money from their constituents and receiving money from the central matching pool. But there's one weird trick with quadratic funding that makes it into a democratic power tool, and that's that the matching is based off of the number of contributors to each project as opposed to the amount that they've given. So if you raise $100 from 100 contributors and I raise $100 from one contributor in a quadratic funding campaign, the one with the broader base of support is going to get 99% of the matching pool. And this makes quadratic funding into a power tool for funding what matters for everyday people in these communities, because the poor and the many get to make capital allocation decisions instead of the rich and the few. Quadratic funding has delivered $65 million worth of funding in the Ethereum ecosystem over the last six years on Gitcoin. But I wanted to prove that we could bring it out of the crypto bubble and to Main Street. And so in 2020, when COVID was ravaging the world, and my hometown of Boulder, Colorado, was seeing a 99% decrease in foot traffic downtown, businesses were struggling to stay afloat. And so I got together with my colleagues, Zach Herring and Katie Johnson, and we put together a $25,000 matching pool of quadratic funding in downtown Boulder, Colorado, and were able to run a quadratic funding campaign for a yoga studio, a bookstore, a coffee shop, and a couple of other projects downtown. So this is what it looked like, downtownstimulus.com, and basically the way that we positioned this was that you could support local businesses by making a contribution to these businesses using a credit card payment. So basically, we took quadratic funding, we took Gitcoin grants, we stripped out all the crypto, and we just allowed people to contribute with U.S. dollars and have their contributions matched by quadratic funding. So we had a $25,000 matching pool. Thank you to Vitalik Buterin and to local philanthropist Brad Feld. And we were able to raise $16,000 from 320 contributors at an average donation of about $40. And all in all, we were able to raise about $41,000 for downtown Boulder public goods. I think this is really exciting: a primitive that has been pioneered and distributed in the crypto ecosystem was able to go mainstream with a campaign like Downtown Stimulus. And I'm really excited about this meme of cosmolocalism, which is basically an approach to production and governance that provides a combination of global coordination with knowledge sharing and local manufacturing and resource use, emphasizing open source software and a relationship between the global and the local where the global services the local. Okay, so in what ways can we, in the Ethereum ecosystem and the open source ecosystem, create a global commons, create knowledge and production that is in service to the local? And this is opposed to the Web2 surveillance economy, in which you have Facebook communities going into your local communities and surveilling everyone and tearing apart the social fabric, because it's more profitable for Facebook shareholders that way.
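The matching rule Kevin describes at the start of this passage is the standard quadratic funding formula. A small worked sketch (the quadratic_match helper is a hypothetical illustration, not Gitcoin's implementation) reproduces the 99% split from his $100-from-100 versus $100-from-one example:

```python
import math

def quadratic_match(contribution_lists, matching_pool):
    # Standard QF weight per project: (sum of square roots of its
    # individual contributions)^2; the pool is split pro rata by weight.
    weights = [sum(math.sqrt(c) for c in contribs) ** 2
               for contribs in contribution_lists]
    total = sum(weights)
    return [matching_pool * w / total for w in weights]

broad = [1.0] * 100   # $100 raised from 100 contributors
narrow = [100.0]      # $100 raised from a single contributor
print(quadratic_match([broad, narrow], 25_000))
# -> [24752.47..., 247.52...]: the broad-based project takes ~99% of the pool
```

The broad project's weight is (100 * sqrt(1))^2 = 10,000 against the whale's (sqrt(100))^2 = 100, which is exactly where the "99% of the matching pool" figure comes from.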
How can the Ethereum commons be in service of the local commons? That is the design space of cosmolocalism and Ethereum localism, and I'm happy to say that Downtown Stimulus, to me, is a proof point that this is possible. We can take the programmable money, where we program our values into our money, from the Ethereum space and start applying it to downtowns across the world. And I'm really excited to see what kind of experiments in public goods funding, community currency, supporting the arts, and supporting public goods we can do when we do that. So I'm Kevin Owocki, founder of Gitcoin. That's been Downtown Stimulus. Thanks so much for your time. Thank you, Kevin. Hello. Amazing. We have like roughly five minutes for questions. By the way, I'm also a big fan of cosmolocalism. Yeah, thanks. Anyone in the audience? The gentleman in the purple hat. Should I? Oh, wow. You're good. Good catch. Thanks. Yeah, I love your work and all that. I was going to ask, with things like community currencies and things like that where you might put conditions on your money, I sometimes think that that's obviously a restricted form of trade, whereas regular fiat currencies are sort of unrestricted. So with that lens, how would you say that community currencies can sort of out-compete freer money, if that's the right premise for you? Yeah, so the question is, how can community currencies out-compete national currencies? Yeah, like how would it have more utility if it's somewhat constrained in some ways? Yeah. I think that community currencies are cool as an outsider, but I'm not a designer of any, so I'm not qualified to answer that question. I think Scott Morris has done a lot of work on community currencies, and I would ask him. Yeah, I guess I was riffing off the adding our values to our money. And if you put sort of constraints on it in that sense to fuel our values, I can definitely see the bigger picture that that's awesome, and I want to see more of that. But then I also sort of play devil's advocate, because another friend does this to me, where he sort of says, well, I could just do that with the US dollar, or I could do that with this freer currency. So I'm trying to sort of find the arguments, like other arguments that say this money with value restrictions is better in a more immediate form than I can currently argue. So I'm trying to find that side. Yeah. I think that, you know, one of the parallels I see between what we're doing in the crypto ecosystem and what community currencies have done for hundreds of years is that what we're doing in the crypto ecosystem is memetically local, and community currencies are geographically local. Okay. So like, to me, memetically local means that Gitcoin's a public goods funding coin. Filecoin is a file storage coin. Ethereum is a computational coin in a local memetic area. And so, you know, in what ways can what we learn in community currencies be applied to global currencies like the DAO currencies, and vice versa? I think that there might be sort of a cosmolocal convergence there. Yeah. But my talk was about quadratic funding more than community currencies. I think that they're both cool, though. Thank you. Do we have time for one more question? Yes, we do. Gentleman over there. Thank you. Hi, Kevin. Hey.
What do you think in your mind will be the hardest part of building some version of a downtown stimulus in another local community? Finding the initial donors or the tech, probably tech is just working something. Or making people understand how to use the system or anything like in your experience. What do you think should the focus on if you want to build something in my own local community? Yeah, great question. So we built a code base that you can use to do Fiat quadratic funding rounds. It's called simplegrants.xyz. Go to simplegrants.xyz, fork it, deploy it in your own local community. I should have put that in the talk, actually, now I think about it. But yeah, the hardest part, honestly, was raising the initial 25K. Quadratic funding campaigns in Web3, there's a lot of treasuries that are just hundreds of millions of dollars. And we draw on those to run quadratic funding rounds in QF. But the raising of funds to do quadratic funding locally is sort of hampered by, do you have access to wealthy individuals who are willing to do it? And can you crack the nut of the local government actually funding it? We actually went around the local government. We didn't even talk to the local government because their funding cycles are just quarters and quarters long. So to answer your question, I think that raising the initial amount of funding is the hardest thing that you'll do if you're going to run these campaigns. And hopefully we can make the tech a little bit easier with simplegrants.xyz. Thank you for the question. Thank you. And that's it for the time.", "eventId": "devcon-7", - "slot_start": 1731657000000, - "slot_end": 1731657600000, + "slot_start": 1731581400000, + "slot_end": 1731582000000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1nsS3htMgQANlE-F_Bcm9jAbdeixMwbjLd0u9GrwuCV0", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1Lf82ct08SpegO30t849kscAqeyNa8bTNVpMQ8ljElfA", + "resources_slides": "https://drive.google.com/file/d/1GiQe7KFzESPmZ2f8XtDUZXyNthoMw5T6/view", "speakers": [ - "jota-carpanelli" + "kevin-owocki" ] }, "vector": [ - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -255220,9 +254513,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -255279,10 +254569,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -255329,6 +254619,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -255372,7 +254663,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -255429,6 +254719,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -255465,6 +254756,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -255481,7 +254773,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -255499,7 +254790,6 @@ 0, 0, 2, - 2, 0, 0, 0, @@ -255788,11 +255078,9 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, + 2, 0, 0, 0, @@ -255805,45 +255093,47 @@ }, { "session": { - "id": "downtown-stimulus-public-goods-funding-for-main-st", - "sourceId": "VC9TDM", - "title": "Downtown Stimulus: Public Goods Funding for Main St", - "description": "Web3 Public Goods Funding has left web3, & successfully hit main st! 💰🏦\r\n\r\nThe downtown stimulus team raised $43k for Boulder Colorado COVID economic recovery & proved QF works in mainstream USA. 
Learn about this experiment & lessons from it from Gitcoin founder Kevin Owocki.", "track": "Real World Ethereum", "type": "Lightning Talk", "expertise": "Beginner", "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ "Quadratic Voting", "Public good", "Local Impact", "UI/UX", "mainstream", "Public good", "UI/UX" ], "keywords": [ "mainstream" ], "duration": 599, "language": "en", "sources_swarmHash": "418e76d2a15887841cd6d00d34e67a8a770a3dd778739b1baf803c368b66748e", "sources_youtubeId": "LmYVHyEPeSE", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735da639dbb7a90e1300946", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735da639dbb7a90e1300946.vtt", "transcript_text": " Let's welcome Kevin. Hello, friends. What's up? I'm Kevin Owocki. I'm one of the co-founders of Gitcoin. Has anyone in the audience used Gitcoin? All right. Can't see it on the live stream, but a bunch of hands went up. I'm going to talk about bringing the Gitcoin model of quadratic funding to Main Street. So how do we exit our little crypto bubble and start to do more public goods funding downtown? So if you've been in the Ethereum ecosystem over the last couple of years, you know that quadratic funding is a democratic way to run a public goods funding campaign.
Quadratic funding has delivered $65 million worth of funding in the Ethereum ecosystem over the last six years on Gitcoin. But I wanted to prove that we could bring it out of the crypto bubble and to Main Street. And so in 2020, when COVID was ravaging the world, and my hometown of Boulder, Colorado, was seeing a 99% decrease in foot traffic downtown, businesses were struggling to stay afloat. And so I got together with my colleagues, Zach Herring and Katie Johnson, and we put together a $25,000 matching pool of quadratic funding in downtown Boulder, Colorado, and were able to run a quadratic funding campaign for a yoga studio, a bookstore, a coffee shop, and a couple of other projects downtown. So this is what it looked like, downtownstimulus.com, and basically the way that we positioned this was that you could support local businesses by making a contribution to these businesses using a credit card payment. So basically, we took quadratic funding, we took Gitcoin grants, we stripped out all the crypto, and we just allowed people to contribute with U.S. dollars and have their contributions matched by quadratic funding. So we had a $25,000 matching pool. Thank you to Vitalik Buterin and to local philanthropist Brad Feld. And we were able to raise $16,000 from 320 contributors at an average donation of about $40. And all in all, we were able to raise about $41,000 for downtown Boulder public goods. I think this is really exciting: a primitive that has been pioneered and distributed in the crypto ecosystem was able to go mainstream with a campaign like Downtown Stimulus. And I'm really excited about this meme of cosmolocalism, which is basically an approach to production and governance that provides a combination of global coordination with knowledge sharing and local manufacturing and resource use, emphasizing open source software and a relationship between the global and the local where the global services the local. Okay, so in what ways can we, in the Ethereum ecosystem and the open source ecosystem, create a global commons, create knowledge and production that is in service to the local? And this is opposed to the Web2 surveillance economy, in which you have Facebook communities going into your local communities and surveilling everyone and tearing apart the social fabric, because it's more profitable for Facebook shareholders that way. How can the Ethereum commons be in service of the local commons? That is the design space of cosmolocalism and Ethereum localism, and I'm happy to say that Downtown Stimulus, to me, is a proof point that this is possible. We can take the programmable money, where we program our values into our money, from the Ethereum space and start applying it to downtowns across the world. And I'm really excited to see what kind of experiments in public goods funding, community currency, supporting the arts, and supporting public goods we can do when we do that. So I'm Kevin Owocki, founder of Gitcoin. That's been Downtown Stimulus. Thanks so much for your time. Thank you, Kevin. Hello. Amazing. We have like roughly five minutes for questions. By the way, I'm also a big fan of cosmolocalism. Yeah, thanks. Anyone in the audience? The gentleman in the purple hat. Should I? Oh, wow. You're good. Good catch. Thanks. Yeah, I love your work and all that.
I was going to ask, with things like community currencies and things like that where you might put conditions on your money, I sometimes think that that's obviously a restricted form of trade, whereas regular fiat currencies are sort of unrestricted. So with that lens, how would you say that community currencies can sort of out-compete freer money, if that's the right premise for you? Yeah, so the question is, how can community currencies out-compete national currencies? Yeah, like how would it have more utility if it's somewhat constrained in some ways? Yeah. I think that community currencies are cool as an outsider, but I'm not a designer of any, so I'm not qualified to answer that question. I think Scott Morris has done a lot of work on community currencies, and I would ask him. Yeah, I guess I was riffing off the adding our values to our money. And if you put sort of constraints on it in that sense to fuel our values, I can definitely see the bigger picture that that's awesome, and I want to see more of that. But then I also sort of play devil's advocate, because another friend does this to me, where he sort of says, well, I could just do that with the US dollar, or I could do that with this freer currency. So I'm trying to sort of find the arguments, like other arguments that say this money with value restrictions is better in a more immediate form than I can currently argue. So I'm trying to find that side. Yeah. I think that, you know, one of the parallels I see between what we're doing in the crypto ecosystem and what community currencies have done for hundreds of years is that what we're doing in the crypto ecosystem is memetically local, and community currencies are geographically local. Okay. So like, to me, memetically local means that Gitcoin's a public goods funding coin. Filecoin is a file storage coin. Ethereum is a computational coin in a local memetic area. And so, you know, in what ways can what we learn in community currencies be applied to global currencies like the DAO currencies, and vice versa? I think that there might be sort of a cosmolocal convergence there. Yeah. But my talk was about quadratic funding more than community currencies. I think that they're both cool, though. Thank you. Do we have time for one more question? Yes, we do. Gentleman over there. Thank you. Hi, Kevin. Hey. What do you think in your mind will be the hardest part of building some version of a downtown stimulus in another local community? Finding the initial donors, or the tech, probably just getting the tech working. Or making people understand how to use the system, or anything like that, in your experience. What do you think the focus should be on if you want to build something in my own local community? Yeah, great question. So we built a code base that you can use to do fiat quadratic funding rounds. It's called simplegrants.xyz. Go to simplegrants.xyz, fork it, deploy it in your own local community. I should have put that in the talk, actually, now that I think about it. But yeah, the hardest part, honestly, was raising the initial 25K. Quadratic funding campaigns in Web3, there's a lot of treasuries that are just hundreds of millions of dollars, and we draw on those to run quadratic funding rounds. But the raising of funds to do quadratic funding locally is sort of hampered by: do you have access to wealthy individuals who are willing to do it?
And can you crack the nut of the local government actually funding it? We actually went around the local government. We didn't even talk to the local government because their funding cycles are just quarters and quarters long. So to answer your question, I think that raising the initial amount of funding is the hardest thing that you'll do if you're going to run these campaigns. And hopefully we can make the tech a little bit easier with simplegrants.xyz. Thank you for the question. Thank you. And that's it for the time.", + "sources_streamethId": "6735a44f9dbb7a90e1e7e1ef", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735a44f9dbb7a90e1e7e1ef.vtt", + "transcript_text": " Hi everyone, thanks for coming to see my talk. So I guess we'll go straight into it, we've only got seven minutes. Yeah, so who am I? So my name is James. I am the engineering director for eBay's small but hopefully impactful Web3 team. I came from an acquisition about two years ago of an NFT platform called Known Origin. Now really myself and my team are really interested in looking at the Web3 landscape, looking at the eBay machine as it stands now, and seeing how this technology can potentially impact them and find value for buyers and sellers. And today I'm going to talk about a topic called digital product passports and why I intrinsically believe Web3 is the right place for them. And I'm going to touch on four topics today. A topic called connected products, digital product passports. We'll talk about Web3 and we'll talk about commerce. But first, I want to set the scene and provide a little bit of vision of what commerce could look like. So you've got to bear with me with this, right? So you walk into an office, you see your colleagues stood around a board, maybe you're discussing the latest meme coin. You've got your Vision Pro 5000s on, some AR-enabled glasses. Not only that, it's looked in your address book and it's noticed the PFPs of your colleagues and displayed them on screen. You know, shout out to any Xcopy or Squiggle holders in the room. Not only that, it's noticed that your friend Dan at the board has got a killer watch and the latest greatest in sneakers. These products are connected. You've got to find out more. You double tap your fingers. Some machine vision or AI vision scans these items, uniquely identifies them and starts working out exactly which unique product they are, not the product SKU, the actual unique product. After that, the digital product passport pops up, a digital layer linking the physical product directly in your face. Now, this is a vision piece only, right? But you can see the potential, you know, and you might notice as well, this is very much similar to how maybe NFT platforms looked trading NFTs a few years ago. From this digital link, you can see ownership. You might see if it's got a warranty. You might see trading activity on the secondary market, traceability, servicing insights. This is the vision that I want to talk about today, and this is my vision and potentially the future vision of how commerce might look in several years' time. But wait, before we go on, I think it's worth touching about some of the problems and challenges in commerce at the moment. And, you know, the first three topics, you know, the supply chain. The supply chain is very difficult to wrangle for physical goods, aggregating lots of disparate information. Maybe the digital product passport is the perfect layer for this. You know, circularity. 
I bought a good on the primary market. How do I sell it? There's many websites, many marketplaces, all with different pros and cons. Maybe the product passport is the place where circularity will start. Maybe it will also tell you how to recycle this good at the end of its life. Maybe this product passport's the place where you'll get additional utility after your first transactional engagement with that brand or with that shop. And finally, the last three, there's some emerging regulatory insights coming from the EU. So from 2026, 2027, the EU will mandate digital product passports in things like batteries, fashion, white goods, collectibles. Now, the goal is to make things more circular and give consumers more choice. Maybe a tokenized digital product passport is the place where that will happen. The last two are digital physical experiences. A lot of the NFT space sort of tried to tackle this over the last few years with some mixed success; maybe starting with a physical and adding on a digital is the right approach. And then interoperability for your items. For the things you own physically, how do you interop with the wider world? Again, maybe this digital layer. So this is the vision I presented. Slightly wacky, really lo-fi. You know, this is not what we're building. This is a vision piece. But really, what does it enable? It enables a digital layer on physical, which is interactive product experiences, real-time information about your goods, you know, empowering customers, and then interoperability, where you, you know, you can take your physical good and use it anywhere. And this is, again, what these could enable. And then really quickly, I've only got a minute left. Last but not least, what is this? So first of all, a physical item and a product connector. Really, a connector comes in many forms: QR codes, RFIDs, NFC chips, machine vision we spoke about. And then you've got this digital layer. And this is what I talk about for digital passports, and you can own this thing, right? That's what crypto enables: ownership of digital items. Quickly, a few industry examples, 30 seconds left. There are already people playing in this game, in the fashion space, in the luxury space. You know, maybe on-chain is the next online, and this change in consumer behavior will be a big driver for web3 adoption beyond trading stonks and meme coins. And finally, why does eBay care? It's massive. We've got to be ahead of these trends. And why Web3? It's credibly neutral. It's a base layer where enemies can play together. Lots of great composable standards to build on, and interoperability and ownership. And that's it. Thank you very much. Thank you. That is very interesting and very real world. Do we have any questions in the audience? Let's see. Anyone? Okay, cool. Oh. Hello, James. Hello. Quick question. When do you imagine a global platform like eBay showing its regular users, I don't know, an icon or something verified that says this comes with a DPP? And of course you can check it visually, but when you acquire this pair of sneakers, this watch, whatever, well, you also acquire this digital passport. Is there some form of an estimated timeline? Yeah, so I guess we may already be playing in this place in pilots, but you may not notice.
And, you know, we also go down the mantra of, you know, eBay users don't really care exactly what we're doing, as long as they get lots of great value from it. So maybe we're already doing this and they don't know. And maybe next year it'll sort of be pushed up the rankings and things will become clearer. But there's also lots of luxury brands already playing in this place, is the reality. Just right over there. Ooh, nice. Hey, James. Based on eBay's sales numbers and that, what throughput do you think you need from the underlying technology to serve your users? Oh, that's a good question. Well, I guess, you know, to get to eBay scale, you know, the end game, it's hard to really talk about. It's a reality. It's hard to really say stuff in public. But, you know, eBay has a big, huge user base, billions of products, hundreds of millions of users. You know, in the last two years, with the rise of L2s, there's an actual credible path for how to achieve eBay scale, and that's what we'll be keeping an eye on as that space emerges. One more question, and the last one. Right over there, the gentleman in the cap. It's going to be from over there. Yes. Beautiful. I've got to admit I'm a bit confused as to what would be the advantage for the end user. This feels like a lot of overwhelming information that might not be tangibly beneficial. And it also, frankly, seems like a bit of a privacy nightmare. I can see how aggregating all of this kind of information in a not centralized, but central point of identity could be problematic. So are there any... Do you also envision a level of privacy associated with that? Yeah, so privacy first, very true. We think about this a lot. And is it right that you know this wallet owns 10 Rolexes? Probably not. But we look at technologies like providing proofs, selective disclosure, you know, zero knowledge stuff. You know, that's really where I think this will go. And it'll ultimately be at the behest of the holder how much information they should disclose. I do agree that privacy is a big issue in this physical space, much more than in digital, is the reality. I can't remember your first question, sorry. Okay, so that's it for our question session today. And please reach out to James if you have any further questions, any discussion you want to continue. Thank you again.
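The selective disclosure idea James sketches in his last answer can be illustrated with a simple commit-and-reveal toy. This is a hedged sketch under stated assumptions (the field names and the salted-hash scheme below are hypothetical, and a real deployment would more likely use zero-knowledge proofs rather than bare hash openings); it is not eBay's actual design:

```python
import hashlib

def commit(value: str, salt: str) -> str:
    # Salted hash commitment to a single passport attribute.
    return hashlib.sha256(f"{salt}:{value}".encode()).hexdigest()

# Private attribute data held by the product's owner: (value, salt) pairs.
passport = {
    "brand":  ("AcmeWatches", "salt-1"),
    "serial": ("SN-0042",     "salt-2"),
    "owner":  ("0xHolder",    "salt-3"),
}

# Only the commitments are published, e.g. anchored on a public chain:
public_commitments = {k: commit(v, s) for k, (v, s) in passport.items()}

# The holder chooses to reveal just the brand; a verifier rechecks the
# opened (value, salt) pair against the published commitment:
value, salt = passport["brand"]
assert commit(value, salt) == public_commitments["brand"]
print("brand verified without exposing serial or owner")
```

The point of the design is the one made in the answer above: disclosure stays at the behest of the holder, attribute by attribute, rather than the whole passport being public.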
Thank you.", "eventId": "devcon-7", - "slot_start": 1731581400000, - "slot_end": 1731582000000, + "slot_start": 1731567600000, + "slot_end": 1731568200000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Lf82ct08SpegO30t849kscAqeyNa8bTNVpMQ8ljElfA", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1oolmmoeS_8L3O435iq2vuXQPr9H_eWlvs-2T3XokFwU", + "resources_slides": "https://drive.google.com/file/d/1eQnfDENzYtiKGa7DHiOMGgj36xytcM4y/view", "speakers": [ - "kevin-owocki" + "james-morgan" ] }, "vector": [ @@ -256645,6 +255935,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -256657,9 +255948,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -256676,6 +255964,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -256703,11 +255992,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -256807,7 +256096,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -256844,7 +256132,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -257161,15 +256448,12 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, 0, 0, - 0, 2, 0, 0, @@ -257178,52 +256462,53 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "ebay-and-web3-powered-digital-product-passports-and-what-this-could-mean-for-the-future-of-commerce", - "sourceId": "DWMA3P", - "title": "eBay & web3 powered Digital Product Passports and what this could mean for the future of commerce?", - "description": "eBay is embracing web3 technologies to fulfil the vision of a truly connected product world. Digital Product Passports (DPPs) underpin this movement with a real world application of public blockchain technologies, tokenised products, attestation based technologies and selective disclosure schemes as the technology of choice.\r\n\r\nI will explore what this could mean for one of the world of ecommerce, why brands are embracing this movement and whats in it for the consumer.", - "track": "Real World Ethereum", + "id": "ecosystem-development-best-practices-and-why-we-need-to-start-with-builders-first", + "sourceId": "EY3HL9", + "title": "Ecosystem Development Best Practices, and why we need to start with builders first", + "description": "Given the myriad of chains out there, it is increasingly crucial for L2s to solidify their ecosystem building playbook and constantly refine it to win over (and more importantly, retain) users and builders. 
As an ecosystem builder in SEA (Thailand) who has worked with over 10 ecosystems including other L1s, on local, regional and global initiatives, I am excited to share the ins and outs of ecosystem building from a neutral perspective.", + "track": "Layer 2", "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Product", + "audience": "Business", "featured": false, "doNotRecord": false, "tags": [ - "Digital Sovereignty", - "Use Cases", - "Regulation", - "luxury", - "Digital Sovereignty", - "Regulation", - "Use Cases" + "Layer 2s", + "DevRel", + "Best Practices", + "management", + "stakeholder", + "Best Practices", + "DevRel", + "Layer 2s" ], "keywords": [ - "digital-product-passports", - "DPPs", - "luxury" + "Ecosystem Building", + "Ecosystem Design", + "Developer Experience", + "Stakeholder Management" ], - "duration": 537, + "duration": 407, "language": "en", - "sources_swarmHash": "642467b5a73ddf35ef37960830082f8c7ef102570bcd47092898616cdb785bc9", - "sources_youtubeId": "soZ5eIS2olw", + "sources_swarmHash": "3ca335e97a65bd21e260157bab87ec0fc8fb8c50e77214212c844d794eb17896", + "sources_youtubeId": "xqs8trszoOY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735a44f9dbb7a90e1e7e1ef", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735a44f9dbb7a90e1e7e1ef.vtt", - "transcript_text": " Hi everyone, thanks for coming to see my talk. So I guess we'll go straight into it, we've only got seven minutes. Yeah, so who am I? So my name is James. I am the engineering director for eBay's small but hopefully impactful Web3 team. I came from an acquisition about two years ago of an NFT platform called Known Origin. Now really myself and my team are really interested in looking at the Web3 landscape, looking at the eBay machine as it stands now, and seeing how this technology can potentially impact them and find value for buyers and sellers. And today I'm going to talk about a topic called digital product passports and why I intrinsically believe Web3 is the right place for them. And I'm going to touch on four topics today. A topic called connected products, digital product passports. We'll talk about Web3 and we'll talk about commerce. But first, I want to set the scene and provide a little bit of vision of what commerce could look like. So you've got to bear with me with this, right? So you walk into an office, you see your colleagues stood around a board, maybe you're discussing the latest meme coin. You've got your Vision Pro 5000s on, some AR-enabled glasses. Not only that, it's looked in your address book and it's noticed the PFPs of your colleagues and displayed them on screen. You know, shout out to any Xcopy or Squiggle holders in the room. Not only that, it's noticed that your friend Dan at the board has got a killer watch and the latest greatest in sneakers. These products are connected. You've got to find out more. You double tap your fingers. Some machine vision or AI vision scans these items, uniquely identifies them and starts working out exactly which unique product they are, not the product SKU, the actual unique product. After that, the digital product passport pops up, a digital layer linking the physical product directly in your face. Now, this is a vision piece only, right? But you can see the potential, you know, and you might notice as well, this is very much similar to how maybe NFT platforms looked trading NFTs a few years ago. From this digital link, you can see ownership. 
You might see if it's got a warranty. You might see trading activity on the secondary market, traceability, servicing insights. This is the vision that I want to talk about today, and this is my vision and potentially the future vision of how commerce might look in several years' time. But wait, before we go on, I think it's worth touching on some of the problems and challenges in commerce at the moment. And, you know, the first three topics, you know, the supply chain. The supply chain is very difficult to wrangle for physical goods, aggregating lots of disparate information. Maybe the digital product passport is the perfect layer for this. You know, circularity. I bought a good on the primary market. How do I sell it? There's many websites, many marketplaces, all with different pros and cons. Maybe the product passport is the place where circularity will start. Maybe it will also tell you how to recycle this good at the end of its life. Maybe this product passport's the place where you'll get additional utility after your first transactional engagement with that brand or with that shop. And finally, the last three, there's some emerging regulatory insights coming from the EU. So from 2026, 2027, the EU will mandate digital product passports in things like batteries, fashion, white goods, collectibles. Now, the goal is to make things more circular and give consumers more choice. Maybe a tokenized digital product passport is the place where that will happen. The last two are digital physical experiences. A lot of the NFT space sort of tried to tackle this over the last few years with some mixed success; maybe starting with a physical and adding on a digital is the right approach. And then interoperability for your items. For the things you own physically, how do you interop with the wider world? Again, maybe this digital layer. So this is the vision I presented. Slightly wacky, really lo-fi. You know, this is not what we're building. This is a vision piece. But really, what does it enable? It enables a digital layer on physical, which is interactive product experiences, real-time information about your goods, you know, empowering customers, and then interoperability, where you, you know, you can take your physical good and use it anywhere. And this is, again, what these could enable. And then really quickly, I've only got a minute left. Last but not least, what is this? So first of all, a physical item and a product connector. Really, a connector comes in many forms: QR codes, RFIDs, NFC chips, machine vision we spoke about. And then you've got this digital layer. And this is what I talk about for digital passports, and you can own this thing, right? That's what crypto enables: ownership of digital items. Quickly, a few industry examples, 30 seconds left. There are already people playing in this game, in the fashion space, in the luxury space. You know, maybe on-chain is the next online, and this change in consumer behavior will be a big driver for web3 adoption beyond trading stonks and meme coins. And finally, why does eBay care? It's massive. We've got to be ahead of these trends. And why Web3? It's credibly neutral. It's a base layer where enemies can play together. Lots of great composable standards to build on, and interoperability and ownership. And that's it. Thank you very much. Thank you. That is very interesting and very real world.
Do we have any questions in the audience? Let's see. Anyone? Okay, cool. Oh. Hello, James. Hello. Quick question. When do you imagine a global platform like eBay showing its regular users, I don't know, an icon or something verified that says this comes with a DPP? And of course you can check it visually, but when you acquire this pair of sneakers, this watch, whatever, well, you also acquire this digital passport. Is there some form of an estimated timeline? Yeah, so I guess we may already be playing in this place in pilots, but you may not notice. And, you know, we also go down the mantra of, you know, eBay users don't really care exactly what we're doing, as long as they get lots of great value from it. So maybe we're already doing this and they don't know. And maybe next year it'll sort of be pushed up the rankings and things will become clearer. But there's also lots of luxury brands already playing in this place, is the reality. Just right over there. Ooh, nice. Hey, James. Based on eBay's sales numbers and that, what throughput do you think you need from the underlying technology to serve your users? Oh, that's a good question. Well, I guess, you know, to get to eBay scale, you know, the end game, it's hard to really talk about. It's a reality. It's hard to really say stuff in public. But, you know, eBay has a big, huge user base, billions of products, hundreds of millions of users. You know, in the last two years, with the rise of L2s, there's an actual credible path for how to achieve eBay scale, and that's what we'll be keeping an eye on as that space emerges. One more question, and the last one. Right over there, the gentleman in the cap. It's going to be from over there. Yes. Beautiful. I've got to admit I'm a bit confused as to what would be the advantage for the end user. This feels like a lot of overwhelming information that might not be tangibly beneficial. And it also, frankly, seems like a bit of a privacy nightmare. I can see how aggregating all of this kind of information in a not centralized, but central point of identity could be problematic. So are there any... Do you also envision a level of privacy associated with that? Yeah, so privacy first, very true. We think about this a lot. And is it right that you know this wallet owns 10 Rolexes? Probably not. But we look at technologies like providing proofs, selective disclosure, you know, zero knowledge stuff. You know, that's really where I think this will go. And it'll ultimately be at the behest of the holder how much information they should disclose. I do agree that privacy is a big issue in this physical space, much more than in digital, is the reality. I can't remember your first question, sorry. Okay, so that's it for our question session today. And please reach out to James if you have any further questions, any discussion you want to continue. Thank you again.
Thank you.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731567600000, - "slot_end": 1731568200000, + "slot_start": 1731402000000, + "slot_end": 1731402600000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1oolmmoeS_8L3O435iq2vuXQPr9H_eWlvs-2T3XokFwU", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/12auC9dhscSkSUtYU1CqtRqHz64-ljDXc1f7otM8hLMw", + "resources_slides": "https://drive.google.com/file/d/1nw2s9vBzS30DiwEVaX24JFLSD7IyozOn/view", "speakers": [ - "james-morgan" + "nine-arnakorn" ] }, "vector": [ @@ -257233,8 +256518,8 @@ 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -258006,6 +257291,19 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -258057,7 +257355,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -258085,7 +257382,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -258138,6 +257434,21 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -258224,45 +257535,14 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 2, 0, 0, 0, @@ -258548,7 +257828,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -258558,52 +257837,51 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "ecosystem-development-best-practices-and-why-we-need-to-start-with-builders-first", - "sourceId": "EY3HL9", - "title": "Ecosystem Development Best Practices, and why we need to start with builders first", - "description": "Given the myriad of chains out there, it is increasingly crucial for L2s to solidify their ecosystem building playbook and constantly refine it to win over (and more importantly, retain) users and builders. 
As an ecosystem builder in SEA (Thailand) who has worked with over 10 ecosystems including other L1s, on local, regional and global initiatives, I am excited to share the ins and outs of ecosystem building from a neutral perspective.", - "track": "Layer 2", + "id": "eea-and-the-institutional-infinity-garden", + "sourceId": "JQBXXD", + "title": "EEA and the Institutional Infinity Garden", + "description": "This talk would be to give an overview on the latest from the Enterprise Ethereum Alliance, how the year has progressed in enterprise and how EEA seeks to support and guide institutions to participate in Ethereum's Infinity Garden.", + "track": "Real World Ethereum", "type": "Lightning Talk", "expertise": "Beginner", "audience": "Business", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", - "DevRel", - "Best Practices", - "management", - "stakeholder", - "Best Practices", - "DevRel", - "Layer 2s" + "Coordination", + "Vision", + "Use Cases", + "institutional", + "Coordination", + "Use Cases", + "Vision" ], "keywords": [ - "Ecosystem Building", - "Ecosystem Design", - "Developer Experience", - "Stakeholder Management" + "Business", + "Enterprise", + "Instituional" ], - "duration": 407, + "duration": 602, "language": "en", - "sources_swarmHash": "3ca335e97a65bd21e260157bab87ec0fc8fb8c50e77214212c844d794eb17896", - "sources_youtubeId": "xqs8trszoOY", + "sources_swarmHash": "627a8020ea8fffe7e60da9ea41e68e2239bf60b5384058c21bd9da1f40eec92e", + "sources_youtubeId": "dYgucH3a7sI", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731402000000, - "slot_end": 1731402600000, + "slot_start": 1731480000000, + "slot_end": 1731480600000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/12auC9dhscSkSUtYU1CqtRqHz64-ljDXc1f7otM8hLMw", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1f1uiHRqQIfhY0F3DmSPJ-wpu3lCdP00YCID-a2-UblQ", + "resources_slides": "https://drive.google.com/file/d/1C3UMP1JMq728oimD3Zqw0gSUYzmwFURN/view", "speakers": [ - "nine-arnakorn" + "karen-scarbrough" ] }, "vector": [ @@ -258613,7 +257891,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -258891,9 +258168,8 @@ 0, 0, 0, - 6, - 0, 0, + 6, 0, 0, 0, @@ -259389,7 +258665,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -259424,7 +258699,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -259438,6 +258712,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -259513,6 +258788,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -259532,7 +258808,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -259591,6 +258866,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -259641,9 +258917,6 @@ 0, 0, 2, - 2, - 0, - 0, 0, 0, 0, @@ -259943,45 +259216,49 @@ }, { "session": { - "id": "eea-and-the-institutional-infinity-garden", - "sourceId": "JQBXXD", - "title": "EEA and the Institutional Infinity Garden", - "description": "This talk would be to give an overview on the latest from the Enterprise Ethereum Alliance, how the year has progressed in enterprise and how EEA seeks to support and guide institutions to participate in Ethereum's Infinity Garden.", - "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Business", + "id": "efficient-non-native-snark-recursion-using-bivariate-polynomial-testing", + "sourceId": "E8KYKE", + "title": "Efficient non-native SNARK recursion using bivariate polynomial testing", + "description": "Efficient SNARK recursion requires switching between pairing friendly elliptic curves. 
In most optimal approaches these curves would construct a cycle, but there are no such known cycles. Instead, we use non-native arithmetic to brute force the pairing computation at the cycle cut-off.\r\nWe describe an approach for combining direct field extension with polynomial-based non-native arithmetic. This reduces pairing computation to bivariate polynomial identity testing using Schwartz-Zippel lemma.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Coordination", - "Vision", - "Use Cases", - "institutional", - "Coordination", - "Use Cases", - "Vision" + "ZKP", + "Cryptography", + "SNARK", + "zk", + "based", + "pairing", + "Cryptography", + "SNARK", + "ZKP" ], "keywords": [ - "Business", - "Enterprise", - "Instituional" + "Pairing", + "based", + "ZK" ], - "duration": 602, + "duration": 1510, "language": "en", - "sources_swarmHash": "627a8020ea8fffe7e60da9ea41e68e2239bf60b5384058c21bd9da1f40eec92e", - "sources_youtubeId": "dYgucH3a7sI", + "sources_swarmHash": "a458b8ba83746ad8de60ca9f7be70aed5513ad7a20085623774277b867f33c48", + "sources_youtubeId": "ylmiiUnwhrE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "67343a9c9dbb7a90e193e070", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731480000000, - "slot_end": 1731480600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1f1uiHRqQIfhY0F3DmSPJ-wpu3lCdP00YCID-a2-UblQ", - "resources_slides": null, + "slot_start": 1731474000000, + "slot_end": 1731475800000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1uBrjsIa4svOJ9BePcS4YgEcFXFjVxeeeS9RBVSKBwzw", + "resources_slides": "https://drive.google.com/file/d/1X08XqyRL51PhoFe6KKsGUJocEz9lmnrG/view", "speakers": [ - "karen-scarbrough" + "ivo-kubjas" ] }, "vector": [ @@ -259991,11 +259268,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -260746,6 +260023,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -260777,6 +260055,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -260806,6 +260085,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -260815,10 +260095,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -260891,7 +260167,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -260969,7 +260244,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -261021,6 +260295,8 @@ 0, 0, 2, + 2, + 2, 0, 0, 0, @@ -261295,9 +260571,7 @@ 0, 0, 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -261306,8 +260580,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -261321,58 +260593,44 @@ }, { "session": { - "id": "efficient-non-native-snark-recursion-using-bivariate-polynomial-testing", - "sourceId": "E8KYKE", - "title": "Efficient non-native SNARK recursion using bivariate polynomial testing", - "description": "Efficient SNARK recursion requires switching between pairing friendly elliptic curves. In most optimal approaches these curves would construct a cycle, but there are no such known cycles. Instead, we use non-native arithmetic to brute force the pairing computation at the cycle cut-off.\r\nWe describe an approach for combining direct field extension with polynomial-based non-native arithmetic. 
This reduces pairing computation to bivariate polynomial identity testing using Schwartz-Zippel lemma.", - "track": "Applied Cryptography", + "id": "eip-7251-maximum-effective-balance-overview", + "sourceId": "BBFNLG", + "title": "EIP-7251 - Maximum effective balance overview", + "description": "An overview of the maximum effective balance change coming in Electra.\r\nAt a high level, other considerations that were required to allow the maximum effective balance increase in Electra, and ensure that it delivers value.", + "track": "Core Protocol", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Stakers/Validators", "featured": false, "doNotRecord": false, "tags": [ - "ZKP", - "Cryptography", - "SNARK", - "zk", - "based", - "pairing", - "Cryptography", - "SNARK", - "ZKP" + "Core Protocol", + "Staking", + "Pectra", + "Core Protocol", + "Staking" ], "keywords": [ - "Pairing", - "based", - "ZK" + "Pectra" ], - "duration": 1510, + "duration": 1218, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "7232962eceb9c9b07027a3ebb1759835c57a4c1aacf89e245dbceaca4a6ae4dc", + "sources_youtubeId": "EwW6dNi9VCY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67343a9c9dbb7a90e193e070", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731474000000, - "slot_end": 1731475800000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1uBrjsIa4svOJ9BePcS4YgEcFXFjVxeeeS9RBVSKBwzw", - "resources_slides": null, + "slot_start": 1731394800000, + "slot_end": 1731396600000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1Q5srMGhMm8grwI_O0CFKN_QN1QRx24-AxIwgbDha6U0", + "resources_slides": "https://drive.google.com/file/d/1S13Yqk1IjwpO7lSpWk2sO4VyGjL2CHVx/view", "speakers": [ - "ivo-kubjas" + "paul-harris" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -261653,35 +260911,13 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -262131,7 +261367,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -262193,7 +261428,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -262274,6 +261508,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -262403,9 +261638,6 @@ 0, 0, 0, - 2, - 2, - 2, 0, 0, 0, @@ -262434,6 +261666,30 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -262681,7 +261937,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -262695,6 +261950,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -262703,41 +261962,41 @@ }, { "session": { - "id": "eip-7251-maximum-effective-balance-overview", - "sourceId": "BBFNLG", - "title": "EIP-7251 - Maximum effective balance overview", - "description": "An overview of the maximum effective balance change coming in Electra.\r\nAt a high level, other considerations that were required to allow the maximum effective balance increase in Electra, and ensure that it delivers value.", + "id": "eip-7702-a-technical-deep-dive", + "sourceId": "NNNPLC", + "title": "EIP-7702: a technical deep dive", + "description": "We'll discuss some of the design goals that lead to EIP-7702, how it works, and what will be possible for users after the Pectra network upgrade.", "track": "Core Protocol", "type": "Talk", - "expertise": "Intermediate", - "audience": 
"Stakers/Validators", + "expertise": "Expert", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "Core Protocol", - "Staking", - "Pectra", - "Core Protocol", - "Staking" + "Account Abstraction", + "eip", + "Account Abstraction", + "Core Protocol" ], "keywords": [ - "Pectra" + "EIP" ], - "duration": 1218, + "duration": 1299, "language": "en", - "sources_swarmHash": "7232962eceb9c9b07027a3ebb1759835c57a4c1aacf89e245dbceaca4a6ae4dc", - "sources_youtubeId": "EwW6dNi9VCY", + "sources_swarmHash": "d4c1051f49830760c82a47ec5d0413b0d5fef571e4c09d5a7a0c76f69753c619", + "sources_youtubeId": "_k5fKlKBWV4", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731394800000, - "slot_end": 1731396600000, + "slot_start": 1731393000000, + "slot_end": 1731394800000, "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1Q5srMGhMm8grwI_O0CFKN_QN1QRx24-AxIwgbDha6U0", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/15huammnvrT8ljoiAi9Bnn4jcV_r6L0sm3_gBK-LqQ-4", + "resources_slides": "https://drive.google.com/file/d/1iV1j274j_y7KL7GOTk2YJcE-qQ-WxjoE/view", "speakers": [ - "paul-harris" + "lightclient" ] }, "vector": [ @@ -262905,6 +262164,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -263028,10 +262288,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -263543,6 +262799,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -263621,7 +262878,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -264055,17 +263311,15 @@ 0, 0, 0, - 2, - 0, - 0, 0, + 2, + 2, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -264077,50 +263331,45 @@ }, { "session": { - "id": "eip-7702-a-technical-deep-dive", - "sourceId": "NNNPLC", - "title": "EIP-7702: a technical deep dive", - "description": "We'll discuss some of the design goals that lead to EIP-7702, how it works, and what will be possible for users after the Pectra network upgrade.", - "track": "Core Protocol", + "id": "eip-7732-enshrined-proposer-builder-separation", + "sourceId": "TKBF9R", + "title": "[EIP-7732] enshrined Proposer Builder Separation", + "description": "ePBS implementation in Prysm and Nimbus, fundamentally aimed at solving about solving trust issues. We're gonna discuss the block-auction, slot-auction and the approach proposed by Francesco during the cohort. 
Some technical challenges and problems that we came across like separating EL and CL block, PTC committee etc.", + "track": "[CLS] EPF Day", "type": "Talk", - "expertise": "Expert", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ + "Censorship Resistance", + "Consensus", "Core Protocol", - "Account Abstraction", - "eip", - "Account Abstraction", - "Core Protocol" + "PBS" ], "keywords": [ - "EIP" + "ePBS", + "EIP-7732" ], - "duration": 1299, + "duration": 751, "language": "en", - "sources_swarmHash": "d4c1051f49830760c82a47ec5d0413b0d5fef571e4c09d5a7a0c76f69753c619", - "sources_youtubeId": "_k5fKlKBWV4", + "sources_swarmHash": "e326ff4a5c85f7cfdcbb4ccebbd229632df88de258c3b4daa59aac0bad48ad30", + "sources_youtubeId": "r-ku7h6bC8M", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6734486c9dbb7a90e1866fa0", "eventId": "devcon-7", - "slot_start": 1731393000000, - "slot_end": 1731394800000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/15huammnvrT8ljoiAi9Bnn4jcV_r6L0sm3_gBK-LqQ-4", - "resources_slides": null, + "slot_start": 1731477600000, + "slot_end": 1731478500000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1XP6W6A3-lCz0aeamZyGShkdG9rB-Lpip1Ceasz22olM", + "resources_slides": "https://drive.google.com/file/d/1xx07mqLAHk-KjUlkJzkvaImI4kX2luiq/view", "speakers": [ - "lightclient" + "kira", + "caleb" ] }, "vector": [ - 0, - 0, - 0, - 0, - 6, - 0, 0, 0, 0, @@ -264136,6 +263385,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -264282,7 +263532,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -264408,6 +263657,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -264872,6 +264123,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -264905,6 +264157,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -264917,9 +264170,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -265016,6 +264266,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -265155,7 +264406,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -265429,11 +264679,11 @@ 0, 0, 0, + 2, 0, 0, 0, 2, - 2, 0, 0, 0, @@ -265451,56 +264701,55 @@ }, { "session": { - "id": "eip-7732-enshrined-proposer-builder-separation", - "sourceId": "TKBF9R", - "title": "[EIP-7732] enshrined Proposer Builder Separation", - "description": "ePBS implementation in Prysm and Nimbus, fundamentally aimed at solving about solving trust issues. We're gonna discuss the block-auction, slot-auction and the approach proposed by Francesco during the cohort. Some technical challenges and problems that we came across like separating EL and CL block, PTC committee etc.", - "track": "[CLS] EPF Day", + "id": "eips-simplified-history-and-process-explained", + "sourceId": "TBY8MK", + "title": "EIPs Simplified: History and Process Explained", + "description": "It is planned to be an easy-to-understand session about Ethereum Improvement Proposals (EIPs). We'll explore the interesting history of EIPs and the important moments that have shaped different types and categories of proposals. Learn how EIPs go from an idea to becoming part of the Ethereum network, and see how editors help improve the standardization process. 
This talk is perfect for anyone who wants to learn about EIPs without getting into technical details.", + "track": "Core Protocol", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", - "featured": false, + "audience": "Community", + "featured": true, "doNotRecord": false, "tags": [ - "Censorship Resistance", - "Consensus", "Core Protocol", - "PBS" + "ACD", + "Coordination", + "Governance", + "improvement", + "eip", + "processes", + "ACD", + "Coordination", + "Core Protocol", + "Governance" ], "keywords": [ - "ePBS", - "EIP-7732" + "EIP", + "Process", + "Improvement" ], - "duration": 751, + "duration": 125, "language": "en", - "sources_swarmHash": "e326ff4a5c85f7cfdcbb4ccebbd229632df88de258c3b4daa59aac0bad48ad30", - "sources_youtubeId": "r-ku7h6bC8M", + "sources_swarmHash": "801854695e0493469f2bb74493e3223d05e9df3dfa70606f5dfd12f0a17381bc", + "sources_youtubeId": "xycI1vbxJo8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734486c9dbb7a90e1866fa0", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731477600000, - "slot_end": 1731478500000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1XP6W6A3-lCz0aeamZyGShkdG9rB-Lpip1Ceasz22olM", - "resources_slides": null, + "slot_start": 1731389400000, + "slot_end": 1731391200000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1kJKEZ4wRwEX_SUXgxNa4xYGnxsnpoukmIzmPr2XQ64A", + "resources_slides": "https://drive.google.com/file/d/1wR2iCzSPkrV3tMb5x2cZ2HsX0jqLE_zn/view", "speakers": [ - "kira", - "caleb" + "hudson-jameson", + "pooja-ranjan" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -265778,18 +265027,6 @@ 0, 0, 0, - 6, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -265802,6 +265039,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -266246,7 +265485,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -266260,7 +265498,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -266357,38 +265594,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -266448,6 +265653,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -266580,6 +265786,52 @@ 0, 0, 0, + 2, + 2, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -266804,7 +266056,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -266817,6 +266068,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -266826,63 +266081,48 @@ }, { "session": { - "id": "eips-simplified-history-and-process-explained", - "sourceId": "TBY8MK", - "title": "EIPs Simplified: History and Process Explained", - "description": "It is planned to be an easy-to-understand session about Ethereum Improvement Proposals (EIPs). We'll explore the interesting history of EIPs and the important moments that have shaped different types and categories of proposals. Learn how EIPs go from an idea to becoming part of the Ethereum network, and see how editors help improve the standardization process. 
This talk is perfect for anyone who wants to learn about EIPs without getting into technical details.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Intermediate", + "id": "elevate-your-vibration-reggae-sesh-w-rafamilkz-and-friends", + "sourceId": "NNFDDB", + "title": "Elevate your vibration! (reggae-sesh w/ rafamilkz & friends)", + "description": "A reggae jam music sesh performed with soul and heart by web 3 builders & musicians, with the goal of elevating the vibration of Devcon, fostering an environment of peace, love and community! \r\nI have a list of songs to play (guitar and voice), and will have other musicians (cheers to Shaka!) to perform with me and increase the vibrations!", + "track": "Entertainment", + "type": "Music", + "expertise": "Beginner", "audience": "Community", - "featured": true, + "featured": false, "doNotRecord": false, - "tags": [ - "Core Protocol", - "ACD", - "Coordination", - "Governance", - "improvement", - "eip", - "processes", - "ACD", - "Coordination", - "Core Protocol", - "Governance" - ], "keywords": [ - "EIP", - "Process", - "Improvement" + "music", + "relaxation", + "fun" + ], + "tags": [ + "Art", + "Marketing" ], - "duration": 125, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731389400000, - "slot_end": 1731391200000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1kJKEZ4wRwEX_SUXgxNa4xYGnxsnpoukmIzmPr2XQ64A", - "resources_slides": null, "speakers": [ - "hudson-jameson", - "pooja-ranjan" - ] + "rafamilkz" + ], + "eventId": "devcon-7", + "slot_start": 1731574800000, + "slot_end": 1731577500000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/14nyL7Ln8KMC-c1thokTKnggtUR8lxRb5WI3bRH2a-uQ", + "resources_slides": "" }, "vector": [ 0, 0, 0, 0, - 6, 0, 0, 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -267163,7 +266403,6 @@ 0, 0, 0, - 6, 6, 0, 0, @@ -267643,7 +266882,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -267720,7 +266958,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -267731,6 +266968,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -267779,7 +267017,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -267913,12 +267150,9 @@ 0, 0, 0, - 2, - 2, - 2, - 2, 0, 0, + 2, 0, 0, 0, @@ -268187,9 +267421,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -268209,34 +267443,46 @@ }, { "session": { - "id": "elevate-your-vibration-reggae-sesh-w-rafamilkz-and-friends", - "sourceId": "NNFDDB", - "title": "Elevate your vibration! (reggae-sesh w/ rafamilkz & friends)", - "description": "A reggae jam music sesh performed with soul and heart by web 3 builders & musicians, with the goal of elevating the vibration of Devcon, fostering an environment of peace, love and community! \r\nI have a list of songs to play (guitar and voice), and will have other musicians (cheers to Shaka!) to perform with me and increase the vibrations!", - "track": "Entertainment", - "type": "Music", - "expertise": "Beginner", - "audience": "Community", + "id": "elliptic-curves-and-snarks-past-present-and-future", + "sourceId": "Y3PMMA", + "title": "Elliptic curves and SNARKs: past, present and future.", + "description": "Elliptic curves are used in many proof systems. Some systems (e.g. Bulletproofs) use plain curves (e.g. ed25519). Some (e.g. Groth16, KZG-PLONK) use pairing-friendly curves (e.g. BLS12-381). Some recursive systems require pairing-friendly 2-cycle (e.g. MNT4/6) or 2-chains (e.g. BLS12-377/BW6-761). 
Some other recursive/folding systems require plain 2-cycle (e.g. Pasta). In this talk we will go through the difference between these curves and why there isn't a silver bullet curve for all scenarios.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "music", - "relaxation", - "fun" - ], "tags": [ - "Art", - "Marketing" + "ZKP", + "Cryptography", + "SNARK", + "elliptic", + "curves", + "Cryptography", + "SNARK", + "ZKP" ], - "language": "en", - "speakers": [ - "rafamilkz" + "keywords": [ + "elliptic", + "curves" ], + "duration": 1518, + "language": "en", + "sources_swarmHash": "d418d4f93106c8a1c844d7ddadd6ef00204c7d15d551d1e3a9732f82c007bf46", + "sources_youtubeId": "Bey043R_52k", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731574800000, - "slot_end": 1731577500000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/14nyL7Ln8KMC-c1thokTKnggtUR8lxRb5WI3bRH2a-uQ" + "slot_start": 1731405600000, + "slot_end": 1731407400000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/15MaGmHzAvHj765BvqDHs0ZxiiGevi3H9hscAvkCAGTc", + "resources_slides": "https://drive.google.com/file/d/1EAw9Aa4uOHBB6FS3nuABbJHpHCyd6S7R/view", + "speakers": [ + "youssef-el-housni" + ] }, "vector": [ 0, @@ -268248,8 +267494,8 @@ 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -269000,6 +268246,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -269061,6 +268308,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -269098,12 +268346,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -269275,6 +268517,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -269284,6 +268527,7 @@ 0, 0, 2, + 2, 0, 0, 0, @@ -269550,8 +268794,7 @@ 0, 0, 0, - 0, - 0, + 2, 0, 0, 0, @@ -269562,8 +268805,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -269575,46 +268816,26 @@ }, { "session": { - "id": "elliptic-curves-and-snarks-past-present-and-future", - "sourceId": "Y3PMMA", - "title": "Elliptic curves and SNARKs: past, present and future.", - "description": "Elliptic curves are used in many proof systems. Some systems (e.g. Bulletproofs) use plain curves (e.g. ed25519). Some (e.g. Groth16, KZG-PLONK) use pairing-friendly curves (e.g. BLS12-381). Some recursive systems require pairing-friendly 2-cycle (e.g. MNT4/6) or 2-chains (e.g. BLS12-377/BW6-761). Some other recursive/folding systems require plain 2-cycle (e.g. Pasta). 
In this talk we will go through the difference between these curves and why there isn't a silver bullet curve for all scenarios.", - "track": "Applied Cryptography", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "embodiment-practice", + "sourceId": "LNF8NE", + "title": "Embodiment Practice", + "description": "By master Zoe\r\n- Gentle guided stretches to connect with the body and open different energy channels\r\n- A blend of embodiment, asana, meditation, breathwork, tapping, and somatics to weave together mind, body, and soul\r\n\r\nNov 13 10:30 - 11:15", + "track": "Entertainment", + "type": "Mixed Formats", + "expertise": "Beginner", + "audience": "Hobby", "featured": false, "doNotRecord": false, - "tags": [ - "ZKP", - "Cryptography", - "SNARK", - "elliptic", - "curves", - "Cryptography", - "SNARK", - "ZKP" - ], - "keywords": [ - "elliptic", - "curves" - ], - "duration": 1518, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "d418d4f93106c8a1c844d7ddadd6ef00204c7d15d551d1e3a9732f82c007bf46", - "sources_youtubeId": "Bey043R_52k", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731405600000, - "slot_end": 1731407400000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/15MaGmHzAvHj765BvqDHs0ZxiiGevi3H9hscAvkCAGTc", - "resources_slides": null, - "speakers": [ - "youssef-el-housni" - ] + "slot_start": 1731468600000, + "slot_end": 1731471300000, + "slot_roomId": "decompression-room", + "resources_presentation": "https://docs.google.com/presentation/d/16hER2e4hzqPjZrObAFmLsPIfyEkBHspMF-2HfxORQAg", + "resources_slides": "" }, "vector": [ 0, @@ -269626,7 +268847,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -269910,7 +269130,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -270381,7 +269600,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -270443,7 +269661,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -270653,22 +269870,23 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 2, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -270935,14 +270153,12 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -270953,32 +270169,48 @@ }, { "session": { - "id": "embodiment-practice", - "sourceId": "LNF8NE", - "title": "Embodiment Practice", - "description": "By master Zoe\r\n- Gentle guided stretches to connect with the body and open different energy channels\r\n- A blend of embodiment, asana, meditation, breathwork, tapping, and somatics to weave together mind, body, and soul\r\n\r\nNov 13 10:30 - 11:15", - "track": "Entertainment", - "type": "Mixed Formats", - "expertise": "Beginner", - "audience": "Hobby", + "id": "emilie-making-sure-eof-is-done-right", + "sourceId": "A9UWAY", + "title": "Emilie - Making sure EOF is done right", + "description": "We present Emilie. Emilie is designed to ensure the correct implementation of the EVM Object Format (EOF) by testing compilers and execution clients. It re-executes mainnet transactions using EOF bytecode instead of original bytecode, comparing results and performance with the original execution.\r\nEmilie tests interactions between EOF and legacy contracts using real data. 
It supports recompilation for Solidity and Vyper, enabling it to find bugs across compilers and execution clients.", + "track": "Core Protocol", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Core Protocol", + "ACD", + "Testing", + "eof", + "ACD", + "Core Protocol", + "Testing" + ], + "keywords": [ + "EOF" + ], + "duration": 461, "language": "en", - "speakers": [], + "sources_swarmHash": "cec267e1746d3de31fb7bdb80db78174214955db002b55c99c107ca8180c490a", + "sources_youtubeId": "igLOej4GFV0", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673594ae9dbb7a90e1313871", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673594ae9dbb7a90e1313871.vtt", + "transcript_text": " Yeah, hello everyone. It's my distinct pleasure to today introduce Emily, which is our small contribution to hopefully make EOF safe, so to make sure everything in EOF works properly. So not just right before me, but today already multiple times, EOF has been discussed. So in case you just walked in because you were especially interested in Emily, I'll give you a short recap. So what is EOF? So there are two talks listed here by Dan O'Farran and also from this morning, which are really good and really summarize it well. But for the purpose of this talk, we're just going to say EUF is a big change in EVM. So what is the problem with that? Because as in with any big change in EVM, a lot can go wrong. So we have a lot of different smart contracts, right? And we are using different compilers. I mean, there's Solidity and there's Viper, and they have a bunch of different versions. They have different optimization features and so on. So we actually have quite a few compilers that we're using. And then out of that, we get lots of different bytecodes. And these bytecodes we execute on different execution clients. And on these execution clients, these new bytecodes interact with many different existing contracts. So we have a horribly big search space to look for things that can go wrong, right? And experience shows that things do go wrong. And unfortunately, things don't always detect it during testing because we had chain splits after certain hard forks on chain. So what do we think can go wrong here? So first of all, there has been some discussions on what compilers should support, so we want to figure out what is actually supported so that users can have a seamless transition to EOF. In the bytecodes, of course, we can have incorrect compilation. We have totally new features. We might very well see incorrect compilation. And in the clients, we also have very new implementations. It's not implausible that we see incorrect execution. So all of these would be horrible. They would lead to chain splits. And then lastly, we might have incorrect interaction, right? So you're calling an existing old contract from your new EOF contract. Maybe that doesn't work the way you thought it would work. So now, how can we fix all that? So that's why we're here, to improve stuff. So first of all, is this news? Well, no, of course not. So there is already a lot of testing going on, right? So there's the testing team and lots of tests are being written. 
But we think that Emilie can improve the situation because we have learned in the past that what really helps is real contracts with real data and real interactions, because that's in the end what we want to be sure works on chain, right? Because that's what might cause the chain split. So what does Emilie do? So here we have an existing mainnet transaction, which you are all familiar with. So now what does Emilie do exactly? So we take contract A and we recompile it to EOF and we execute it again. So now we can compare these two executions, right? Because now we actually have a really good idea of what correctness should look like, because we have the execution above, which is the current execution, and we can compare all the parts. So we can compare what does contract A do, what does it write to storage, how do all the call data and return data look, how is the output, how is the success flag, and so on. So we have a great correctness reference here. So Emilie checks a bunch of things. I mentioned some of them already. So obviously Emilie checks all the storage changes. Emilie checks which events are emitted. Emilie checks the call data. Emilie checks the return data and the execution status. And lastly, what we also found quite interesting while developing is that we can monitor some of the gas costs. So we can see whether overall EOF execution is on average cheaper or more expensive than the previous execution. And with that we can then find all these things that we're really concerned about: missing compiler features, incorrect compilation, incorrect execution, incorrect interaction. So all these horrible things we want to avoid, we can hopefully find before they are happening on-chain. So with that, Emilie contributes to the security of EOF. Of course, as is the usual case, things are not quite as simple, so a bunch of things can go wrong. I'm happy to discuss them a little bit more during the questions, but one of the things that can go wrong is, as was previously discussed, the GAS opcode is disappearing, but currently it's still there, and it's causing a bunch of problems, as was just discussed in the previous talk. But yeah, we have some countermeasures for that, so I'm happy to discuss this in the questions. And with that, I close, and thank you for your attention. Thank you, thank you. So, let's start with the questions. Are there instances where one may want to rewrite their contracts for EOF for better performance or other reasons? Yeah, I guess we're going to see. I think the truth is that we don't know exactly what the numbers are going to be like, because we haven't seen the latest compiler versions and we haven't seen the full EOF spec. But I think that's quite possible, because it's feasible that there might be contracts which are significantly cheaper when executed in EOF, yes. So is the recompilation to EOF done from source code? Where do you get the source code? Yeah, excellent question. So we get the source code from Etherscan, and also other sources, but one of the sources is Etherscan. So obviously we don't have source code for every contract, but that's also not necessary. 
I mean, we just want to test as much as possible, and it's actually good that we don't have source code for everything, because we we as I said we also want to test this interaction with legacy contracts so we don't want to necessarily recompile everything but yeah we recompile some and then test the interaction. If Emily compiles legacy bytecode to EUF how does it cuts compiler bugs from a high-level language to EUF? Yeah, so basically we take the code and we previously ran it with the real data, right? So, for example, let's consider a transfer, an ESC20 transfer. And let's say the ESC20 transfer now works incorrectly when compiled with EUF. Now what we would see is we would, for example, see a difference in the storage. And because we check how was the storage changed originally, and now we check how is it changed now that we ran it based on EUF, we can detect those kind of correctness issues.", "eventId": "devcon-7", - "slot_start": 1731468600000, - "slot_end": 1731471300000, - "slot_roomId": "decompression-room", - "resources_presentation": "https://docs.google.com/presentation/d/16hER2e4hzqPjZrObAFmLsPIfyEkBHspMF-2HfxORQAg" + "slot_start": 1731562800000, + "slot_end": 1731563400000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/17yJsWv6HioxijpCWnMnLMPeQFMTy_KMomUQHF2n1hS8", + "resources_slides": "https://drive.google.com/file/d/1d3Itf_V91wqf2Ls9R_iXHGlVEesGPbtQ/view", + "speakers": [ + "hubert-ritzdorf" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -271272,6 +270504,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -271745,6 +270978,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -271969,6 +271203,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -272013,11 +271248,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -272283,10 +271520,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -272299,8 +271533,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -272310,50 +271542,52 @@ }, { "session": { - "id": "emilie-making-sure-eof-is-done-right", - "sourceId": "A9UWAY", - "title": "Emilie - Making sure EOF is done right", - "description": "We present Emilie. Emilie is designed to ensure the correct implementation of the EVM Object Format (EOF) by testing compilers and execution clients. It re-executes mainnet transactions using EOF bytecode instead of original bytecode, comparing results and performance with the original execution.\r\nEmilie tests interactions between EOF and legacy contracts using real data. It supports recompilation for Solidity and Vyper, enabling it to find bugs across compilers and execution clients.", - "track": "Core Protocol", - "type": "Lightning Talk", + "id": "empirical-analysis-of-amm-loss-versus-rebalancing-on-layer-2-chains", + "sourceId": "T8BXK3", + "title": "Empirical analysis of AMM loss versus rebalancing on layer 2 chains", + "description": "This talk presents an empirical analysis of Loss versus Rebalancing (LVR) on Arbitrum, Base and Ethereum. 
It compares the realised and theoretical LVR; along with the arbitrage profits from CEX-DEX/Perpetual; then further reveals whether the frequency of delta-hedging, the pool liquidity and the block time difference lead to better or worse LVR.", + "track": "Cryptoeconomics", + "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Core Protocol", - "ACD", - "Testing", - "eof", - "ACD", - "Core Protocol", - "Testing" + "Layer 2s", + "Cross-L2", + "MEV", + "AMMs", + "cross-domain", + "arbitrage", + "AMMs", + "Cross-L2", + "Layer 2s", + "MEV" ], "keywords": [ - "EOF" + "loss versus rebalancing", + "cross-domain arbitrage" ], - "duration": 461, + "duration": 1415, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "d55e25ae416289798534dbe4778dc58bffac84dfc55980a2773e6e72c56dafe6", + "sources_youtubeId": "ArILIuH7G2U", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673594ae9dbb7a90e1313871", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673594ae9dbb7a90e1313871.vtt", - "transcript_text": " Yeah, hello everyone. It's my distinct pleasure to today introduce Emily, which is our small contribution to hopefully make EOF safe, so to make sure everything in EOF works properly. So not just right before me, but today already multiple times, EOF has been discussed. So in case you just walked in because you were especially interested in Emily, I'll give you a short recap. So what is EOF? So there are two talks listed here by Dan O'Farran and also from this morning, which are really good and really summarize it well. But for the purpose of this talk, we're just going to say EUF is a big change in EVM. So what is the problem with that? Because as in with any big change in EVM, a lot can go wrong. So we have a lot of different smart contracts, right? And we are using different compilers. I mean, there's Solidity and there's Viper, and they have a bunch of different versions. They have different optimization features and so on. So we actually have quite a few compilers that we're using. And then out of that, we get lots of different bytecodes. And these bytecodes we execute on different execution clients. And on these execution clients, these new bytecodes interact with many different existing contracts. So we have a horribly big search space to look for things that can go wrong, right? And experience shows that things do go wrong. And unfortunately, things don't always detect it during testing because we had chain splits after certain hard forks on chain. So what do we think can go wrong here? So first of all, there has been some discussions on what compilers should support, so we want to figure out what is actually supported so that users can have a seamless transition to EOF. In the bytecodes, of course, we can have incorrect compilation. We have totally new features. We might very well see incorrect compilation. And in the clients, we also have very new implementations. It's not implausible that we see incorrect execution. So all of these would be horrible. They would lead to chain splits. And then lastly, we might have incorrect interaction, right? So you're calling an existing old contract from your new EOF contract. Maybe that doesn't work the way you thought it would work. So now, how can we fix all that? So that's why we're here, to improve stuff. So first of all, is this news? 
Well, no, of course not. So there is already a lot of testing going on, right? So there's the testing team and lots of tests are being written. But we think that Emily can improve the situation because we, in the past past have learned that what really helps is real contracts with real data and real interactions because that's in the end what we want to be sure that works on chain right because that's what might cause the chain split so what does Emily do so here we have an existing mainnet transaction, which you are all familiar with. So now what does Emily do exactly? So we take contract A and we recompile it to EOF and we execute it again. So now we can compare these two executions, right? Because now we actually have a really good idea of what correctness should look like because we have the execution above, which is the current execution, and we can compare all the parts. So we can compare what does contract A do, what does it write to storage, how do all the call data and return data look, how is the output, how is the success flex, and so on. So we have a great correctness reference here. So Emily checks a bunch of things. I mentioned some of them already. So obviously Emily checks all the storage changes. Emily checks which events are emitted. Emily checks the call data. Emily checks the return data and the execution status. And lastly, what we also found quite interesting while developing is that we can monitor some of the gas costs. So we can see overall is UF execution on average cheaper or more expensive than the previous execution. And with that we can then find all these things that we're really concerned about. Missing compiler features, incorrect compilation, incorrect execution, incorrect interaction. So all these horrible things we're really concerned about, right? Missing compiler features, incorrect compilation, incorrect execution, incorrect interaction. So all these horrible things we want to avoid, we can hopefully find before they are happening on-chain. So with that, Emily contributes to the security of EOF. Of course, as is the usual case, things are not quite as simple, so a bunch of things can go wrong. I'm happy to discuss them a little bit more during the questions, but one of the things that can go wrong is as was previously discussed, the gas oil code is disappearing, but currently it's still there, and it's causing a bunch of problems, as was just discussed in the previous talk. But yeah, we have some countermeasures for that, so I'm happy to discuss this in the questions. And with that, I close, and thank you for your attention. Thank you, thank you. So, let's start with the questions. Are there instances where one may want to rewrite their contracts for EOF for better performance or other reasons? Yeah, I guess we're going to see. I think the truth is that we don't know exactly what the numbers are going to be like because we haven't seen the latest compiler versions and we haven't seen the full EOF spec. But I think that's quite possible, because it's feasible that there might be contracts which are significantly cheaper when executed in EOF, yes. So is the recompilation to EOF done from source code? Where do you get the source code? Yeah, excellent question. So we get the source code from Etherscan, and also other sources, but one of the sources is Etherscan. So obviously we don't have source code for every contract, but that's also not necessary. 
I mean, we just want to test as much as possible, and it's actually good that we don't have source code for everything, because we we as I said we also want to test this interaction with legacy contracts so we don't want to necessarily recompile everything but yeah we recompile some and then test the interaction. If Emily compiles legacy bytecode to EUF how does it cuts compiler bugs from a high-level language to EUF? Yeah, so basically we take the code and we previously ran it with the real data, right? So, for example, let's consider a transfer, an ESC20 transfer. And let's say the ESC20 transfer now works incorrectly when compiled with EUF. Now what we would see is we would, for example, see a difference in the storage. And because we check how was the storage changed originally, and now we check how is it changed now that we ran it based on EUF, we can detect those kind of correctness issues.", + "sources_streamethId": "6735e2fb9dbb7a90e14ee83e", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731562800000, - "slot_end": 1731563400000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/17yJsWv6HioxijpCWnMnLMPeQFMTy_KMomUQHF2n1hS8", - "resources_slides": null, + "slot_start": 1731562200000, + "slot_end": 1731564000000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1Y6GrE_61ZfJ2Mxu9xrE-xcG7WBCWmKg3qYPa5F0zL3s", + "resources_slides": "https://drive.google.com/file/d/1lEfPZifUzry_aVkzBOVRfBtaV1JdClSC/view", "speakers": [ - "hubert-ritzdorf" + "elaine-hu" ] }, "vector": [ - 0, - 0, 0, 0, 6, @@ -272646,11 +271880,9 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -273106,6 +272338,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -273122,7 +272355,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -273170,6 +272402,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -273186,6 +272419,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -273302,6 +272536,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -273347,7 +272582,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -273393,16 +272627,13 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 2, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -273670,8 +272901,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -273688,61 +272919,60 @@ }, { "session": { - "id": "empirical-analysis-of-amm-loss-versus-rebalancing-on-layer-2-chains", - "sourceId": "T8BXK3", - "title": "Empirical analysis of AMM loss versus rebalancing on layer 2 chains", - "description": "This talk presents an empirical analysis of Loss versus Rebalancing (LVR) on Arbitrum, Base and Ethereum. It compares the realised and theoretical LVR; along with the arbitrage profits from CEX-DEX/Perpetual; then further reveals whether the frequency of delta-hedging, the pool liquidity and the block time difference lead to better or worse LVR.", - "track": "Cryptoeconomics", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Research", + "id": "empower-the-ethereum-network-with-your-own-node", + "sourceId": "RAXURS", + "title": "Empower the Ethereum Network with your own node", + "description": "Stereum is an easy to use MIT-licensed Open Source GUI open-source Node Setup & Management Software.\r\nAfter a couple of clicks you have your hardware set up for \r\n1) Solo Staking (MEV)\r\n2) Distributed Validator Staking(Obol, SSV)\r\n3) running an Archive Node \r\n4) node operation of several protocols (SSV Network, CSM and Simple DVTM)\r\nWe want to make a workshop, where you can tryout a setup yourself and take time for your questions. 
dApps, testnet-mainnet switch and client-diversity supported!", + "track": "Usability", + "type": "Workshop", + "expertise": "Beginner", + "audience": "Stakers/Validators", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", - "Cross-L2", - "MEV", - "AMMs", - "cross-domain", - "arbitrage", - "AMMs", - "Cross-L2", - "Layer 2s", - "MEV" + "Staking", + "Best Practices", + "Accessibility", + "network", + "access", + "Accessibility", + "Best Practices", + "Staking" ], "keywords": [ - "loss versus rebalancing", - "cross-domain arbitrage" + "Ethereum Node", + "Tooling", + "Network Access" ], - "duration": 1415, + "duration": 4470, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "f4e918da3ae9577243a5645f92b870e4d206670389a185a629e9ac6921540464", + "sources_youtubeId": "cAsztMfLfF0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735e2fb9dbb7a90e14ee83e", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731562200000, - "slot_end": 1731564000000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1Y6GrE_61ZfJ2Mxu9xrE-xcG7WBCWmKg3qYPa5F0zL3s", - "resources_slides": null, + "slot_start": 1731465900000, + "slot_end": 1731471300000, + "slot_roomId": "classroom-c", + "resources_presentation": "https://docs.google.com/presentation/d/1pvjBcm_guIMvayHy6vzCMwdxhLF_FviCoXJx10mrzT8", + "resources_slides": "https://drive.google.com/file/d/18_mx6yh7ovdLdB05kbiE_RlwQJHL08YI/view", "speakers": [ - "elaine-hu" + "stefan-kobrc", + "stereum-team", + "david-muhlbacher" ] }, "vector": [ 0, 0, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -274030,6 +273260,8 @@ 0, 0, 6, + 6, + 6, 0, 0, 0, @@ -274487,7 +273719,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -274512,6 +273743,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -274540,6 +273772,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -274551,7 +273784,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -274609,6 +273841,9 @@ 0, 0, 0, + 2, + 0, + 0, 0, 0, 0, @@ -274685,7 +273920,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -274745,45 +273979,38 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, 0, 0, 0, @@ -275053,12 +274280,10 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, + 2, 0, 0, 0, @@ -275070,52 +274295,46 @@ }, { "session": { - "id": "empower-the-ethereum-network-with-your-own-node", - "sourceId": "RAXURS", - "title": "Empower the Ethereum Network with your own node", - "description": "Stereum is an easy to use MIT-licensed Open Source GUI open-source Node Setup & Management Software.\r\nAfter a couple of clicks you have your hardware set up for \r\n1) Solo Staking (MEV)\r\n2) Distributed Validator Staking(Obol, SSV)\r\n3) running an Archive Node \r\n4) node operation of several protocols (SSV Network, CSM and Simple DVTM)\r\nWe want to make a workshop, where you can tryout a setup yourself and take time for your questions. 
dApps, testnet-mainnet switch and client-diversity supported!", - "track": "Usability", - "type": "Workshop", + "id": "empowering-a-safer-internet-community-driven-scam-reporting-and-prevention-in-thailand", + "sourceId": "FGUAQX", + "title": "Empowering a Safer Internet: community-driven scam reporting and prevention in Thailand\"", + "description": "In today’s digital age, user-driven solutions are vital for online safety. This talk explores Whoscall—a free mobile app trusted by over 17 million users globally, offering call and SMS identification, phishing site scanning, and personal data breach detection—and Thailand’s Scam Alert feature. Both initiatives empower users and promote public-private collaboration in scam prevention.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Stakers/Validators", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Staking", - "Best Practices", - "Accessibility", - "network", - "access", - "Accessibility", - "Best Practices", - "Staking" + "Public good", + "SEA", + "Security" ], "keywords": [ - "Ethereum Node", - "Tooling", - "Network Access" + "Anti-Scam" ], - "duration": 4470, + "duration": 533, "language": "en", - "sources_swarmHash": "f4e918da3ae9577243a5645f92b870e4d206670389a185a629e9ac6921540464", - "sources_youtubeId": "cAsztMfLfF0", + "sources_swarmHash": "706ba7fe2748e77f900452e4d1f1ba71883ef043c73c9fc738f17c3dac914a8b", + "sources_youtubeId": "guXtHSEhZgQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6735cb649dbb7a90e1b64fac", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735cb649dbb7a90e1b64fac.vtt", + "transcript_text": " Hello everyone, greetings from Taiwan. I am Michelle Shen, Product Director at HUSCO. Today I am going to share our HUSCO empowers internet safety through a community-driven approach to scan reporting and prevention in Thailand. In the digital world, trust is fundamental to the online interaction. At HOSCO, we are committed to building that trust by combining cutting-edge technology like AI with global partnership. This approach aims to create a safe collaborative space where user activity and participants in combating scans. Hoosco was founded in 2012. We are founded by the trusted company, Gogolook. Gogolook now has a dual headquarter in Taiwan and Thailand. So for over a decade, we were connected with governments worldwide, including the police agencies across Asia, and we enhanced anti-scan solution. We now have more than 250 employees around the world. So for those who never heard of Who's Code before or you never used Who's Code, so here give you a very quick introduction to the videoอีกที ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตทำร้ายตัวเอง แล้วทำร้ายลูก ๆ 1 tbs of sugar 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Okay, so we just published a book. This book includes more than 1,000 of the scammers' calls, and weight is more than seven kilograms. 
So this book is just a fun way for us to give a simple introduction to Whoscall. That is, if you cannot remember all the scammers' numbers, the easy way to protect yourself is to install Whoscall. So why can we do it? I think it is because we now have the largest number database in East and Southeast Asia. Every year we block more than 2.6 billion spam calls and messages. So we help users identify these threats and stay protected every day. So now we have reached more than 100 million downloads worldwide. So Whoscall offers not only caller identification and an SMS filter, but we also keep evolving new features. So right now we can also help users do personal data leak monitoring, and we can also detect suspicious websites. And also now we are trying to provide some deepfake scam detection. The reason why we are consistently evolving to meet new threats is because scams have become a new normal. According to our annual scam report in APAC, only 1.6% of people never encountered a scam in a year. So that means people will encounter one at least once a month. Some people, more than 50% of people, encounter them every single day. And I think the most important thing is that the scammers are so smart. They have evolved with technology. So with these new technologies, such as social engineering, phishing, and deepfakes, they leverage data science, big data, and AI to empower themselves. So that also gives us a lesson. Even as experts, we acknowledge that this is a battle that we cannot win only by ourselves. We need to fight with a network. So that's why we want to build an evolving network to combat scams. Apart from our own AI technology to prevent scams and our public database, we also cooperate with governments, including the Royal Thai Police. Most importantly, we believe that we should work closely with our users. The sooner we get reports, the sooner we can discover the new trending scams that are happening. So who are these hidden heroes? What outcome did they contribute? In the past years, these reporting users submitted over 4 million scam reports through Whoscall. So who are they? After speaking with them, some of them are just the uncle or auntie that you will meet every single day. So this community-driven effort reflects a strong sense of justice among our users. And the only purpose of their reports is that they want to protect their loved ones. They believe the reports they make can help protect more users against scams. So we also found that this value of sharing transcends age, gender, and demographics. So while community-driven reporting is effective, it also raises some challenges, such as how we are going to ensure the reports are trustworthy. And we also have some challenges in meeting regulatory compliance. So I think we also see some opportunities: incorporating blockchain to verify report origins and data integrity would help us overcome these challenges. So in closing, unity is essential in combating scams. At Whoscall we are built for trust, and we join forces with users, partners, and governments to create a safer internet. Together, I think we can make a meaningful impact. Thanks for listening. This is Michelle from Gogolook. 
Thank you.", "eventId": "devcon-7", - "slot_start": 1731465900000, - "slot_end": 1731471300000, - "slot_roomId": "classroom-c", - "resources_presentation": "https://docs.google.com/presentation/d/1pvjBcm_guIMvayHy6vzCMwdxhLF_FviCoXJx10mrzT8", - "resources_slides": null, + "slot_start": 1731579600000, + "slot_end": 1731580200000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1PhodWX8WCiq6P9Vsm9h6TVZlVmdMbAjQkvyVws1MlFw", + "resources_slides": "", "speakers": [ - "stefan-kobrc", - "stereum-team", - "david-muhlbacher" + "michelle-shen" ] }, "vector": [ 0, + 6, 0, 0, 0, @@ -275123,7 +274342,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -275411,12 +274629,9 @@ 0, 0, 0, - 6, - 6, - 6, - 0, 0, 0, + 6, 0, 0, 0, @@ -275866,6 +275081,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -275897,7 +275113,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -275926,7 +275141,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -275954,7 +275168,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -275977,6 +275190,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -275995,7 +275209,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -276115,6 +275328,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -276165,7 +275379,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -276438,7 +275651,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -276446,54 +275658,52 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "empowering-a-safer-internet-community-driven-scam-reporting-and-prevention-in-thailand", - "sourceId": "FGUAQX", - "title": "Empowering a Safer Internet: community-driven scam reporting and prevention in Thailand\"", - "description": "In today’s digital age, user-driven solutions are vital for online safety. This talk explores Whoscall—a free mobile app trusted by over 17 million users globally, offering call and SMS identification, phishing site scanning, and personal data breach detection—and Thailand’s Scam Alert feature. Both initiatives empower users and promote public-private collaboration in scam prevention.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "id": "empowering-users-how-ethereums-low-node-requirements-promote-true-decentralization-over-solana", + "sourceId": "QAJNMK", + "title": "Empowering Users: How Ethereum’s Low Node Requirements Promote True Decentralization Over Solana", + "description": "Nine years after Ethereum's launch, you can still run a node at home on commodity hardware, even low-powered devices like $185 ARM64 boards.\r\n\r\nWhy is this so important? Wouldn't Solana's approach, using more powerful hardware for higher speed and throughput, be better? 
We'll explore why home nodes matter for decentralization, credible neutrality, and global accessibility.\r\n\r\nWe'll also compare node requirements vs the Nakamoto coefficient as metrics for measuring decentralization.", + "track": "Core Protocol", "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Community", + "audience": "Stakers/Validators", "featured": false, "doNotRecord": false, "tags": [ - "Public good", - "SEA", - "Security" - ], - "keywords": [ - "Anti-Scam" + "Decentralization", + "Home staking" ], - "duration": 533, + "keywords": [], + "duration": 436, "language": "en", - "sources_swarmHash": "706ba7fe2748e77f900452e4d1f1ba71883ef043c73c9fc738f17c3dac914a8b", - "sources_youtubeId": "guXtHSEhZgQ", + "sources_swarmHash": "e392f868233af92802f11b232aab83133a4eca653b22dc1d8af805c6e35f1a75", + "sources_youtubeId": "Nu7zHhcaRWY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735cb649dbb7a90e1b64fac", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735cb649dbb7a90e1b64fac.vtt", - "transcript_text": " Hello everyone, greetings from Taiwan. I am Michelle Shen, Product Director at HUSCO. Today I am going to share our HUSCO empowers internet safety through a community-driven approach to scan reporting and prevention in Thailand. In the digital world, trust is fundamental to the online interaction. At HOSCO, we are committed to building that trust by combining cutting-edge technology like AI with global partnership. This approach aims to create a safe collaborative space where user activity and participants in combating scans. Hoosco was founded in 2012. We are founded by the trusted company, Gogolook. Gogolook now has a dual headquarter in Taiwan and Thailand. So for over a decade, we were connected with governments worldwide, including the police agencies across Asia, and we enhanced anti-scan solution. We now have more than 250 employees around the world. So for those who never heard of Who's Code before or you never used Who's Code, so here give you a very quick introduction to the videoอีกที ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตอนนี้ ตทำร้ายตัวเอง แล้วทำร้ายลูก ๆ 1 tbs of sugar 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey 1 tbs of honey Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Ketua kota Okay, so we just published a book. This book includes more than 1,000 of the scammers' calls, and weight is more than seven kilograms. So this book is just a function that we want to give a simple introduction of the host code. That is, if you cannot remember all the scammers' calls, the easy way to protect yourself is to install HoosCode. So why we can do it? I think now because we have the largest number of databases in East and South Asia. Every year we block more than 2.6 billion of spam calls and messages. So we help users identify the stress and stay protected every day. So now we reach more than 100 downloads worldwide. So Hooska offers not only the color identification as an SMS filter, but we also evaluate our new features. So right now we also can help users do the personal data linking monitoring, and also we can detect the suspicious website. 
And also now we are trying to provide some defects scan detection. The reason why we are consistently involving and to meet the new threat is because the scan become a new normal. According to our annual scan report in AIPAC, only 1.6% of people, they never account for scan before in a year. So means that people will at least account once a month. Even some of the people, more than 50% of people, they will account for every single day. And I think the most important is that the scammers, they are so smart. They were involved with technology. So with these new technologies, such as social engineering, phishing, and defect, they leverage the data science, big data, and AI to power themselves. So that also gives us a learned. Even at Asper's, acknowledging that is a battle that we can not win only by ourselves. We need to fight with the network. So that's why we want to build an involving network to combat a scan. A part of our self and AI technology to present scans and public database, we also couple that tip with government, including the lawyer type police. Most importantly, we believe that we should work closely with our users. Sooner we post, we can discover the new trending scan is happening. So who are these hidden heroes? What outcome did they contribute? In the past years, these report users, they submit over 4 million of scan code reports through the Who's Code. So who are they? After speaking with them, some of them just the uncle or auntie that you will meet every single day. So this community driven effort reflects a strong sense of justice among our users. And the only purpose they want to report is that they want to pretend they are loved. Like they believe the report they built can help more users pretend as guests. So we also found that this value to share will transcend age, even gender, and demographics. So while community-driven reporting is inactive, it also raises some challenges, such as how we're going to ensure the report is trustworthy. And we also have some challenge to meeting the regulator compliance. So I think we also see some opportunity such as incorporating blockchain to facilitate report origins and data integrity will help us to overcome this challenge. So in closing, Unity is essential in combating scan, at whose core we are built for trust and join forces with users, partners, and governments to create a safe internet. Together, I think we can make a meaningful impact. Thanks for your listening. This is Michelle from Google Look. 
Thank you.", + "sources_streamethId": "6736d86274749a4b893436c0", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731579600000, - "slot_end": 1731580200000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1PhodWX8WCiq6P9Vsm9h6TVZlVmdMbAjQkvyVws1MlFw", - "resources_slides": null, + "slot_start": 1731643200000, + "slot_end": 1731643800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/149MDCwjImcWRfdIwZw6lfpbIkNtiT4AFD60ebK9hnNQ", + "resources_slides": "https://drive.google.com/file/d/1RIE_HihMHMceJ7W1to2mArnrXLDaNAzi/view", "speakers": [ - "michelle-shen" + "diego-losada" ] }, "vector": [ 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -277240,11 +276450,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -277328,6 +276533,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -277343,13 +276549,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -277487,7 +276693,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -277812,8 +277017,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -277825,38 +277030,42 @@ }, { "session": { - "id": "empowering-users-how-ethereums-low-node-requirements-promote-true-decentralization-over-solana", - "sourceId": "QAJNMK", - "title": "Empowering Users: How Ethereum’s Low Node Requirements Promote True Decentralization Over Solana", - "description": "Nine years after Ethereum's launch, you can still run a node at home on commodity hardware, even low-powered devices like $185 ARM64 boards.\r\n\r\nWhy is this so important? Wouldn't Solana's approach, using more powerful hardware for higher speed and throughput, be better? We'll explore why home nodes matter for decentralization, credible neutrality, and global accessibility.\r\n\r\nWe'll also compare node requirements vs the Nakamoto coefficient as metrics for measuring decentralization.", + "id": "encrypted-mempools-a-path-to-ethereum-l1", + "sourceId": "SGDDEX", + "title": "Encrypted Mempools: a path to Ethereum L1", + "description": "This talk will explore the future of encrypted mempools, paving the way to enshrinement on Ethereum L1. Starting from current designs such as Shutter and SUAVE, security assumptions and out-of-protocol infrastructure can be stripped away with cryptography including homomorphic encryption, VDFs, and delay encryption. 
These approaches would trustlessly bring front running protection and censorship resistance to the protocol.", "track": "Core Protocol", "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Stakers/Validators", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Decentralization", - "Home staking" + "encryption", + "mempool", + "Censorship Resistance", + "Core Protocol", + "Cryptography" ], - "keywords": [], - "duration": 436, + "keywords": [ + "Encrypted", + "Mempool" + ], + "duration": 565, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "13fb566c3794a741fd8dff3d5d83fa04159a09104d886258558e6781631adaaa", + "sources_youtubeId": "mUoWwRoHrvk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736d86274749a4b893436c0", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731643200000, - "slot_end": 1731643800000, + "slot_start": 1731466500000, + "slot_end": 1731467100000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/149MDCwjImcWRfdIwZw6lfpbIkNtiT4AFD60ebK9hnNQ", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1lvMpzBomZ6dNVchh_7lRcXyFGQ2an1s7f3t0tDgzR2E", + "resources_slides": "https://drive.google.com/file/d/1ADS4Li8Pfe0iFnTDavlUsqhKDb8wIwHi/view", "speakers": [ - "diego-losada" + "marc-harvey-hill" ] }, "vector": [ @@ -278621,11 +277830,13 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -278697,58 +277908,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -278806,6 +277965,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -278956,6 +278116,38 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -279176,7 +278368,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -279184,6 +278375,14 @@ 0, 0, 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, 2, 0, 0, @@ -279191,47 +278390,51 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, 0 ] }, { "session": { - "id": "encrypted-mempools-a-path-to-ethereum-l1", - "sourceId": "SGDDEX", - "title": "Encrypted Mempools: a path to Ethereum L1", - "description": "This talk will explore the future of encrypted mempools, paving the way to enshrinement on Ethereum L1. Starting from current designs such as Shutter and SUAVE, security assumptions and out-of-protocol infrastructure can be stripped away with cryptography including homomorphic encryption, VDFs, and delay encryption. These approaches would trustlessly bring front running protection and censorship resistance to the protocol.", - "track": "Core Protocol", - "type": "Lightning Talk", + "id": "end-to-end-internet-games", + "sourceId": "EZ9T33", + "title": "End-to-end internet games", + "description": "For the past 1.5 years, I've been building fully onchain games–games where the entire state is onchain for some reason (have launched 7!). \r\n\r\nThere is lots of cryptographic data floating around the internet. New primitives are allowing all this data to be interoperable with each other... and even verifiable on-chain. 
\r\n\r\nI'll discuss some of this tech (tls notary, app attest, zkml, etc.) and discuss what new wild games we can build with them.", + "track": "Applied Cryptography", + "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "encryption", - "mempool", - "Censorship Resistance", - "Core Protocol", - "Cryptography" + "Gaming", + "Mechanism design", + "Mobile" ], "keywords": [ - "Encrypted", - "Mempool" + "ZK", + "Programmable cryptography", + "onchain games" ], - "duration": 565, + "duration": 1486, "language": "en", - "sources_swarmHash": "13fb566c3794a741fd8dff3d5d83fa04159a09104d886258558e6781631adaaa", - "sources_youtubeId": "mUoWwRoHrvk", + "sources_swarmHash": "46a058b4803524f956646260cd45e173c58e0e34f6aa603b9af7f661f00a18ff", + "sources_youtubeId": "4Cblt2gOIas", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673447d19dbb7a90e17a1d59", "eventId": "devcon-7", - "slot_start": 1731466500000, - "slot_end": 1731467100000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1lvMpzBomZ6dNVchh_7lRcXyFGQ2an1s7f3t0tDgzR2E", - "resources_slides": null, + "slot_start": 1731477600000, + "slot_end": 1731479400000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1SKERFupONxE6JOQvDC21CI1lz62VYcj5ZdZOGlXcWOg", + "resources_slides": "https://drive.google.com/file/d/1GepqHXoeuAw6stm_ph4M35l4Vu5Wtr9h/view", "speakers": [ - "marc-harvey-hill" + "small-brain" ] }, "vector": [ @@ -279239,16 +278442,13 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -279380,6 +278580,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -279536,7 +278737,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -279993,23 +279193,23 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, 0, 0, - 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -280097,6 +279297,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -280134,7 +279335,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -280286,8 +279486,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -280553,7 +279751,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -280566,47 +279763,32 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "end-to-end-internet-games", - "sourceId": "EZ9T33", - "title": "End-to-end internet games", - "description": "For the past 1.5 years, I've been building fully onchain games–games where the entire state is onchain for some reason (have launched 7!). \r\n\r\nThere is lots of cryptographic data floating around the internet. New primitives are allowing all this data to be interoperable with each other... and even verifiable on-chain. \r\n\r\nI'll discuss some of this tech (tls notary, app attest, zkml, etc.) and discuss what new wild games we can build with them.", - "track": "Applied Cryptography", - "type": "Talk", - "expertise": "Intermediate", + "id": "energy-renewal-sound-healing", + "sourceId": "7DEDKP", + "title": "Energy Renewal (Sound Healing)", + "description": "By master Ice \r\nThis session helps you rest deeply, reset your energy, and find inner peace.\r\n- Recharge and relax with gentle sounds of gongs and bowls\r\n- a short guided meditation. 
\r\n\r\nNov 14 10:30 -11:15", + "track": "Entertainment", + "type": "Mixed Formats", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Gaming", - "Mechanism design", - "Mobile" - ], - "keywords": [ - "ZK", - "Programmable cryptography", - "onchain games" - ], - "duration": 1486, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "46a058b4803524f956646260cd45e173c58e0e34f6aa603b9af7f661f00a18ff", - "sources_youtubeId": "4Cblt2gOIas", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673447d19dbb7a90e17a1d59", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731477600000, - "slot_end": 1731479400000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1SKERFupONxE6JOQvDC21CI1lz62VYcj5ZdZOGlXcWOg", - "resources_slides": null, - "speakers": [ - "small-brain" - ] + "slot_start": 1731555000000, + "slot_end": 1731557700000, + "slot_roomId": "decompression-room", + "resources_presentation": "https://docs.google.com/presentation/d/1FvG19MBxNr-yTjRDpb3Z4gWrJzfSxAeauH5sxykoiLg", + "resources_slides": "" }, "vector": [ 0, @@ -280618,7 +279800,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -280751,7 +279932,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -281367,7 +280547,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -281383,7 +280562,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -281471,7 +280649,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -281923,8 +281100,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 2, @@ -281945,32 +281122,45 @@ }, { "session": { - "id": "energy-renewal-sound-healing", - "sourceId": "7DEDKP", - "title": "Energy Renewal (Sound Healing)", - "description": "By master Ice \r\nThis session helps you rest deeply, reset your energy, and find inner peace.\r\n- Recharge and relax with gentle sounds of gongs and bowls\r\n- a short guided meditation. \r\n\r\nNov 14 10:30 -11:15", - "track": "Entertainment", - "type": "Mixed Formats", - "expertise": "", + "id": "enhancing-ethereum-p2p-network-security-through-fuzzing", + "sourceId": "7SR77E", + "title": "Enhancing Ethereum P2P Network Security through Fuzzing", + "description": "Security is a big deal for Ethereum's p2p network. We think fuzzing is a great way to make it more secure. We developed a time-series-based fuzz testing tool for the Ethereum network layer. In this tool, we integrated mutation mechanisms and seed selection algorithms, and introduced a new time-series feedback model. 
Using this tool, we can spot and fix existing vulnerabilities while also spotting new risks.", + "track": "Core Protocol", + "type": "Lightning Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "keywords": [ + "Fuzzing", + "p2p network" + ], + "tags": [ + "network", + "p2p" + ], "language": "en", - "speakers": [], + "sources_swarmHash": "318caf8e3c3d2cefe58bdf4b84464ca84773412519e1ec72bf190e1abd87608d", + "sources_youtubeId": "e7T7naJRWVg", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "tim-fan", + "sun-haochen", + "fudong-wu" + ], "eventId": "devcon-7", - "slot_start": 1731555000000, - "slot_end": 1731557700000, - "slot_roomId": "decompression-room", - "resources_presentation": "https://docs.google.com/presentation/d/1FvG19MBxNr-yTjRDpb3Z4gWrJzfSxAeauH5sxykoiLg" + "slot_start": 1731571200000, + "slot_end": 1731571800000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1B-0SsGH9Jbgo3njphxqoa7CInPi0Ftq_r5Ivuuvi8zg", + "resources_slides": "https://drive.google.com/file/d/1lGEiPRa3DHZxDl4uHSdhOHp-sgxkXeLJ/view" }, "vector": [ - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -282272,6 +281462,9 @@ 0, 0, 0, + 6, + 6, + 6, 0, 0, 0, @@ -282804,6 +281997,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -283016,6 +282210,54 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -283228,62 +282470,10 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, + 0, 2, 0, 0, @@ -283302,41 +282492,49 @@ }, { "session": { - "id": "enhancing-ethereum-p2p-network-security-through-fuzzing", - "sourceId": "7SR77E", - "title": "Enhancing Ethereum P2P Network Security through Fuzzing", - "description": "Security is a big deal for Ethereum's p2p network. We think fuzzing is a great way to make it more secure. We developed a time-series-based fuzz testing tool for the Ethereum network layer. In this tool, we integrated mutation mechanisms and seed selection algorithms, and introduced a new time-series feedback model. Using this tool, we can spot and fix existing vulnerabilities while also spotting new risks.", - "track": "Core Protocol", + "id": "ens-war-stories-securing-web3-from-web2-based-attacks", + "sourceId": "P9U9Q3", + "title": "ENS War Stories: Securing Web3 from Web2-Based Attacks", + "description": "Web3 is not an island. Every day, threat actors try to exploit web2 domains to target web3 entities. 
This talk recounts ENS' war stories / lessons of battling threats in the DNS, including:\r\n- Detecting early-stage attacks on web3 entities in the DNS\r\n- How we unraveled a campaign of over 2,500+ web2 domains targeting web3 and defi entities \r\n- Legal and technical remedies to combat web2-based threats (and their limitations)\r\n- Why the ecosystem must come together to share intel and resources", + "track": "Security", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Community", "featured": false, "doNotRecord": false, - "keywords": [ - "Fuzzing", - "p2p network" - ], "tags": [ - "network", - "p2p" + "Collective Intelligence", + "Security", + "Best Practices", + "user", + "safety", + "Best Practices", + "Collective Intelligence", + "Security" ], - "language": "en", - "speakers": [ - "tim-fan", - "sun-haochen", - "fudong-wu" + "keywords": [ + "threat actors", + "legal process", + "user safety" ], + "duration": 781, + "language": "en", + "sources_swarmHash": "ddf9a9beb6a6cc606ece80ac2cfa5b7ea1dc15ed7e74f90748997b8a953b8574", + "sources_youtubeId": "ht_Szqvtx8w", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731571200000, - "slot_end": 1731571800000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1B-0SsGH9Jbgo3njphxqoa7CInPi0Ftq_r5Ivuuvi8zg" + "slot_start": 1731389400000, + "slot_end": 1731390000000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1TPTt3DvIJCvQfAzoDGb3Ea32KlVjG7UCxB3UUz4JC4I", + "resources_slides": "https://drive.google.com/file/d/1hNnpW-0g_JQUwESvzh2lNn2a4KP9SkCq/view", + "speakers": [ + "alexander-urbelis" + ] }, "vector": [ - 0, - 0, - 0, - 0, 6, 0, 0, @@ -283635,17 +282833,13 @@ 0, 0, 0, - 6, - 6, - 6, - 0, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -284089,6 +283283,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -284119,8 +283314,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -284172,7 +283369,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -284386,10 +283582,11 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -284651,12 +283848,12 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -284669,50 +283866,48 @@ }, { "session": { - "id": "ens-war-stories-securing-web3-from-web2-based-attacks", - "sourceId": "P9U9Q3", - "title": "ENS War Stories: Securing Web3 from Web2-Based Attacks", - "description": "Web3 is not an island. Every day, threat actors try to exploit web2 domains to target web3 entities. This talk recounts ENS' war stories / lessons of battling threats in the DNS, including:\r\n- Detecting early-stage attacks on web3 entities in the DNS\r\n- How we unraveled a campaign of over 2,500+ web2 domains targeting web3 and defi entities \r\n- Legal and technical remedies to combat web2-based threats (and their limitations)\r\n- Why the ecosystem must come together to share intel and resources", - "track": "Security", + "id": "ensuring-data-availability-in-l2s", + "sourceId": "SCUHA7", + "title": "Ensuring Data Availability in L2s", + "description": "The talk explores the risks associated with data availability (DA) providers in L2s, highlighting the necessary security guarantees of DA layers. It covers economic security considerations, security properties of DA attestations, and fraud detection mechanisms against data withholding attacks. 
The goal is to guide L2 users in understanding the different risk profiles of DA providers and assist developers and researchers in enhancing the security and functionality of L2 solutions.",
    "track": "Layer 2",
    "type": "Lightning Talk",
    "expertise": "Intermediate",
    "audience": "Research",
    "featured": false,
    "doNotRecord": false,
    "tags": [
      "Layer 2s",
      "Data Availability",
      "Security",
      "risk",
      "Data Availability",
      "Layer 2s",
      "Security"
    ],
    "keywords": [
      "Risks"
    ],
    "duration": 522,
    "language": "en",
    "sources_swarmHash": "75b7386cbae4776efde1f4dc6db350038877f82dff20402586a0f62b0ebab6d5",
    "sources_youtubeId": "JroXK1iS0Xs",
    "sources_ipfsHash": "",
    "sources_livepeerId": "",
    "sources_streamethId": "67370cf774749a4b898fcc26",
    "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67370cf774749a4b898fcc26.vtt",
    "transcript_text": " Hello, hi everyone. My name is Vincenzo. I work in research at L2BEAT. Today the talk is about data availability risks and how to make DA layers secure, looking at it through the lens of the L2BEAT DA risk framework. 
The goal is to guide L2 users in understanding the different risk profiles of DA providers and assist developers and researchers in enhancing the security and functionality of L2 solutions.", - "track": "Layer 2", - "type": "Lightning Talk", + "id": "ensuring-privacy-in-digital-identity-to-prevent-a-dystopian-crisis", + "sourceId": "TZQYGY", + "title": "Ensuring Privacy in Digital Identity to Prevent a Dystopian Crisis", + "description": "This talk will explore introducing a method for privacy-preserving proof of user uniqueness in contexts like elections using DIDs, ZK, and VCs for verifying credentials without revealing unique identifiers while ensuring compatibility with multiple trust sources. This enables self-sovereign digital identity, allowing selective disclosure of verified credentials while protecting personal data, supporting privacy-preserving KYC, sybil resistance, compliant access to financial services, and more.", + "track": "Cypherpunk & Privacy", + "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", - "Data Availability", + "Identity", + "Zero-Knowledge", "Security", - "risk", - "Data Availability", - "Layer 2s", - "Security" + "zk", + "proof", + "Identity", + "Security", + "Zero-Knowledge" ], "keywords": [ - "Risks" + "Zk", + "Proof" ], - "duration": 522, + "duration": 1426, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "27893580368e8396780c9f103b3d94703c4ab1700db60ed5816b3334de7aff27", + "sources_youtubeId": "6EJBsHydyVU", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67370cf774749a4b898fcc26", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67370cf774749a4b898fcc26.vtt", - "transcript_text": " Hello, hi everyone. My name is Vincenzo. I work in research at L2Bit. Today the talk is about data availability risks and how to make DLIR secure, looking at it through the lens of the L2B-DA risk framework. 
So to start, data availability is crucial for L2", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731654600000, - "slot_end": 1731655200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1fP4Av0dDJM1g4BBb6EB2lsaWqOTPmZK7RkZvBv_vs-w", - "resources_slides": null, + "slot_start": 1731402000000, + "slot_end": 1731403800000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1tnxzF5on5Su2ji2vPSGG1B6RHmxzdqDiuloznxu_PIg", + "resources_slides": "https://drive.google.com/file/d/1Koenif0k3CGI6ivvq0vU8SzVly_8gT4Z/view", "speakers": [ - "vincenzo-furcillo" + "jordi-baylina", + "oleksandr-brezhniev" ] }, "vector": [ @@ -286095,9 +285287,9 @@ 0, 0, 0, + 6, 0, 0, - 6, 0, 0, 0, @@ -286397,6 +285589,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -286837,6 +286030,14 @@ 0, 0, 0, + 6, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -286878,6 +286079,12 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -286907,8 +286114,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -287049,6 +286254,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -287106,6 +286315,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -287146,32 +286359,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -287408,7 +286595,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -287421,52 +286607,45 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "ensuring-privacy-in-digital-identity-to-prevent-a-dystopian-crisis", - "sourceId": "TZQYGY", - "title": "Ensuring Privacy in Digital Identity to Prevent a Dystopian Crisis", - "description": "This talk will explore introducing a method for privacy-preserving proof of user uniqueness in contexts like elections using DIDs, ZK, and VCs for verifying credentials without revealing unique identifiers while ensuring compatibility with multiple trust sources. This enables self-sovereign digital identity, allowing selective disclosure of verified credentials while protecting personal data, supporting privacy-preserving KYC, sybil resistance, compliant access to financial services, and more.", - "track": "Cypherpunk & Privacy", - "type": "Talk", + "id": "enter-the-war-room-a-black-swan-simulation", + "sourceId": "HQSNWQ", + "title": "Enter the War Room: A Black Swan Simulation", + "description": "BREAKING: A key Layer 2 sequencer has suffered a complete outage for a brief period! As a consequence, many loans from the protocol DevaLend could not be paid, leading to liquidations and bad debt.\r\n\r\nIn this workshop, you will assume the role of one of the key players in this exciting simulation, and explore how to navigate through it. Propose how to navigate through the DevaLend situation and react as new scenarios evolve and respond to your ideas. 
Good Luck!", + "track": "Coordination", + "type": "Workshop", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Community", "featured": false, - "doNotRecord": false, - "tags": [ - "Identity", - "Zero-Knowledge", - "Security", - "zk", - "proof", - "Identity", - "Security", - "Zero-Knowledge" - ], + "doNotRecord": true, "keywords": [ - "Zk", - "Proof" + "Conflict" + ], + "tags": [ + "Layer 2s", + "Governance", + "Emergency Plan", + "conflict", + "Emergency Plan", + "Governance", + "Layer 2s" ], - "duration": 1426, "language": "en", - "sources_swarmHash": "27893580368e8396780c9f103b3d94703c4ab1700db60ed5816b3334de7aff27", - "sources_youtubeId": "6EJBsHydyVU", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731402000000, - "slot_end": 1731403800000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1tnxzF5on5Su2ji2vPSGG1B6RHmxzdqDiuloznxu_PIg", - "resources_slides": null, "speakers": [ - "jordi-baylina", - "oleksandr-brezhniev" - ] + "juan-carlos-bell-llinas", + "oxytocin" + ], + "eventId": "devcon-7", + "slot_start": 1731393000000, + "slot_end": 1731400200000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1QJBCSyIk_2YgSpZlJQsuZo3ymwCCG1XhQXN6zxlSBoQ", + "resources_slides": "https://drive.google.com/file/d/1Z8PjyJpz88NAxOzQkenIcl3BcNBqqTb4/view" }, "vector": [ 0, @@ -287474,13 +286653,14 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -288220,11 +287400,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -288232,8 +287407,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -288269,7 +287442,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -288290,6 +287462,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -288319,6 +287492,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -288426,6 +287600,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -288444,7 +287619,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -288506,7 +287680,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -288528,6 +287701,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -288787,12 +287961,12 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -288805,38 +287979,41 @@ }, { "session": { - "id": "enter-the-war-room-a-black-swan-simulation", - "sourceId": "HQSNWQ", - "title": "Enter the War Room: A Black Swan Simulation", - "description": "BREAKING: A key Layer 2 sequencer has suffered a complete outage for a brief period! As a consequence, many loans from the protocol DevaLend could not be paid, leading to liquidations and bad debt.\r\n\r\nIn this workshop, you will assume the role of one of the key players in this exciting simulation, and explore how to navigate through it. Propose how to navigate through the DevaLend situation and react as new scenarios evolve and respond to your ideas. 
Good Luck!", - "track": "Coordination", - "type": "Workshop", - "expertise": "Intermediate", + "id": "epf-day-introduction", + "sourceId": "PE8JHU", + "title": "EPF Day Introduction", + "description": "Josh and Mario introduce the fifth cohort's EPF Day.", + "track": "[CLS] EPF Day", + "type": "Lightning Talk", + "expertise": "Beginner", "audience": "Community", "featured": false, - "doNotRecord": true, - "keywords": [ - "Conflict" - ], + "doNotRecord": false, "tags": [ - "Layer 2s", - "Governance", - "Emergency Plan", - "conflict", - "Emergency Plan", - "Governance", - "Layer 2s" + "EPF", + "Ethereum Roadmap", + "Layer 1" ], + "keywords": [], + "duration": 567, "language": "en", - "speakers": [ - "juan-carlos-bell-llinas", - "oxytocin" - ], + "sources_swarmHash": "07c8d90edd36f96c7e993e961857bd845f20407627a717428d0862e3d0594b15", + "sources_youtubeId": "_2fNWYoBrT8", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673887c51b0f83434dcfb118", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673887c51b0f83434dcfb118.vtt", + "transcript_text": " All right. Welcome to EPF Day. Thanks for being here. My name is Josh. This is Mario. We coordinate the Ethereum protocol fellowship with the Ethereum foundation. And this is EPF day where the fellows from the fifth cohort will be giving you lots and lots of demonstrations about what they have been working on over the past five months. Though quickly, I want to talk a little bit about what the Ethereum Protocol Fellowship is. This is a program that's been running for about three years now. It's been going through five different cohorts, starting with what was called the core developer apprentice program started by Piper Merriam and then Mario and I have been running this iteration of the program for the past three cohorts. The protocol fellowship is a program that is meant to steward the protocol through the bringing in of more and more developers. So according to the latest protocol guild statistics, there are about 180 developers that are working on the core protocol, all the things from specification to working on EL clients, CL clients, doing testing, and also doing lots of the research that is necessary for future upgrades and forks so it's a lot of stuff that needs to get done and not that many people to do it as Tim Baco likes to say and I think others I'm sure you've seen this roadmap before that each one of these tiny little boxes could use an entire team to focus on just that box. So we are here to try to fill in all of those boxes. The EPF, the Protocol Fellowship, has sort of grown a lot over the past couple of years and we've been adding new and new features to it to enhance the experience and to make it easier for people who are interested in doing this work to do so. So it started with the EPF. Again, this is the fifth cohort. At the beginning of this year we started the Ethereum protocol study group and that was a three month, two month long program that offers some more basic introduction and overview to the different pieces of the protocol so those people that are interested and maybe have some developer experience but don't really have much knowledge of Ethereum itself can get introduced to it and learn about the different pieces and parts and understand what it is that they might want to focus on. 
In addition to that, we've created a resource, epf.wiki, which is intended to be the go-to place for people who are interested in learning about the protocol, answering questions and helping them understand more about it. It is a growing and collaborative resource, still in its fledgling form, and we welcome any further contributions to it, whether you are learning about the protocol or are already very knowledgeable about it, to add your two cents. And finally, like Piper likes to say, the door to core development is hilariously wide open. You just have to step through. The EPF is a way for you to step through with somebody holding your hand a little bit. I'll go ahead and pass it to Mario. Yeah, thank you so much. And let me tell you a bit about the current cohort. So as Josh mentioned, I feel like this cohort started even before it started, because we did a study group first. And it was a very special experience. We started with that this time for the first time, and there have been a bunch of people who came to learn about the protocol basically from zero and then joined the cohort. We started the study group around February, March, so during this year, people who didn't have any idea about core development became almost full-time contributors in some cases. So yeah, it's been a wild ride. We had around 45 fellows. Just some context to this number: the cohort is permissionless, anybody can join. In the first calls, we had over 50, maybe 60 people who were interested in the fellowship, but at least 45 of them submitted multiple updates throughout multiple weeks during the first two months or so. So 45 of them at least made it to the second month. And to the very end, till today, we have 30 fellows who made it to EPF Day. There are a few of them who couldn't make it, of course, because of travel restrictions and so on. Anyway, it was five months of the actual fellowship, with the first month spent figuring out what projects these people even wanted to work on. And for many, it was easier thanks to the study group. We had two calls every week, so altogether we had over 40 calls over the past five months. We met each other every week for the office hours and for the stand-up. So a ton of calls. But also, we met in person. The first time we met in person was in Brussels, where we had a chance to meet maybe half of the fellows, but almost everyone is here today at Devcon. So it's really an honor to actually meet you all finally, after seeing each other on these calls. Yeah, so 30 people made it to the end with 30 projects. It's not one project per person; there were teams of people working on a bigger-scope project together, which is also amazing to see, that people learn to collaborate together. And we will have presentations for 20 projects today; 30 were proposed. Yeah. And altogether we had over 500 comments, over 500 updates in the repository. So when you open the development updates in the cohort-five repository, with all of the tracking for every fellow, there are over 500 of them. So I guess I read all of them, but it's not really possible to do it by yourself. 
But there's been a lot of work done. And it's hard to summarize what individual fellow did. I know that Rahul summarized it, so I kind of borrowed his data here just to give you an idea what is the output of one fellow who dedicated 1,000 hours to the fellowship. And how many coffees? 228 cups of coffee, yeah. That's interesting metrics that I would like to also use for measuring DPF. But, yeah, thank you so much for the numbers, Rahul. So this is 26 updates just from one person. So you see, like, this is what the single person is able to dedicate to the fellowship over five months. And so with this, today, again, it's an honor to host the project presentations to see your recap of what you've been working on in past months. And there is 20 of them. There is a lot of them. So we have a ton of topics to go over. I'm not going to even mention each of them because you will see them. This is the order, and we are starting with EVM memory repricing. So maybe let's get slowly ready to that. And, yeah, we will have first half of the presentations in the morning with a pause at 1 p.m., 1.15, because we have quarter past 1. We will have 45 minutes for lunch, and then we meet here again at 2 p.m. for the rest of the presentations and for the panel. Two, we'll have a panel discussion as well. So that's the schedule for today. Please enjoy the EPF day. And yeah, I hope you get inspired by the fellows, by their work. I hope you are able to meet some interesting people here.", "eventId": "devcon-7", - "slot_start": 1731393000000, - "slot_end": 1731400200000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1QJBCSyIk_2YgSpZlJQsuZo3ymwCCG1XhQXN6zxlSBoQ" + "slot_start": 1731467700000, + "slot_end": 1731468600000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1UgPaeQAkzm7-SuT2jdxRMLHcVJEzy3CxxHN_BL0ftCg", + "resources_slides": "https://drive.google.com/file/d/1Z9q2gfwmP7f9R9oGZnirNgq_xScRnNAu/view", + "speakers": [ + "mario-havel", + "josh-davis" + ] }, "vector": [ 0, @@ -288850,11 +288027,12 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -289601,6 +288779,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -289656,10 +288835,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -289686,7 +288861,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -289782,6 +288956,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -289794,7 +288969,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -290160,8 +289334,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -290175,40 +289347,36 @@ }, { "session": { - "id": "epf-day-introduction", - "sourceId": "PE8JHU", - "title": "EPF Day Introduction", - "description": "Josh and Mario introduce the fifth cohort's EPF Day.", + "id": "epf-day-panel", + "sourceId": "ZMRJ9B", + "title": "EPF Day Panel", + "description": "Panel with former fellows who became core devs and mentors", "track": "[CLS] EPF Day", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Community", + "type": "Panel", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "EPF", - "Ethereum Roadmap", - "Layer 1" - ], + "tags": [], "keywords": [], - "duration": 567, + "duration": 2235, "language": "en", - "sources_swarmHash": "07c8d90edd36f96c7e993e961857bd845f20407627a717428d0862e3d0594b15", - "sources_youtubeId": "_2fNWYoBrT8", + "sources_swarmHash": "072589e67adaa227348834bef25064203fc523216871a6bae08780ba110064ef", + "sources_youtubeId": "BT1mIVNNOts", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": 
"673887c51b0f83434dcfb118", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673887c51b0f83434dcfb118.vtt", - "transcript_text": " All right. Welcome to EPF Day. Thanks for being here. My name is Josh. This is Mario. We coordinate the Ethereum protocol fellowship with the Ethereum foundation. And this is EPF day where the fellows from the fifth cohort will be giving you lots and lots of demonstrations about what they have been working on over the past five months. Though quickly, I want to talk a little bit about what the Ethereum Protocol Fellowship is. This is a program that's been running for about three years now. It's been going through five different cohorts, starting with what was called the core developer apprentice program started by Piper Merriam and then Mario and I have been running this iteration of the program for the past three cohorts. The protocol fellowship is a program that is meant to steward the protocol through the bringing in of more and more developers. So according to the latest protocol guild statistics, there are about 180 developers that are working on the core protocol, all the things from specification to working on EL clients, CL clients, doing testing, and also doing lots of the research that is necessary for future upgrades and forks so it's a lot of stuff that needs to get done and not that many people to do it as Tim Baco likes to say and I think others I'm sure you've seen this roadmap before that each one of these tiny little boxes could use an entire team to focus on just that box. So we are here to try to fill in all of those boxes. The EPF, the Protocol Fellowship, has sort of grown a lot over the past couple of years and we've been adding new and new features to it to enhance the experience and to make it easier for people who are interested in doing this work to do so. So it started with the EPF. Again, this is the fifth cohort. At the beginning of this year we started the Ethereum protocol study group and that was a three month, two month long program that offers some more basic introduction and overview to the different pieces of the protocol so those people that are interested and maybe have some developer experience but don't really have much knowledge of Ethereum itself can get introduced to it and learn about the different pieces and parts and understand what it is that they might want to focus on. In addition to that, we've created a resource, epf.wiki, which is the intention is for it to be the go-to place for people who are interested in learning about the protocol to do so answering questions and and helping them to understand more about it it is a growing and collaborative resource so it is in it's still it's sort of fledgling form and will you welcome any further contributions to it if you are learning about the protocol or already are very knowledgeable about it to add your two cents to it. And finally, like Piper likes to say, the door to core development is hilariously wide open. You just have to step through. The EPF open. You just have to step through. The EPF is a way for you to step through with somebody holding your hand a little bit. I'll go ahead and pass it to Mario. Yeah, thank you so much. And let me tell you a bit about the current cohort. So as Josh mentioned, we started this cohort. I feel like this cohort started even before it started because we did a study group first. And it was a very special experience. 
We started with that this time for the first time, and there have been a bunch of people who came to learn about the protocol basically from zero and then joined the cohort. We started the study group around February, March, so during this year, people who didn't have any idea about core development became almost full-time contributors in some cases. So yeah, it's been a wild ride. We had around 45 fellows. Just some context to this number: the cohort is permissionless, anybody can join. In the first calls, we had over 50, maybe 60 people who were interested in the fellowship, but at least 45 of them submitted multiple updates throughout multiple weeks during the first two months or so. So 45 of them at least made it to the second month. And to the very end, till today, we have 30 fellows who made it to EPF Day. There are a few of them who couldn't make it, of course, because of travel restrictions and so on. Anyway, it was five months of the actual fellowship, with the first month spent figuring out what projects these people even wanted to work on. And for many, it was easier thanks to the study group. We had two calls every week, so altogether we had over 40 calls over the past five months. We met each other every week for the office hours and for the stand-up. So a ton of calls. But also, we met in person. The first time we met in person was in Brussels, where we had a chance to meet maybe half of the fellows, but almost everyone is here today at Devcon. So it's really an honor to actually meet you all finally, after seeing each other on these calls. Yeah, so 30 people made it to the end with 30 projects. It's not one project per person; there were teams of people working on a bigger-scope project together, which is also amazing to see, that people learn to collaborate together. And we will have presentations for 20 projects today; 30 were proposed. Yeah. And altogether we had over 500 comments, over 500 updates in the repository. So when you open the development updates in the cohort-five repository, with all of the tracking for every fellow, there are over 500 of them. So I guess I read all of them, but it's not really possible to do it by yourself. But there's been a lot of work done. And it's hard to summarize what an individual fellow did. I know that Rahul summarized it, so I kind of borrowed his data here, just to give you an idea of the output of one fellow who dedicated 1,000 hours to the fellowship. And how many coffees? 228 cups of coffee, yeah. That's an interesting metric that I would like to also use for measuring the EPF. But, yeah, thank you so much for the numbers, Rahul. So this is 26 updates just from one person. So you see, this is what a single person is able to dedicate to the fellowship over five months. And so with this, today, again, it's an honor to host the project presentations, to see your recap of what you've been working on in past months. And there are 20 of them; there are a lot of them, so we have a ton of topics to go over. I'm not going to even mention each of them, because you will see them. This is the order, and we are starting with EVM memory repricing. So maybe let's get slowly ready for that. 
And, yeah, we will have first half of the presentations in the morning with a pause at 1 p.m., 1.15, because we have quarter past 1. We will have 45 minutes for lunch, and then we meet here again at 2 p.m. for the rest of the presentations and for the panel. Two, we'll have a panel discussion as well. So that's the schedule for today. Please enjoy the EPF day. And yeah, I hope you get inspired by the fellows, by their work. I hope you are able to meet some interesting people here.", + "sources_streamethId": "67347fbe9dbb7a90e1b8c58d", "eventId": "devcon-7", - "slot_start": 1731467700000, - "slot_end": 1731468600000, + "slot_start": 1731489300000, + "slot_end": 1731492000000, "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1UgPaeQAkzm7-SuT2jdxRMLHcVJEzy3CxxHN_BL0ftCg", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1zfYthY0BXd-oH251a-aAijUCaZpAylXrBQ0_5terdHk", + "resources_slides": "https://drive.google.com/file/d/19AAp3P2RFPU44UpizTvD0eu-QrHTb_nX/view", "speakers": [ - "josh-davis", - "mario-havel" + "mario-havel", + "eniko-nagy", + "echo", + "eitan" ] }, "vector": [ @@ -290522,9 +289690,52 @@ 0, 0, 0, + 6, 0, 6, 6, + 6, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -290978,7 +290189,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -291155,7 +290365,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -291270,51 +290479,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -291525,6 +290689,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -291535,8 +290700,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -291548,36 +290711,39 @@ }, { "session": { - "id": "epf-day-panel", - "sourceId": "ZMRJ9B", - "title": "EPF Day Panel", - "description": "Panel with former fellows who became core devs and mentors", + "id": "epf-nethermindil-evm", + "sourceId": "QJNNDL", + "title": "EPF - Nethermind/IL-EVM", + "description": "This talk will discuss my EPF work on Nethermind's IL-EVM project, which included developing tools to analyze EVM execution patterns, writing (optimised) opcode and top pattern implementations, and conducting and writing tests.", "track": "[CLS] EPF Day", - "type": "Panel", + "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 2235, + "tags": [ + "Core", + "Protocol" + ], + "keywords": [ + "EVM", + "Optimization" + ], + "duration": 699, "language": "en", - "sources_swarmHash": "072589e67adaa227348834bef25064203fc523216871a6bae08780ba110064ef", - "sources_youtubeId": "BT1mIVNNOts", + "sources_swarmHash": "ee30c44852dd78c6f9f71de6a552443c01b1cd6fa673737ad972191ff03e328d", + "sources_youtubeId": "9UhpqUzsEJE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67347fbe9dbb7a90e1b8c58d", + "sources_streamethId": "673428639dbb7a90e1a39ee7", "eventId": "devcon-7", - "slot_start": 1731489300000, - "slot_end": 1731492000000, + "slot_start": 1731470400000, + "slot_end": 1731471300000, "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1zfYthY0BXd-oH251a-aAijUCaZpAylXrBQ0_5terdHk", - "resources_slides": null, + 
"resources_presentation": "https://docs.google.com/presentation/d/13ze8Pr4OxtxoFIIxGDV0SAGLb_aIJtknEOqxz5Ct5lA", + "resources_slides": "https://drive.google.com/file/d/1d3_vY9k2PtKtEZyiEcGee-e-k9HwAidr/view", "speakers": [ - "mario-havel", - "eniko-nagy", - "echo", - "eitan" + "siddharth-vaderaa" ] }, "vector": [ @@ -291893,17 +291059,10 @@ 0, 0, 0, - 6, - 6, - 6, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, + 6, 0, 0, 0, @@ -292643,6 +291802,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -292917,39 +292078,38 @@ }, { "session": { - "id": "epf-nethermindil-evm", - "sourceId": "QJNNDL", - "title": "EPF - Nethermind/IL-EVM", - "description": "This talk will discuss my EPF work on Nethermind's IL-EVM project, which included developing tools to analyze EVM execution patterns, writing (optimised) opcode and top pattern implementations, and conducting and writing tests.", - "track": "[CLS] EPF Day", - "type": "Lightning Talk", + "id": "erc-3668-on-linea-built-in-trust-minimized-l2-to-l1-data-retrieval", + "sourceId": "FARJAG", + "title": "ERC-3668 on Linea: built-in, trust-minimized L2 to L1 data retrieval", + "description": "ERC-3668 (aka. CCIP-read) enable L1 contracts to access Linea state. No special library need to be integrated, everything is built into the protocol and secured by Linea's zero-knowledge proofs. During this presentation, we will go into the details of how this works, the benefits and use cases you can start building today.", + "track": "Layer 2", + "type": "Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Core", - "Protocol" + "Layer 2s", + "Zero-Knowledge" ], "keywords": [ - "EVM", - "Optimization" + "Cross-chain" ], - "duration": 699, + "duration": 945, "language": "en", - "sources_swarmHash": "ee30c44852dd78c6f9f71de6a552443c01b1cd6fa673737ad972191ff03e328d", - "sources_youtubeId": "9UhpqUzsEJE", + "sources_swarmHash": "c2f71910dba4040f55e11efe7077f7d2fe0251f2e870654772778d56bdc18b30", + "sources_youtubeId": "7Ov7JcxHE-s", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673428639dbb7a90e1a39ee7", + "sources_streamethId": "6734478f9dbb7a90e1759be2", "eventId": "devcon-7", - "slot_start": 1731470400000, - "slot_end": 1731471300000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/13ze8Pr4OxtxoFIIxGDV0SAGLb_aIJtknEOqxz5Ct5lA", - "resources_slides": null, + "slot_start": 1731475800000, + "slot_end": 1731477600000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1caoeThC6_UrDRFE2PQIcbIczFePi9G_E72Ud7YuJejc", + "resources_slides": "https://drive.google.com/file/d/13gD9b7yT2mqWHnNDJlt_p5QNVTKsX-mP/view", "speakers": [ - "siddharth-vaderaa" + "julien" ] }, "vector": [ @@ -292960,6 +292120,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -292968,11 +292129,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -293195,6 +292351,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -293269,7 +292426,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -293717,6 +292873,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -293770,6 +292927,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -294012,9 +293170,6 @@ 0, 0, 0, - 2, - 2, - 0, 0, 0, 0, @@ -294289,92 +293444,47 @@ }, { "session": { - "id": "erc-3668-on-linea-built-in-trust-minimized-l2-to-l1-data-retrieval", - "sourceId": "FARJAG", - "title": "ERC-3668 on Linea: built-in, trust-minimized L2 to L1 data retrieval", - "description": "ERC-3668 (aka. CCIP-read) enable L1 contracts to access Linea state. 
No special library need to be integrated, everything is built into the protocol and secured by Linea's zero-knowledge proofs. During this presentation, we will go into the details of how this works, the benefits and use cases you can start building today.", - "track": "Layer 2", + "id": "erc-4337-adoption-analysis", + "sourceId": "SGRFUA", + "title": "ERC-4337: Adoption Analysis", + "description": "Since the EntryPoint contract was deployed, millions of smart accounts have been created and UserOps submitted, via hundreds of exciting projects in the space. Join us as we look at the interesting trends onchain and the unique challenges and exciting opportunities faced by teams building in the space", + "track": "Usability", "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", - "featured": false, + "expertise": "Beginner", + "audience": "Product", + "featured": true, "doNotRecord": false, - "tags": [ - "Layer 2s", - "Zero-Knowledge" - ], "keywords": [ - "Cross-chain" + "ERC-4337" + ], + "tags": [ + "DevRel", + "Use Cases", + "Account Abstraction", + "erc-4337", + "Account Abstraction", + "DevRel", + "Use Cases" ], - "duration": 945, "language": "en", - "sources_swarmHash": "c2f71910dba4040f55e11efe7077f7d2fe0251f2e870654772778d56bdc18b30", - "sources_youtubeId": "7Ov7JcxHE-s", + "sources_swarmHash": "7c4218b2473a3775a377c88b4fb4f3da5763b000ac0bd90e68fe39a47d330b93", + "sources_youtubeId": "qgrkOarhBzo", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734478f9dbb7a90e1759be2", - "eventId": "devcon-7", - "slot_start": 1731475800000, - "slot_end": 1731477600000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1caoeThC6_UrDRFE2PQIcbIczFePi9G_E72Ud7YuJejc", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "julien" - ] + "tom-teman" + ], + "eventId": "devcon-7", + "slot_start": 1731564000000, + "slot_end": 1731565800000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/17M-nImCJUoQMma2tumjGUf2IgWOapEl76FIQWV-y4XA", + "resources_slides": "https://drive.google.com/file/d/1WsX1h6HdMVSXQbGAL1-KLUHVwhbzRJWE/view" }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -294383,6 +293493,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -295087,11 +294198,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -295141,8 +294247,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -295180,6 +294284,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -295207,6 +294312,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -295290,6 +294396,55 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -295638,7 +294793,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -295648,6 +294802,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -295660,43 +294816,49 @@ }, { "session": { - "id": "erc-4337-adoption-analysis", - "sourceId": "SGRFUA", - "title": "ERC-4337: Adoption Analysis", - "description": "Since the EntryPoint contract was deployed, millions of smart accounts have been created and UserOps submitted, via hundreds of exciting 
projects in the space. Join us as we look at the interesting trends onchain and the unique challenges and exciting opportunities faced by teams building in the space", - "track": "Usability", + "id": "erigon-3-a-new-paradigm-for-ethereum-clients", + "sourceId": "CWZK8G", + "title": "Erigon 3 a New Paradigm for Ethereum Clients", + "description": "Erigon 3 represents a step change for Ethereum clients:\r\n\r\n* Modular client combining EL & CL\r\n* Transaction Centric\r\n* Deterministic storage model built to optimize EVM based chains\r\n* Performs on commodity drives\r\n* Sync model uses verifiable data replication and minimal re-execution\r\n* Acts as block consumer and producer, RPC, or indexer\r\n* Splits chain dissemination from chain distribution\r\n\r\nThis talk outlines the key features of Erigon 3 and explains how it will change Ethereum client landscape.", + "track": "Core Protocol", "type": "Talk", - "expertise": "Beginner", + "expertise": "Intermediate", "audience": "Product", - "featured": true, + "featured": false, "doNotRecord": false, "keywords": [ - "ERC-4337" + "efficiency", + "client", + "modular" ], "tags": [ - "DevRel", - "Use Cases", - "Account Abstraction", - "erc-4337", - "Account Abstraction", - "DevRel", - "Use Cases" + "Architecture", + "Data Availability", + "Scalability", + "modular", + "Architecture", + "Data Availability", + "Scalability" ], "language": "en", + "sources_swarmHash": "e50220090933449af7ced2212fd9fade4e5054c7a674ee3f7ee568402c27bf34", + "sources_youtubeId": "sMPe1Ae99aA", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "tom-teman" + "mark-holt" ], "eventId": "devcon-7", - "slot_start": 1731564000000, - "slot_end": 1731565800000, + "slot_start": 1731481200000, + "slot_end": 1731483000000, "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/17M-nImCJUoQMma2tumjGUf2IgWOapEl76FIQWV-y4XA" + "resources_presentation": "https://docs.google.com/presentation/d/1AXdOVnj0u1_i9ZgFD0ao2vPGkVGyZU_aLbplEgtr5iY", + "resources_slides": "https://drive.google.com/file/d/1opEm2S90hqk37kx_NonDgDc7o5Szc6Ew/view" }, "vector": [ - 0, - 0, - 0, - 0, 0, 0, 0, @@ -295883,8 +295045,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -296015,6 +295175,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -296495,7 +295656,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -296509,10 +295669,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -296523,7 +295685,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -296586,6 +295747,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -296607,7 +295769,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -296618,7 +295779,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -296756,6 +295916,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -297007,9 +296168,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -297029,39 +296190,46 @@ }, { "session": { - "id": "erigon-3-a-new-paradigm-for-ethereum-clients", - "sourceId": "CWZK8G", - "title": "Erigon 3 a New Paradigm for Ethereum Clients", - "description": "Erigon 3 represents a step change for Ethereum clients:\r\n\r\n* Modular client combining EL & CL\r\n* Transaction Centric\r\n* Deterministic storage model built to optimize EVM based chains\r\n* Performs on commodity drives\r\n* Sync model uses verifiable data replication and minimal re-execution\r\n* Acts as block consumer and producer, RPC, or indexer\r\n* Splits chain dissemination from chain distribution\r\n\r\nThis talk outlines the key features of Erigon 3 and explains how it will change Ethereum client 
landscape.", + "id": "eth-a-roadmap-to-real-decentralization-in-a-world-of-centralized-power", + "sourceId": "C3HTZP", + "title": "ETH++: A roadmap to (real) decentralization in a world of centralized power", + "description": "Unfortunately, trends in block building and MEV furnish rapid centralization pressures that erode the protocol guarantees we gather here to build for Ethereum. We must now define a roadmap to save proof-of-stake. This requires help from builders, transaction originators, protocol designers, and you. We will demistify the hype on how and if trusted hardware (TEEs) can help us decentralize. Let's focus on geographical diversity and permissionless designs, to bring the world together.", "track": "Core Protocol", "type": "Talk", "expertise": "Intermediate", - "audience": "Product", + "audience": "Community", "featured": false, "doNotRecord": false, - "keywords": [ - "efficiency", - "client", - "modular" - ], "tags": [ - "Architecture", - "Data Availability", - "Scalability", - "modular", - "Architecture", - "Data Availability", - "Scalability" + "Protocol Design", + "Censorship Resistance", + "Decentralization", + "MEV", + "Censorship Resistance", + "MEV", + "Protocol Design" ], - "language": "en", - "speakers": [ - "mark-holt" + "keywords": [ + "TEE", + "hardware", + "decentralization" ], + "duration": 1502, + "language": "en", + "sources_swarmHash": "f5b073402029914ebdac73cc6b507d7de61254e303ae0954496daf4736572e11", + "sources_youtubeId": "ncAbQBVPV2Q", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67332a173a168eb53572b8a2", "eventId": "devcon-7", - "slot_start": 1731481200000, - "slot_end": 1731483000000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1AXdOVnj0u1_i9ZgFD0ao2vPGkVGyZU_aLbplEgtr5iY" + "slot_start": 1731403800000, + "slot_end": 1731405600000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1bcWYCRlknrhBAHOizptWAGujiHHrSU_sAh9xh-oi1js", + "resources_slides": "https://drive.google.com/file/d/11AmauLfDF5czSctk2D28zIEfUAv972CA/view", + "speakers": [ + "philip-daian" + ] }, "vector": [ 0, @@ -297814,6 +296982,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -297857,6 +297026,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -297877,13 +297047,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 2, - 0, 0, 0, 0, @@ -297919,6 +297082,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -297955,7 +297119,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -297965,6 +297128,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -298125,7 +297289,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -298386,8 +297549,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -298400,45 +297563,50 @@ }, { "session": { - "id": "eth-a-roadmap-to-real-decentralization-in-a-world-of-centralized-power", - "sourceId": "C3HTZP", - "title": "ETH++: A roadmap to (real) decentralization in a world of centralized power", - "description": "Unfortunately, trends in block building and MEV furnish rapid centralization pressures that erode the protocol guarantees we gather here to build for Ethereum. We must now define a roadmap to save proof-of-stake. This requires help from builders, transaction originators, protocol designers, and you. We will demistify the hype on how and if trusted hardware (TEEs) can help us decentralize. 
Let's focus on geographical diversity and permissionless designs, to bring the world together.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Intermediate", + "id": "eth-arauca-emersons-legacy-and-the-hope-for-change-in-vulnerable-communities-through-ethereum", + "sourceId": "TA3N8E", + "title": "ETH Arauca: Emerson's Legacy and the Hope for Change in Vulnerable Communities Through Ethereum", + "description": "In this talk, we will explore the moving case of ETH Arauca and the brave young activist Emerson, who led the ETH Colombia node and whose life was tragically taken in the exercise of his mission. We will analyze how Ethereum, through its vision of decentralized finance, can act as an engine of transformation in vulnerable communities with conflict contexts. This talk seeks to give visibility to Emerson's legacy, ETH leaders challenges, and highlight the potential of Ethereum to drive real change", + "track": "Real World Ethereum", + "type": "Lightning Talk", + "expertise": "Beginner", "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Protocol Design", - "Censorship Resistance", "Decentralization", - "MEV", - "Censorship Resistance", - "MEV", - "Protocol Design" + "Local Impact", + "Social Recovery", + "ethereum", + "good", + "Decentralization", + "Local Impact", + "Social Recovery" ], "keywords": [ - "TEE", - "hardware", - "decentralization" + "Ethereum", + "for", + "Good" ], - "duration": 1502, + "duration": 670, "language": "en", - "sources_swarmHash": "f5b073402029914ebdac73cc6b507d7de61254e303ae0954496daf4736572e11", - "sources_youtubeId": "ncAbQBVPV2Q", + "sources_swarmHash": "69fd2ee0d8dea44deb53d744d9f578f2dad9895e80f75843eb123fadb4476ff5", + "sources_youtubeId": "ocnOAWArb0w", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67332a173a168eb53572b8a2", + "sources_streamethId": "67374cba1b0f83434dd72b8e", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731403800000, - "slot_end": 1731405600000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1bcWYCRlknrhBAHOizptWAGujiHHrSU_sAh9xh-oi1js", - "resources_slides": null, + "slot_start": 1731660600000, + "slot_end": 1731661200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1nM9AZTRUu_izRLyWBvXZg8c-yplG6h0ED_v5As56vgk", + "resources_slides": "https://drive.google.com/file/d/1VpWVq3wwDjdS7MYHLnNReJZgl-YvKPyE/view", "speakers": [ - "philip-daian" + "andres-forigua", + "william-martinez", + "mateo-sabogal" ] }, "vector": [ @@ -298446,9 +297614,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -298761,6 +297929,8 @@ 0, 0, 6, + 6, + 6, 0, 0, 0, @@ -299195,10 +298365,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -299239,7 +298405,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -299341,20 +298506,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -299375,6 +298526,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -299452,6 +298604,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -299515,6 +298668,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -299756,7 +298913,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -299772,57 +298928,48 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0 ] }, { "session": { - "id": "eth-arauca-emersons-legacy-and-the-hope-for-change-in-vulnerable-communities-through-ethereum", - "sourceId": "TA3N8E", - "title": "ETH Arauca: Emerson's Legacy and the Hope for Change in Vulnerable Communities 
Through Ethereum", - "description": "In this talk, we will explore the moving case of ETH Arauca and the brave young activist Emerson, who led the ETH Colombia node and whose life was tragically taken in the exercise of his mission. We will analyze how Ethereum, through its vision of decentralized finance, can act as an engine of transformation in vulnerable communities with conflict contexts. This talk seeks to give visibility to Emerson's legacy, ETH leaders challenges, and highlight the potential of Ethereum to drive real change", - "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Community", + "id": "eth-escape-winner-revealed", + "sourceId": "WXS8BH", + "title": "ETH Escape Winner Revealed", + "description": "We'll announce the winner of ETH Escape.", + "track": "[CLS] ETH Escape - Speed Hacking Challenge", + "type": "Music", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Decentralization", - "Local Impact", - "Social Recovery", - "ethereum", - "good", - "Decentralization", - "Local Impact", - "Social Recovery" - ], - "keywords": [ - "Ethereum", - "for", - "Good" - ], - "duration": 670, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "69fd2ee0d8dea44deb53d744d9f578f2dad9895e80f75843eb123fadb4476ff5", - "sources_youtubeId": "ocnOAWArb0w", + "sources_swarmHash": "5304e1f0420f40e6a4f48b035a6c651c52dacb2925780a2ad61a0bddcbe95480", + "sources_youtubeId": "K4pfXsEwCHQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67374cba1b0f83434dd72b8e", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731660600000, - "slot_end": 1731661200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1nM9AZTRUu_izRLyWBvXZg8c-yplG6h0ED_v5As56vgk", - "resources_slides": null, "speakers": [ - "andres-forigua", - "mateo-sabogal", - "william-martinez" - ] + "michael-okeeffe" + ], + "eventId": "devcon-7", + "slot_start": 1731576600000, + "slot_end": 1731580200000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1lSwPhaKp0iIdGqbNHH0Wq_hG_vGPCW1ja_5qbLVLScg", + "resources_slides": "" }, "vector": [ 0, @@ -299831,7 +298978,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -299845,6 +298991,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -300144,11 +299291,9 @@ 0, 0, 0, - 6, - 6, - 6, 0, 0, + 6, 0, 0, 0, @@ -300678,7 +299823,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -300744,7 +299888,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -300822,7 +299965,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -300887,7 +300029,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -301144,11 +300285,12 @@ 2, 0, 0, + 2, + 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -301161,31 +300303,45 @@ }, { "session": { - "id": "eth-escape-winner-revealed", - "sourceId": "WXS8BH", - "title": "ETH Escape Winner Revealed", - "description": "We'll announce the winner of ETH Escape.", - "track": "[CLS] ETH Escape - Speed Hacking Challenge", - "type": "Music", - "expertise": "", - "audience": "Engineering", + "id": "eth-is-permissionless-money", + "sourceId": "TMFPCF", + "title": "ETH is permissionless money", + "description": "ETH is money! In this talk, we will explore the role of Ethereum's native asset on the base chain, in the L2 ecosystems, and in crypto broadly. 
We discuss the ETH supply, what it means to be permissionless money, how ETH is being used today, and how it's role can evolve.", + "track": "Cryptoeconomics", + "type": "Talk", + "expertise": "Beginner", + "audience": "Research", "featured": false, "doNotRecord": false, + "tags": [ + "Censorship Resistance", + "Decentralization", + "Ethereum Roadmap" + ], "keywords": [], - "tags": [], + "duration": 1525, "language": "en", - "speakers": [ - "michael-okeeffe" - ], + "sources_swarmHash": "16a407d31169ede407864499ca35faa00582fd0bcbd19ef8a43d06a74ecda142", + "sources_youtubeId": "qdCUbOvukDE", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6736c6df9dbb7a90e1d18b82", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731576600000, - "slot_end": 1731580200000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1lSwPhaKp0iIdGqbNHH0Wq_hG_vGPCW1ja_5qbLVLScg" + "slot_start": 1731569400000, + "slot_end": 1731571200000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1BKehfujLaDakbU2-PjgsWO9PzcaHlv5FlzNG5PlH6zY", + "resources_slides": "https://drive.google.com/file/d/1BSVjqKLt4f5XPk1vMV3yyWS-cZGay4Q5/view", + "speakers": [ + "mike-neuder" + ] }, "vector": [ 0, 0, + 6, 0, 0, 0, @@ -301203,7 +300359,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -302034,6 +301189,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -302079,6 +301235,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -302122,13 +301279,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -302514,55 +301665,61 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "eth-is-permissionless-money", - "sourceId": "TMFPCF", - "title": "ETH is permissionless money", - "description": "ETH is money! In this talk, we will explore the role of Ethereum's native asset on the base chain, in the L2 ecosystems, and in crypto broadly. We discuss the ETH supply, what it means to be permissionless money, how ETH is being used today, and how it's role can evolve.", - "track": "Cryptoeconomics", + "id": "ethereum-a-force-of-good", + "sourceId": "HUZP7J", + "title": "Ethereum a Force of Good", + "description": "Ethereum as a Force for Good\r\nWhat does it mean for Ethereum to be a force of good? How can real-world applications of Ethereum such as RWA, DeFi, and Web3 social right current inequities in the world? What are key blockers that we need to overcome to bring Ethereum into the mainstream? 
In this talk, Stani will elaborate on how Ethereum is a positive force of change in the world.", + "track": "Real World Ethereum", "type": "Talk", "expertise": "Beginner", - "audience": "Research", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Censorship Resistance", - "Decentralization", - "Ethereum Roadmap" + "RWA", + "Ethereum for Good", + "Economics", + "micropayments", + "Economics", + "Ethereum for Good", + "RWA" ], - "keywords": [], - "duration": 1525, + "keywords": [ + "stablecoins", + "supply chain", + "agriculture", + "scalability" + ], + "duration": 1570, "language": "en", - "sources_swarmHash": "16a407d31169ede407864499ca35faa00582fd0bcbd19ef8a43d06a74ecda142", - "sources_youtubeId": "qdCUbOvukDE", + "sources_swarmHash": "fcbb11b0fbb59cfb6316106365ba8065682b5fe889588b22de59a4c48016395a", + "sources_youtubeId": "Z86QO0WgWtE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736c6df9dbb7a90e1d18b82", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6735cf379dbb7a90e1f9620f", "eventId": "devcon-7", - "slot_start": 1731569400000, - "slot_end": 1731571200000, + "slot_start": 1731576600000, + "slot_end": 1731578400000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1BKehfujLaDakbU2-PjgsWO9PzcaHlv5FlzNG5PlH6zY", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1zwoxKxRNSg1zW4w3I3Ad1I6aSDAtCo3sBRkenui4eQ4", + "resources_slides": "https://drive.google.com/file/d/1KvgGkHtyX3OF85QD41bM9nP87Jnyp8Pc/view", "speakers": [ - "mike-neuder" + "stani-kulechov" ] }, "vector": [ 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -303339,6 +302496,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -303409,13 +302567,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -303436,6 +302587,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -303455,7 +302607,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -303464,6 +302615,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -303499,7 +302651,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -303621,6 +302772,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -303875,10 +303027,10 @@ 2, 0, 0, - 2, 0, 0, 0, + 2, 0, 0, 0, @@ -303892,10 +303044,10 @@ }, { "session": { - "id": "ethereum-a-force-of-good", - "sourceId": "HUZP7J", - "title": "Ethereum a Force of Good", - "description": "Ethereum as a Force for Good\r\nWhat does it mean for Ethereum to be a force of good? How can real-world applications of Ethereum such as RWA, DeFi, and Web3 social right current inequities in the world? What are key blockers that we need to overcome to bring Ethereum into the mainstream? 
In this talk, Stani will elaborate on how Ethereum is a positive force of change in the world.", + "id": "ethereum-and-robots", + "sourceId": "9G9LSH", + "title": "Ethereum and Robots", + "description": "I will describe how Ethereum can be used in the emerging consumer robots industry (and generally for autonomous machines).\r\n* privacy preserving surveillance\r\n* autonomous transport\r\n* factory to consumer - tokenization models\r\n* Laws of Robotics - zk hardware", "track": "Real World Ethereum", "type": "Talk", "expertise": "Beginner", @@ -303903,35 +303055,36 @@ "featured": false, "doNotRecord": false, "tags": [ - "RWA", - "Ethereum for Good", - "Economics", - "micropayments", - "Economics", - "Ethereum for Good", - "RWA" + "Collective Intelligence", + "Civil Resistance", + "DePIN", + "Autonomous World", + "robots", + "Autonomous World", + "Civil Resistance", + "Collective Intelligence", + "DePIN" ], "keywords": [ - "stablecoins", - "supply chain", - "agriculture", - "scalability" + "Robots" ], - "duration": 1570, + "duration": 1531, "language": "en", - "sources_swarmHash": "fcbb11b0fbb59cfb6316106365ba8065682b5fe889588b22de59a4c48016395a", - "sources_youtubeId": "Z86QO0WgWtE", + "sources_swarmHash": "1f7a566e27b32382aa1713acd471469239bf022cfe7569f8241c430a8ebb9577", + "sources_youtubeId": "SQzTSNurehU", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735cf379dbb7a90e1f9620f", + "sources_streamethId": "6735c5c19dbb7a90e1266802", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735cae29dbb7a90e1adef32.vtt", + "transcript_text": " Hey, everyone. My name is Marcus. I'm building EthOS. So the first question is what is EthOS? So three years ago, we forked Androids, more specifically Graphene OS. We started building Ethereum, putting Ethereum into the OS layer because we think current OSs see Ethereum as an app. We want to leverage the protocol. The first thing we we did was basically put a light note on it. And, yeah. And so, for example, we have on E4OS, we have a system level wallet or OS level wallet. So we took basically a signer, put it into the OS, and any app can basically connect to it and make signature requests to the user. And the upgrade for the users, they don't have to switch apps or anything and it just works. There's also basically upgrades to privacy and security and basically the reason is it doesn't do any outbound internet transactions so you also don't need any internet to sign anything. Also, yeah, as I mentioned before, we put right now on EthOS, we have two light client proxies. So when we first started, the first thing that we wanted to do was put a light node on it. And at the time, we didn't have much to choose from. So we took the Geflight node, put it into the OS as a system server, and basically what happened was it really into the OS as a system server, and basically what happened was it really turned the phone into a pocket warmer. The battery really, really drained pretty quickly. But it did work when you had a stable internet connection, it would connect to peers, and then you would have a local RPC with which you can send transactions, get blockchain data and also any app could connect to this RPC. Yeah, as I mentioned right now, we have Helios and Nimbus verified proxy. They're both currently Liteclient proxies, not full Liteclients, because of the merge, the Geth Lite node stopped working because the beacon chain was added to Ethereum. 
And so right now, basically, you still need a centralized RPC, but it does verify some pieces of data that it gets from this RPC. So this is pretty cool. And also, like, for the UX for the user, we wanted to make it so that it's, like, as simple as turning on and off Bluetooth or Wi-Fi. You can turn on your light client and just, just like have a local RPC on your phone. And so this brings me to the first issue that we have. Basically there is almost no mobile native dApps currently. So basically I'm a big fan of like native dApps. Currently like all of the dApps are like web dApps and then there are some that are progressive web apps, but they still face the platform risk of Apple and Google can shut you down if you are a PWA. And so basically what we created last week actually is like basically an app wrapper that is native. It looks and feels like a native app, but it actually opens up a web view that auto-injects the OS level wallet into the web app. So for users, it's just like opening Aave and the wallet is connected and you can start like basically doing transactions. And so this brings me to basically our announcement from last month. We announced our first hardware. So basically what we first did when we first started was basically bring your own device. We still have a web installer where you can basically buy a pixel by yourself at a local store. It has to be bootloader unlocked. You connect your phone and it just installs EthOS on it for you. But people ran into some issues. So basically, you know, because you had to get a specific pixel and it had to be bootloader unlocked and some carriers actually lie about it being bootloader unlocked. And so basically, we then started like selling Pixel 7As running EthOS. But then we also came into a roadblock and basically the roadblock there was that it's very hard to reliably get pixels. And so we were just like, hey, let's do some hardware. And so we started talking with some phone manufacturers and they sent us like some phones that they did in the past but they all really looked bad. We were just thinking, okay, nobody is going to replace their iPhone with a random crypto phone. So, we thought to ourselves, hey, let's first make a really cool, fun and exciting secondary device that because it's a secondary device, it doesn't have to be boring. Basically it's called the DGEN1 because it's our very first device. It's our first device on our journey that we want to like our big mission is like self-sovereign hardware. And basically what is self-sovereign hardware? I'm glad you asked. So basically I wrote a blog post about it last year, and for me self-sovereign hardware is where basically not only the OS that runs on top of it is open source, which we have currently, EthOS is fully open source, but also the hardware is open and inspectable. Like literally from the CPU instruction all the way to the app. It's a big mission, but I think it's very important. Because currently, if you look at the current hardware wallet landscape, for example, if you take a ledger, you really rely on security for obscurity. And that's very, very bad, especially if you're storing large amounts of crypto on it. And basically not only that, but also basically last year there was like this big controversy, if you remember.
The ledger, they announced like this ledger recovery program where they basically said like yeah, with a firmware update we can extract your private keys. And it was like, oh, my God. Okay. So, basically if a ledger can, a firmware update can extract your private key, what stops a malicious actor from pushing a malicious firmware update? It's basically the ledger firmware signing key. Now I assume and I hope that they hold it very dearly and very secure, but this is like a huge trust assumption for like every ledger to be secured by basically one ledger firmware signing key. So this is very bad. And so for self-server hardware, it's a natural use case for crypto because you don't have to trust. There's no trusted modules. Everything is open. And that's one of the big, big things with the current phone landscape. The iPhones is the worst because it's like, okay, closed OS and closed hardware, you can't inspect it on any level. But with Android it's a little better, you can inspect the OS, but there's still like proprietary blobs of code, like for example, like play services and like device drivers for hardware, which they don't allow you to open source just like ledger, they can't open source some things on the low level stuff. And so this is like very bad as soon as you have crypto in it as well because you're like, okay, I have this phone, it has this OS and it's open source but there's still like some trusted like blobs of code that are closed source and you don't know what they do. And so basically, yeah, that's like our big mission. And yeah, the DGN1 is like our first step towards that mission.", "eventId": "devcon-7", - "slot_start": 1731576600000, - "slot_end": 1731578400000, + "slot_start": 1731574800000, + "slot_end": 1731576600000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1zwoxKxRNSg1zW4w3I3Ad1I6aSDAtCo3sBRkenui4eQ4", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1s1aFTwzOBXNg9v3Cu1EnNW22GUWNxNYFneRubREaJXE", + "resources_slides": "https://drive.google.com/file/d/19jj064cf1jQRV8xcl5AYOsrj3OMhFl48/view", "speakers": [ - "stani-kulechov" + "tomasz-stanczak" ] }, "vector": [ @@ -304705,22 +303858,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -304733,6 +303870,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -304808,11 +303946,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -304840,7 +303978,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -304928,6 +304065,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -304998,7 +304139,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -305008,6 +304148,14 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -305271,47 +304419,46 @@ }, { "session": { - "id": "ethereum-and-robots", - "sourceId": "9G9LSH", - "title": "Ethereum and Robots", - "description": "I will describe how Ethereum can be used in the emerging consumer robots industry (and generally for autonomous machines).\r\n* privacy preserving surveillance\r\n* autonomous transport\r\n* factory to consumer - tokenization models\r\n* Laws of Robotics - zk hardware", - "track": "Real World Ethereum", - "type": "Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "ethereum-citizen-embracing-self-sovereign-digital-identity", + "sourceId": "ATKWT8", + "title": "Ethereum Citizen: Embracing Self-Sovereign Digital Identity", + "description": "The world is changing. Everything is becoming digital. 
As we seek to extract more from digital services, we are giving them more and more of our personal data.\r\n\r\nBut it doesn't have to be this way. Just as we gained self-sovereignty and ownership over our digital assets and money, we can achieve the same for our digital identities and data using similar and new technologies.\r\n\r\nThis presentation will explain what self-sovereign identity is, why we need it, and where we stand today.", + "track": "Cypherpunk & Privacy", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Collective Intelligence", - "Civil Resistance", - "DePIN", - "Autonomous World", - "robots", - "Autonomous World", - "Civil Resistance", - "Collective Intelligence", - "DePIN" + "Privacy", + "Identity", + "Social", + "data", + "Identity", + "Privacy", + "Social" ], "keywords": [ - "Robots" + "Attestations", + "data" ], - "duration": 1531, + "duration": 600, "language": "en", - "sources_swarmHash": "1f7a566e27b32382aa1713acd471469239bf022cfe7569f8241c430a8ebb9577", - "sources_youtubeId": "SQzTSNurehU", + "sources_swarmHash": "a65e59ff5884726a3266ba344b24a0d68991446dba97d23df9cb9d39182fb641", + "sources_youtubeId": "jK5uGFCH9HY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735c5c19dbb7a90e1266802", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735cae29dbb7a90e1adef32.vtt", - "transcript_text": " Hey, everyone. My name is Marcus. I'm building EthOS. So the first question is what is EthOS? So three years ago, we forked Androids, more specifically Graphene OS. We started building Ethereum, putting Ethereum into the OS layer because we think current OSs see Ethereum as an app. We want to leverage the protocol. The first thing we we did was basically put a light note on it. And, yeah. And so, for example, we have on E4OS, we have a system level wallet or OS level wallet. So we took basically a signer, put it into the OS, and any app can basically connect to it and make signature requests to the user. And the upgrade for the users, they don't have to switch apps or anything and it just works. There's also basically upgrades to privacy and security and basically the reason is it doesn't do any outbound internet transactions so you also don't need any internet to sign anything. Also, yeah, as I mentioned before, we put right now on EthOS, we have two light client proxies. So when we first started, the first thing that we wanted to do was put a light node on it. And at the time, we didn't have much to choose from. So we took the Geflight node, put it into the OS as a system server, and basically what happened was it really into the OS as a system server, and basically what happened was it really turned the phone into a pocket warmer. The battery really, really drained pretty quickly. But it did work when you had a stable internet connection, it would connect to peers, and then you would have a local RPC with which you can send transactions, get blockchain data and also any app could connect to this RPC. Yeah, as I mentioned right now, we have Helios and Nimbus verified proxy. They're both currently Liteclient proxies, not full Liteclients, because of the merge, the Geth Lite node stopped working because the beacon chain was added to Ethereum. And so right now, basically, you still need an essentialised RPC, but it does verify some pieces of data that it gets from this RPC. So this is pretty cool. 
And also, like, for the UX for the user, we wanted to make it so that it's, like, as simple as turning on and off Bluetooth or Wi-Fi. You can turn on your light client and just, just like have a local RPC on your phone. And so this brings me to the first issue that we have. Basically there is almost no mobile native dApps currently. So basically I'm a big fan of like native dApps. Currently like all of the dApps are like web dApps and then there are some that are progressive web apps, but they still face the platform risk of Apple and Google can shut you down if you are a PWA. And so basically what we created last week actually is like basically an app wrapper that is native. It looks and feels like a native app, but it actually opens up a web view that auto-injects the OS level wallet into the web app. So for users, it's just like opening Aave and the wallet is connected and you can start like basically doing transactions. And so this brings me to basically our announcement from last month. We announced our first hardware. So basically what we first did when we first started was basically bring your own device. We still have a web installer where you can basically buy a pixel by yourself at a local store. It has to be bootloader unlocked. You connect your phone and it just installs ethos on it for you. But people ran into some issues. So basically, you know, because you had to get a specific pixel and it had to be bootloader unlocked and some carriers actually lie about it being bootloader unlocked. And so basically, we then started like selling Pixel 7As running EthOS. But then we also came into a roadblock and basically the roadblock there was that it's very hard to reliably get pixels. And so we were just like, hey, let's do some hardware. And so we started talking with some phone manufacturers and they sent us like some phones that they did in the past but they all really looked bad. We were just thinking, okay, nobody is going to replace their iPhone with a random crypto phone. So, we thought to ourselves, hey, let's first make a really cool, fun and exciting secondary device that because it's a secondary device, it doesn't have to be boring. Basically it's called the DGEN1 because it's our very first device. It's our first device on our journey that we want to like our big mission is like self-sovereign hardware. And basically what is self-sovereign hardware? I'm glad you asked. So basically I wrote a blog post about it last year, and for me self-serve and hardware is where basically not only the OS that runs on top of it is open source, which we have currently, Ethos is fully open source, but also the hardware is open and inspectable. Like literally from the CPU instruction all the way to the app. It's a big mission, but I think it's very important. Because currently with like, CPU instruction all the way to the app. It's a big mission, but I think it's very important. Because currently with like if you look at the current hardware wallet landscape, for example, if you take a ledger, you really rely on security for obscurity. And that's very, very bad, especially if you're storing large amounts of crypto on it. And basically not only that, but also basically last year there was like this big controversy, if you remember. The ledger, they announced like this ledger recovery program where they basically said like yeah, with a firmware update we can extract your private keys. And it was like, oh, my God. Okay. 
So, basically if a ledger can, a firmware update can extract your private key, what stops a malicious actor from pushing a malicious firmware update? It's basically the ledger firmware signing key. Now I assume and I hope that they hold it very dearly and very secure, but this is like a huge trust assumption for like every ledger to be secured by basically one ledger firmware signing key. So this is very bad. And so for self-server hardware, it's a natural use case for crypto because you don't have to trust. There's no trusted modules. Everything is open. And that's one of the big, big things with the current phone landscape. The iPhones is the worst because it's like, okay, closed OS and closed hardware, you can't inspect it on any level. But with Android it's a little better, you can inspect the OS, but there's still like proprietary blobs of code, like for example, like play services and like device drivers for hardware, which they don't allow you to open source just like ledger, they can't open source some things on the low level stuff. And so this is like very bad as soon as you have crypto in it as well because you're like, okay, I have this phone, it has this OS and it's open source but there's still like some trusted like blobs of code that are closed source and you don't know what they do. And so basically, yeah, that's like our big mission. And yeah, the DGN1 is like our first step towards that mission.", + "sources_streamethId": "6736d9ad74749a4b8937d5e4", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d9ad74749a4b8937d5e4.vtt", + "transcript_text": " Hi, welcome from my site as well. I'm a researcher and founder of Lutrolabs and today I'm going to talk about how Ethereum citizens are embracing self-sourced identity. So who is a citizen based on the dictionary, a person who is a member of a particular country or a person who lives in a particular town or city. We extend this definition to the 21st century, we can just state the network state or any part of the digital community. But who is an Ethereum citizen? So Ethereum citizen is a person who is participating in the Ethereum ecosystem or using the Ethereum technology to achieve its goals while promoting the Ethereum's values. So I just want to say there is really no hard definition similar to Ethereum enlightenment, but you can pretty much tell by feeling what I mean by that. So what are the Ethereum values? So from the technology perspective, this is decentralization, security, and scalability. But from the user perspective, is in decentralization security and scalability but from the user perspective this is self-certainty censorship resistance ownership permissionless and privacy so where are we today so for financial assets uh we are pretty much there uh for smart contracts we are somewhat there so smart contracts can be permissionally deployed to the ethereum network but usually they can be upgraded through the governance process of a decentralized community, but most smart contracts have multisig today. So websites. Websites can be hosted in a decentralized way on a decentralized storage network, but we are also somewhat there. And for identity and data, identity and data should be fully owned and controlled by the users. And we are not there yet, but things are getting better in the last years. So as an Ethereum citizen, I want to control my identifiers so no one can take my identity. So here we have many options. So basically identifiers are Ethereum addresses. 
ENS handles, DIDs, or Semaphore identifiers in Zupass. But currently the most adopted thing we have are EOAs, so blockchain addresses with ENS, but this doesn't give us any privacy. And for the data part, we want to control our data and share it only with the people we want and when we want. So that means that the data should be stored in our wallets, on our personal servers, or encrypted on the third parties. But for that, if we want to make that work, we need verifiable data. So what is verifiable data? This is like the famous triangle of trust in the self-sovereign identity community. So we have the data issuers, we have users, and we have verifiers. So how the verifiable data works is that the issuer issues the data to the users and they sign over the data. And when users disclose the data to the verifiers, the verifiers can verify the digital signature so they know the data came from the issuer. But here I also want to mention that the data issuer and the user can be the same person because some data can be self-signed or self-issued if the data is not that important, or maybe it is important, but for example, what is my favorite color? This data can be signed by myself. So what's the reality? So financial assets in the decentralized applications are in our control, but the data is usually stored in the centralized databases, for example the list of my favorite NFTs or tokens on trading dApps. This is usually stored on each platform in their centralized databases. On every platform you have to connect your social accounts and prove that you're owning them, and you have to repeat this on every platform, and the KYC also you have to repeat on each platform. But it's not all that bad, things have drastically improved in the last two years. So for example just look at your tickets for the Devcon, you can store them in the Zupass. So the tickets for the Devcon are one kind of the verifiable data, and we also have other kinds, for example, Farcaster and Lens protocol posts. DIDs and verifiable credentials are getting more used in the last years. The proof-carrying data is another type of the verifiable data, and we also have zero-knowledge solutions like zkTLS. So how would the perfect world look like? User would visit her favorite app. The user needs to fill out some information, for example social profiles or any other data that this dApp needs, and the dApp would be able to fetch the data or from the user or from some other third-party provider where the user is storing his encrypted data, and the dApp could use this data without requiring the data to be created or filled out again. So what are some of the challenges? First challenge is user-friendliness. So data sharing in this way takes a long time. Proof generation time for zero knowledge proofs also takes a long time. Then the developer experience, so we don't have any unified SDKs and tools for different solutions. And we have many standards for the data and identity, but we also don't have enough standards on the wallet side and how to actually use these standards in the applications. So Ethereum citizens will become self-sovereign when not only their assets, but also their data and identities will be owned by them without any centralized point of failure. And that's it from my side. Thank you. Thank you very much, Vít.
So, Q&A, remember, just raise your hand, and I will toss this to you. Are there some questions for Vít? Yep, let's go. Hey, quick question on your view of government data. So because all of our identities, unfortunately, it's tied to some sort of government credential. How does that integrate into the schema that you presented for Ethereum citizens? Yeah, so maybe I can talk about how the idea is currently in Europe, where I'm from. So the European government and governments are also pushing for the similar concept: you have issuers, you have users, you have verifiers, and you also have the data signatures. Basically the whole system is the same, only the issuers are the government entities which are issuing the data. You're saying to integrate with the eIDAS 2.0 wallets, that's the plan? Excuse me? So is the plan then to kind of integrate with eIDAS 2.0 identity wallets that governments are providing? I think that depends on the applications. For example, if you have some decentralized applications, you don't need for everything the government IDs. But I think that the best approach is to merge on the similar technologies. So basically, if you are working on a wallet, which works in the Ethereum ecosystem, it's not so much work to also support other government use cases or whatever. Are there any more questions? Don't be shy, guys. Yep, we have one. Don't be shy. Put it close to your... I'm bad at volleyball. So the question is, like, what do you think holds us back? Is it just the governments and KYC providers being not flexible enough? Or, like, what's the real... I think there are many challenges. So, for example, if you want to develop your application in a way that the users own the data, I mean, like, the user experience in Web3 is already not so great. And if you want to do everything like I said, it gets even worse. So I think that we need more tools, just like the standard technical things that we need to figure out first before we can give it to the user so they can use it more. Thanks. Yep, we have one question. Will be the last question. Yeah, thanks. Do you think there's going to be, is there somehow a bit of a race between governments rolling out citizen identification systems which are mandatory and a version of decentralized identity which you've outlined here? Are you optimistic that the idealized, decentralized, self-sovereign identity model will prevail in time, or do you think they're at odds with the centralized sort of government-issued IDs? I think that in the short term or midterm, we will have both. So basically, like, I don't see that, for example, getting a passport or anything like that, we will be able to do it with decentralized identity. I think that some kind of documents will always be tied to the government identities. But inside the, like, Ethereum ecosystem, I don't think there is a need for the government IDs. I think we can go fully with decentralized identity. Yep. So that's all. Thank you very much. Thank you.
Pete.", "eventId": "devcon-7", - "slot_start": 1731574800000, - "slot_end": 1731576600000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1s1aFTwzOBXNg9v3Cu1EnNW22GUWNxNYFneRubREaJXE", - "resources_slides": null, + "slot_start": 1731646800000, + "slot_end": 1731647400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1JzCvRvtEDW6bmL33pf1kIydVAzlZM-tN5p_XZlUg02I", + "resources_slides": "https://drive.google.com/file/d/1OnLej0P4QDJF5LDYcZgAfrwkM4Sxr8VA/view", "speakers": [ - "tomasz-stanczak" + "vid-kersic" ] }, "vector": [ @@ -305320,7 +304467,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -305640,9 +304786,8 @@ 0, 0, 0, - 6, - 0, 0, + 6, 0, 0, 0, @@ -306088,7 +305233,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -306100,7 +305244,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -306116,6 +305259,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -306176,13 +305320,13 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -306246,6 +305390,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -306295,7 +305440,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -306627,12 +305771,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -306645,52 +305788,49 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "ethereum-citizen-embracing-self-sovereign-digital-identity", - "sourceId": "ATKWT8", - "title": "Ethereum Citizen: Embracing Self-Sovereign Digital Identity", - "description": "The world is changing. Everything is becoming digital. As we seek to extract more from digital services, we are giving them more and more of our personal data.\r\n\r\nBut it doesn't have to be this way. Just as we gained self-sovereignty and ownership over our digital assets and money, we can achieve the same for our digital identities and data using similar and new technologies.\r\n\r\nThis presentation will explain what self-sovereign identity is, why we need it, and where we stand today.", + "id": "ethereum-culture-expanding-in-the-infinite-garden", + "sourceId": "ZS338S", + "title": "Ethereum Culture Expanding in the Infinite Garden", + "description": "As a designer at the EF for the past 5 years, I’ve witnessed the unique culture of Ethereum and its growth. My talk aims to illuminate the vast cultural landscape of our ecosystem such as Cypherpunk, Regen, Degen, and L2s as subculture. I'm hoping to assist ecosystem participants, especially new comers, in becoming the infinite game players in the Infinite Garden.", "track": "Cypherpunk & Privacy", - "type": "Lightning Talk", - "expertise": "Intermediate", + "type": "Talk", + "expertise": "Beginner", "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Privacy", - "Identity", - "Social", - "data", - "Identity", - "Privacy", - "Social" + "Values", + "infinite", + "garden", + "Values" ], "keywords": [ - "Attestations", - "data" + "Culture", + "Subculture", + "Infinite Garden" ], - "duration": 600, + "duration": 1177, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "682ca4a78f91770b90155971ff92ffde6ddbbe43c418cc0cea0077e0b2843f34", + "sources_youtubeId": "gFBfP7utPI0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736d9ad74749a4b8937d5e4", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d9ad74749a4b8937d5e4.vtt", - "transcript_text": " Hi, welcome from my site as well. I'm a researcher and founder of Lutrolabs and today I'm going to talk about how Ethereum citizens are embracing self-sourced identity. 
So who is a citizen based on the dictionary, a person who is a member of a particular country or a person who lives in a particular town or city. We extend this definition to the 21st century, we can just state the network state or any part of the digital community. But who is an Ethereum citizen? So Ethereum citizen is a person who is participating in the Ethereum ecosystem or using the Ethereum technology to achieve its goals while promoting the Ethereum's values. So I just want to say there is really no hard definition similar to Ethereum enlightenment, but you can pretty much tell by feeling what I mean by that. So what are the Ethereum values? So from the technology perspective, this is decentralization, security, and scalability. But from the user perspective, is in decentralization security and scalability but from the user perspective this is self-certainty censorship resistance ownership permissionless and privacy so where are we today so for financial assets uh we are pretty much there uh for smart contracts we are somewhat there so smart contracts can be permissionally deployed to the ethereum network but usually they can be upgraded through the governance process of a decentralized community, but most smart contracts have multisig today. So websites. Websites can be hosted in a decentralized way on a decentralized storage network, but we are also somewhat there. And for identity and data, identity and data should be fully owned and controlled by the users. And we are not there yet, but things are getting better in the last years. So as an Ethereum citizen, I want to control my identifiers so no one can take my identity. So here we have many options. So basically identifiers are Ethereum addresses. ENS handles DIDs or semaphore identifiers in the ZOOPAS. But currently the most adopted thing we have are EOAs, so blockchain addresses with ENS, but this doesn't give us any privacy. And for the data part, we want to control our data and share this closet only to the people we want and when we want. So that means that the data should be stored in our wallets, on our personal servers, or encrypted on the third parties. But for that, if we want to make it that work, we need verifiable data. So what is verifiable data? This is like the famous triangle of trust in the self-sourced identity community. So we have the data issuers, we have users, and we have verifiers. So how the verifiable data works is that the issuers issues the data to the users and they sign over the data. And when users disclose the data to the verifiers, the verifiers can verify the digital signature so they know the data came from the issuer. But here I also want to mention that the data issuer and the user can be the same person because some data can be self-signed or self-issued if the data is not that important, or maybe it is important, but for example, what is my favorite color? This data can be signed by myself. So what's the reality? 
So financial assets in the decentralized applications are in our control but the data is usually stored in the centralized databases for example list of my favorite favorite nfts or tokens on trading devs this is usually stored on each platform in their centralized databases on every platform you have to connect your social accounts and prove that you're owning them and you have to repeat this on every platform and the kyc also you have to connect your social accounts and prove that you're owning them and you have to repeat this on every platform and the KYC also you have to repeat that on each platform but it's not all that bad things have drastically improved in the last two years so for example just look at your tickets for the DEF CON you can you store them in the zoo pass so the tickets for the DEF CON are one kind of the verifiable data, and we also have other kinds, for example, forecaster and NAND protocol posts. DIDs and verifiable credentials are getting more used in the last years. The proof-carrying data is another type of the verifiable data, and we also have zero-null solutions like ZKIT-TLS. So how would the perfect world look like? User would visit her favorite app. The user needs to fill out some information, for example social profiles or any other data that this DAP needs, and the DAP would be able to fetch the data or from the user or from some other third-party provider where the user is storing his encrypted data, and the DAP could use this data without requiring the data to be created or filled out again. So what are some of the challenges? First challenge is user-friendliness. So data sharing in this way takes a long time. Proof generation time for zero knowledge proofs also takes a long time. Use the developer experience so we don't have any unified SDKs and tools for different solutions. And we have many standards for the data and identity but we also don't have enough standards on the wallet side and how to actually use these standards in the applications. So Ethereum citizens will become self-sovereign when not only their assets, but also their data and identities will be owned by them without any centralized point of failure. And that's it from my side. Thank you. Thank you very much, Vít. So, Q&A, remember, just raise your hand, and I will toss this to you. Are there some questions for Vít? Yep, let's go. Hey, quick question on your view of government data. So because all of our identities, unfortunately, it's tied to some sort of government credential. How does that integrate into the schema that you presented for Ethereum citizens? Yeah, so maybe I can talk about how the idea is currently in Europe, where I'm from. So the European government and governments talk about how the idea is currently in the Europe where I'm from so the the European government and governments are also pushing for the similar concept so you have issuers you have users you have very files verifiers and you also have the data signatures basically the whole system is the same only the issuers are the government entities which are issuing the data. You're saying to integrate with the IDAS 2.0 wallets, that's the plan? Excuse me? So is the plan then to kind of integrate with IDAS 2.0 identity wallets that governments are providing? I think that depends on the applications. For example, if you have some decentralized applications, you don't need for everything the government IDs. But I think that the best approach is to merge on the similar technologies. 
So basically, if you are working on a wallet, which works in the Ethereum ecosystem, it's not so much work to also support other government use cases or whatever. Are there any more questions? Don't be shy, guys. Yep, we have one. Don't be shy. put it close to your... I'm bad at volleyball. So the question is, like, what do you think holds us back? Is it just the governments and KYC providers being not flexible enough? Or, like, what's the real... I think there are many challenges. So, for example, if you want to develop your application in a way that the users own the data, I mean, like, the user experience in Web3 is already not so great. And if you want to do everything like I said, it gets even worse. So I think that we need more tools, just like the standard technical things that we need to figure out first before we can give it to the user so they can use it more. Thanks. Yep, we have one question. Will be the last question. Yeah, thanks. Do you think there's going to be, is there somehow a bit of a race between governments rolling out citizen identification systems which are mandatory and a version of decentralised identity which you've outlined here? and a version of decentralized identity, which you've outlined here. Are you optimistic that the idealized, decentralized, self-sovereign identity model will prevail in time, or do you think they're at odds with the centralized sort of government-issued IDs? I think that in the short term or midterm, we will have both. So basically, like, I don't see that, for example, getting a passport or anything like that, we will be able to do it with decentralized identity. I think that some kind of documents will always be tied to the government identities. But inside the, like, Ethereum ecosystem or there, I don't think there is be tied to the government identities, but inside the Ethereum ecosystem or there, I don't think there is a need for the government ideas. I think we can go fully with decentralized identity. Yep. So that's all. Thank you very much. Thank you. Pete.", + "sources_streamethId": "67370b1774749a4b89860bb8", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67370b1774749a4b89860bb8.vtt", + "transcript_text": " Hi everyone. Good afternoon. Or morning? Afternoon, right? So I want to do a really short presentation about what's happening in the Ethereum culture, actually. And I want to give you some sort of landscape of what the culture looks like. Especially this DevCon has like 60% of people are new to DevCon. And a lot of people might be the entry point. They are very curious about what is Ethereum. So I want to give you the super, kind of like a digest of what is the Ethereum culture expanding. In the Infinite Garden. So my name is Tomo. I'm a designer at the Ethereum Foundation and I'm from Japan. I work on the DevCon and some other stuff in the Ethereum Foundation stuff. Probably you see a lot of things. Not this, but this is, yeah, this is some sort of, like, I do illustration or design. This is from the Ethereum Foundation website. And then DevCon. Like, probably, I don't know how many of you enjoying being at DevCon and this whole design stuff. I worked on this, and then with other people, of course, collaboration is very important. So I'm really appreciating. Thank you so much for enjoying, and I really love to hear what you guys think. And yes, I am excited about the future of DevCon and stuff. Anyways, so Ethereum is a cultural platform. 
I could say that because I got involved in Ethereum five years ago when I joined the DevCon Osaka. And then I came to the DevCon and I thought, oh, DevCon is like the Burning Man for techners. Did I say? Yeah, techners. Right? So everybody knows what is Burning Man? Yeah, Burning Man is like this. Right? And having fun and self-expression and having a great moment. Right? And then get together as a community, and sometimes you reflect, right? So very similar somehow. So I was like, okay, I can definitely contribute more to this community. Okay, so now, so what is a culture actually? So culture definition, the art and the other manifestation of the human intellectual achievement regarded collectively, that's cool, and the ideas, customs, and the social behavior of particular people or society, right? And the third one is actually really like, maintain tissue cells, bacteria, etc. in a condition suitable for growth. It sounds very Ethereum, actually. So that's the image of that. Like we're creating actually small things in one of those. So each one has its own unique activities and projects, things. And in terms of the culture, I talk about the music a lot because music is very cultural and that influenced me so much. Do you know, this picture is the club culture we're really familiar with right now, with the big speakers and lighting. Do you know where it started? Do you have any idea, anyone has an idea where the club culture started? Some people might say like Detroit. Detroit techno things was happening before. Anyone has any idea? Like some... Japan. Oh, Japan is cool. Japan is really cool. I'm proud of Japan. Okay, so the origin, as far as I know, is here. Jamaica, late 60s and 70s, they were creating a sound system in the park, and they created those dancing environment. And one time in the late 60s, a producer forgot to, so they were actually creating copies of records, and they forgot to copy the vocal track to the next record. So it became like the only track record was created by mistake. And then when they played that, people got nuts about it. So it's like the club culture started by mistake in the late 60s. I think it's the 69 or something. But that's a beginning. And then that's created as the mistake. It's a small thing. So starting from there to that, this is phenomenal. So that's a part of the culture. So we are one of those three things. And then imagine we're reaching 10 years next year, but many years later, what's going to happen? So I want to go through a little digest of major subcultures in Ethereum. So cypherpunk. So probably a lot of people heard about this word and probably swag, the rain jacket, is reflecting that ethos of the cypherpunk as well. So cypherpunk is the DIY movement, also prioritizing your privacy and security, and it started in the 90s, so that was kind of like a base of the culture who built cryptographic technologies and also a lot of applications such as Bitcoin and now Ethereum. And that relates to, I think you talked about the, yes, solarpunk and also the lunarpunk also. That's kind of a sub-subculture, right? And we have regens. Regens are creating the participative economy with smart contracts, cryptography, and social impact culture. And of course, we should not forget about the degens because the degens play some interesting role in the ecosystem as well. So those three are major subcultures in Ethereum, and also layer 2s are creating another cultural element. So, let's appreciate and then see how it's going to grow, right?
And an example of the cultural experience and ideas, obviously, in the DevCon, this is one of the most active cultural experiences. So I hope you guys are enjoying it. And DevConnect is another event run by the Ethereum Foundation. We're doing it next year. It happened last year in Istanbul. Next year, we don't know where yet, but we'll see. And local community events are so important. We have a lot of, like, as Josh mentioned, probably 100 more local events happening over the years, right? So it's really amazing. And then pop-up communities such as Zuzalu, Edge City, etc. It's becoming an interesting experiment, and I attended one of those in Chiang Mai, and it was really fun. And also, yeah, great to observe what the global movement is going to be. Pagoda is one of the local region-oriented groups that focuses on Asia. d/acc. d/acc had a session yesterday, and it's trying to be safe. Technology is cool, but also accelerate, just defensive acceleration is very important because we want to keep humanity safe and protected. And this is a video example of the d/acc. So technology is cool, but you don't want to be just like using the phone at the club. You know, just take a photograph or videos. You know, you want to enjoy life. So, yeah, just be sure we create technology for something good. And autonomous worlds is the concept of, digitally, could be also real world, I believe. It's the cryptography and also, what did I say? Yeah, smart contract type of platform to create an autonomous society movement and then organizations. And also, in this DevCon, I led the artists and writers cohort, and then there were eight installations in the venues. And it was great to experiment for us to work with the artists and writers to learn what is the essence of Infinite Garden and reflect through art and writings. Yeah, we spent about three months to work on this. And then this was really, yeah, I learned so much. Okay, so how is it expanding the Infinite Garden? I believe most people know about the Infinite Garden, I guess, but let me repeat that. The essence of the Infinite Garden is creating a platform or space so that you can keep playing the infinite games. And let me read this quote. A finite game is played for the purpose of winning. An infinite game for the purpose of continuing the play. So we, as the Ethereum community, we don't want to create winners and losers. We want everyone to keep playing the game. So that's the essence of Infinite Garden. And when it comes to playing the game of culture, it probably looks more like this. Like expanding. So we're not experimenting in a small space, but more like we're visioning expanding and connecting. And a lot of the activities I listed earlier would show up like this. And you are belonging to one of those, I believe. And another way to describe the expansion is like this as well. And we say my solarium is a good example. And yeah, again. Okay, so, okay, I'm running out of time, so I need to go really fast. Okay, you're also building a culture, right? Because you're creating something in Ethereum. So how to build a culture in the Infinite Garden? So learn the core values. These are core values, okay? So you can go to Vitalik's website, blog. It's super good, about making Ethereum cypherpunk again. And define your values. Like why are you working on Ethereum? Just reflect why, right?
And then create the practice and project. And because culture has more impact than features. And when you create a culture, going from culture to feature, I think that will make things so much better, I believe. And then step four, share your values. Just like I mentioned about the mandala in the opening ceremony. And remember, we are starting like this. Starting with the sound system, and we are creating the global culture together. Okay, thank you. Sorry, I took so long. Thank you very much. So, time for the Q&A. Oh, we already have one question. Oh, nice. All right, thanks for that. That's a really cool presentation. I think it's generally easier when you're running an open source project or something that's a public good. But what advice would you have for builders that are building for-profit tools in the ecosystem? How do they fit into this ecosystem from a cultural perspective? From my experience, I would say actually a paying-forward mindset always somehow works. It's not so much a project principle, but more like a principle of life. And then that is somewhat like you just contribute first and then just receive afterwards. And then not expecting too much about it. Expectation kills you. So I don't know if I'm really answering your question, but that's how I have been doing. No, you are. It's like sharing without expectation of returns, not selling, not transactional. Contribute. Got it. Thank you. Okay. Thank you. Have any more questions? Yep. Who are the artists that influenced you in your personal style? It's definitely Miyazaki. I grew up in Japan. And also the French artist called Moebius. They actually both were influencing each other in the 70s and 80s, so that's definitely my biggest influence. All right, are there any more questions? Yep, we have one over there. What are some areas you think people are under-focused on, that people should focus on in the culture of this community? That's a really good question. Let me think. Well, that means actually, I think there's a lot. I feel like Ethereum is covering actually so little. And I'm not saying necessarily Ethereum should cover everything, but I think we are covering our own places and areas of interest as a community, and we can expand more, and a lot more. And a lot more I never experienced or never heard yet. So I think it's okay, and I'm curious, what do you think? What do you think, you know, what's missing for us? If you have one. There's definitely something about people not knowing what Ethereum is. If you talk to a normal person, they just think it's gambling. So there's definitely something there. Maybe it's a product that really helps people or a narrative that makes people feel it isn't just for gambling. Yeah, definitely. I'm 100% sure there's a lot of people who have a misconception about what is Ethereum. That's actually why I am kind of working on creating the world of Ethereum, using my skills to create some familiar world that you can somewhat experience. So yeah, definitely, yeah, that's a good challenge I'm taking and I'd love to challenge more. Yep. We also have one over there. So there's a lot of things that contribute to the culture, you know, whether it be people building in the ecosystem, events like this. So what do you think are some of the key pillars that help build a culture within Ethereum, for example? I would say those three pillars, the cypherpunk and regen and degen, are major pillars that build this ecosystem. And definitely the cypherpunk is the largest.
And then we can actually learn what is the essence of Cyberpunk and create your own version. It is really fun and also probably meaningful for you as well. What was your personal reason for joining the Ethereum Foundation? Personal reason to join the Ethereum Foundation? I was actually asked to apply to the job. It was five years ago and and a designer was leaving, and so they were looking for the new person. And then I got a connection to apply. So, I mean, that's a dumb question. Dumb answers. But, yeah, so actually, so I started working, right? And then, again, it came to DevCon, and I just realized what this experience is such amazing. So I just decided to stay. All right. So cool. Thank you so much. Thank you very much.", "eventId": "devcon-7", - "slot_start": 1731646800000, - "slot_end": 1731647400000, + "slot_start": 1731649200000, + "slot_end": 1731650400000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1JzCvRvtEDW6bmL33pf1kIydVAzlZM-tN5p_XZlUg02I", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1A5FoYp0OS56Zm_O5Ba5qVu-PLRcWRf09JijiP2TnAog", + "resources_slides": "https://drive.google.com/file/d/1hemsnmAUCsPPwHytgaVsypcpqqc3eo5C/view", "speakers": [ - "vid-kersic" + "tomo-saito" ] }, "vector": [ @@ -306758,6 +305898,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -307020,8 +306161,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -307494,7 +306633,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -307561,7 +306699,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -307569,6 +306706,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -307625,7 +306763,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -307760,7 +306897,7 @@ 0, 0, 2, - 0, + 2, 0, 0, 0, @@ -308015,8 +307152,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -308030,44 +307165,39 @@ }, { "session": { - "id": "ethereum-culture-expanding-in-the-infinite-garden", - "sourceId": "ZS338S", - "title": "Ethereum Culture Expanding in the Infinite Garden", - "description": "As a designer at the EF for the past 5 years, I’ve witnessed the unique culture of Ethereum and its growth. My talk aims to illuminate the vast cultural landscape of our ecosystem such as Cypherpunk, Regen, Degen, and L2s as subculture. I'm hoping to assist ecosystem participants, especially new comers, in becoming the infinite game players in the Infinite Garden.", - "track": "Cypherpunk & Privacy", + "id": "ethereum-execution-layer-specifications-eels", + "sourceId": "3GCD7S", + "title": "Ethereum Execution Layer Specifications (EELS)", + "description": "An introduction and walk-through of the executable specifications for the Ethereum Execution Layer. \r\nGithub (https://github.com/ethereum/execution-specs)\r\n\r\nEELS is an implementation of the EVM in Python that has been optimised for readability. 
A great tool for EIP authors looking to prototype new ideas on the EVM, it is easy to understand as well as update with new features.", "track": "Core Protocol", "type": "Talk", "expertise": "Intermediate", "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ "Core Protocol", "Layer 1" ], "keywords": [ "Execution", "Layer" ], "duration": 1253, "language": "en", "sources_swarmHash": "5152428075c4cdb0ae87fd4ba618e21a8b8d00dee0da4e8f53acff649df95802", "sources_youtubeId": "WEvCFg0Z1D4", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67370b1774749a4b89860bb8", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67370b1774749a4b89860bb8.vtt", "transcript_text": "
Thank you very much.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731649200000, - "slot_end": 1731650400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1A5FoYp0OS56Zm_O5Ba5qVu-PLRcWRf09JijiP2TnAog", - "resources_slides": null, + "slot_start": 1731402000000, + "slot_end": 1731403800000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1tBeUpTPFPiF-99JI_q0F1DV1g8Bx09ZHLkprfgVzn2c", + "resources_slides": "https://drive.google.com/file/d/1Y4rDk-2mnBsfHal-yDu0-ywHbIKGTh6F/view", "speakers": [ - "tomo-saito" + "guruprasad-kamath" ] }, "vector": [ @@ -308075,7 +307205,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -308135,7 +307264,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -308399,6 +307527,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -308835,9 +307964,11 @@ 0, 0, 0, + 6, 0, 0, 0, + 2, 0, 0, 0, @@ -308946,7 +308077,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -309137,8 +308267,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -309382,6 +308510,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -309394,10 +308523,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -309407,46 +308532,38 @@ }, { "session": { - "id": "ethereum-execution-layer-specifications-eels", - "sourceId": "3GCD7S", - "title": "Ethereum Execution Layer Specifications (EELS)", - "description": "An introduction and walk-through of the executable specifications for the Ethereum Execution Layer. \r\nGithub (https://github.com/ethereum/execution-specs)\r\n\r\nEELS is an implementation of the EVM in Python that has been optimised for readability. A great tool for EIP authors looking to prototype new ideas on the EVM, it is easy to understand as well as update with new features.", - "track": "Core Protocol", + "id": "ethereum-in-30-minutes", + "sourceId": "GAJPCN", + "title": "Ethereum in 30 minutes", + "description": "Don’t miss the Devcon Opening Ceremony, where we’ll set the stage for an incredible event ahead, with talks from Vitalik Buterin (Founder of Ethereum), Aya Miyaguchi (Executive Director of the Ethereum Foundation), Josh Stark (Ethereum Foundation Leadership), Skylar Weaver (Devcon Team Lead), and more surprise guests.", + "track": "Real World Ethereum", "type": "Talk", - "expertise": "Intermediate", - "audience": "Developer", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Core Protocol", - "Layer 1" - ], - "keywords": [ - "Execution", - "Layer" - ], - "duration": 1253, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "5152428075c4cdb0ae87fd4ba618e21a8b8d00dee0da4e8f53acff649df95802", - "sources_youtubeId": "WEvCFg0Z1D4", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731402000000, - "slot_end": 1731403800000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1tBeUpTPFPiF-99JI_q0F1DV1g8Bx09ZHLkprfgVzn2c", - "resources_slides": null, "speakers": [ - "guruprasad-kamath" - ] + "vitalik-buterin" + ], + "eventId": "devcon-7", + "slot_start": 1731384000000, + "slot_end": 1731385800000, + "slot_roomId": "main-stage", + "sources_youtubeId": "ei3tDRMjw6k", + "sources_swarmHash": "d4b974f86276f34632b9a6361a60ff2c85d5da50b1aa85c09829c824eb97c5a9", + "resources_presentation": "https://docs.google.com/presentation/d/1c4kXKhLTBksDY0GKRITW1Zog1_t1FjxKAJm7icOjg3I", + "resources_slides": "" }, "vector": [ 0, 0, 0, 0, + 0, + 0, 6, 0, 0, @@ -309634,6 +308751,17 @@ 0, 0, 0, + 6, + 0, + 0, + 0, + 0, + 0, + 
0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -309770,7 +308898,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -310209,22 +309336,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -310760,11 +309871,10 @@ 2, 0, 0, + 2, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -310779,29 +309889,43 @@ }, { "session": { - "id": "ethereum-in-30-minutes", - "sourceId": "GAJPCN", - "title": "Ethereum in 30 minutes", - "description": "Don’t miss the Devcon Opening Ceremony, where we’ll set the stage for an incredible event ahead, with talks from Vitalik Buterin (Founder of Ethereum), Aya Miyaguchi (Executive Director of the Ethereum Foundation), Josh Stark (Ethereum Foundation Leadership), Skylar Weaver (Devcon Team Lead), and more surprise guests.", + "id": "ethereum-in-the-classroom-or-teaching-solidity-to-high-school-students-in-buenos-aires", + "sourceId": "9HFAES", + "title": "Ethereum in the Classroom | Teaching Solidity to High School Students in Buenos Aires", + "description": "ETH Kipu is breaking new ground by introducing Ethereum education to teenagers in Argentina. Discover how we collaborated with the Buenos Aires Ministry of Education to create hands-on learning experiences, teaching students to build smart contracts using Solidity. This talk will share best practices from our experience and how it can be replicated globally, sharing the insights we have discovered in the classroom and how we develop this partnership.", "track": "Real World Ethereum", - "type": "Talk", - "expertise": "", - "audience": "Engineering", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Academic", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], - "language": "en", - "speakers": [ - "vitalik-buterin" + "tags": [ + "Design Thinking", + "Ethereum for Good", + "Public good" + ], + "keywords": [ + "Education" ], + "duration": 457, + "language": "en", + "sources_swarmHash": "a724073c7b9bc55d470144a87b5f8abf0eac448ffd43ef9515329272cfceb31b", + "sources_youtubeId": "1HOOnlu1qQ4", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6736df2a1b0f83434d8917c4", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731384000000, - "slot_end": 1731385800000, - "slot_roomId": "main-stage", - "sources_youtubeId": "ei3tDRMjw6k", - "sources_swarmHash": "d4b974f86276f34632b9a6361a60ff2c85d5da50b1aa85c09829c824eb97c5a9", - "resources_presentation": "https://docs.google.com/presentation/d/1c4kXKhLTBksDY0GKRITW1Zog1_t1FjxKAJm7icOjg3I" + "slot_start": 1731559200000, + "slot_end": 1731559800000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1clRG027QMaA-_D-yds9TfGuZXmzRy5tpHKs67z97Mqw", + "resources_slides": "https://drive.google.com/file/d/1Fl3xaDogBkq8Rckhi2JkasdvrsQHzbIh/view", + "speakers": [ + "juan-david-reyes", + "romina-sejas" + ] }, "vector": [ 0, @@ -310997,15 +310121,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -311140,6 +310255,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -311668,6 +310785,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -311684,6 +310802,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -311701,6 +310820,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -312122,8 +311242,6 @@ 2, 0, 0, - 2, - 0, 0, 0, 0, @@ -312132,6 +311250,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -312140,42 +311259,41 @@ }, { "session": { - "id": "ethereum-in-the-classroom-or-teaching-solidity-to-high-school-students-in-buenos-aires", - "sourceId": "9HFAES", - "title": 
"Ethereum in the Classroom | Teaching Solidity to High School Students in Buenos Aires", - "description": "ETH Kipu is breaking new ground by introducing Ethereum education to teenagers in Argentina. Discover how we collaborated with the Buenos Aires Ministry of Education to create hands-on learning experiences, teaching students to build smart contracts using Solidity. This talk will share best practices from our experience and how it can be replicated globally, sharing the insights we have discovered in the classroom and how we develop this partnership.", - "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Academic", + "id": "ethereum-needs-native-l2", + "sourceId": "9RNWDX", + "title": "Ethereum needs native L2", + "description": "Right now, L2beat tracks 116 L2s. However, they represent a wide range of trust assumptions, which makes assets—or more abstractly, messages—from these L2s non-fungible and thus significantly hampers interoperability. We are advocating for Ethereum to deploy a large number of native L2s, developed and governed by Ethereum's open-source developers. These L2s would be highly interoperable with L1, fulfilling Ethereum's early promise to provide sharding using L2 technology.", + "track": "Layer 2", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Design Thinking", - "Ethereum for Good", - "Public good" + "Cross-L2", + "Ethereum Roadmap", + "Scalability" ], "keywords": [ - "Education" + "interoperability" ], - "duration": 457, + "duration": 1619, "language": "en", "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_youtubeId": "QGelE6UjHEw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736df2a1b0f83434d8917c4", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6736e7311b0f83434d194833", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736e7311b0f83434d194833.vtt", + "transcript_text": " The controversial presentation and I kind of expect to make well kind of everyone angry because I'm criticizing to some extent the existing L2s and to some extent the existing Ethereum core roadmap. But I think it needs to be done. So in this role, I'm founder of Gnosis, but in this capacity I'm just speaking as someone who has built for over 10 years now on Ethereum. So we started building on Ethereum when it was still a testnet, deployed one of the first apps a couple of weeks after Ethereum was released, and have continuously been builders in the space. So, let me make a few comments on the previous presentation. I have the absolutely highest respect for Jesse, for what BASE is doing, what Coinbase is doing in general, but I do think the claim to say we are bringing people to Ethereum Coinbase is doing in general. But I do think the claim to say we are bringing people to Ethereum, we are bringing the next billion people to Ethereum, is wrong. We are bringing the next billion people to base. And that is a significant difference. So just a few examples. So Jesse was also talking about the 30% fee that centralized platforms can charge from you, of course, referring to Apple, that is something, for example, Base can absolutely do as well. So if you are an app on Base, it's absolutely in their hands to control how much fees you're paying for a transaction. 
And of course, they can start charging a 30% cut. And this is typically something that doesn't happen early on. That happens much later. Once a platform is established, a lot of kind of lock-in effects happen. And it often even doesn't happen with the first generation. So ultimately, Coinbase is a regular company and yeah, they are great people. I absolutely, again, have highest respect for Jesse and Brian. But they are employees at the company and at some point they might leave. They might retire. They might do something else. And then another set of people comes. And ultimately, it's a company controlled by shareholders. There's a fiduciary duty to maximize shareholder value. So, oh, we have this asset base. Oh, we can spike up fees. We can take more fees from those applications. And in a way, it's even our duty to do it because we have to maximize value for our shareholders. So my claim is bringing people to those L2 ecosystems is quite different from being on Ethereum. The claim, of course, is always rollups inherit the security of Ethereum. And we see labels like it's secured by Ethereum, you kind of get what you would get from Ethereum. But the reality is absolutely none of those top L2s actually, yeah, kind of truly inherits the security of Ethereum. So if you look into detail, funds can be stolen here, funds can be stolen here, funds can be stolen here, funds can be stolen here, funds can be stolen. And then on top of that, now we are saying, okay, let's do chain abstraction. So let's make it invisible to the user on what chain they actually are. But in a world where the underlying security is quite, well, different and opaque, chain abstraction actually means, in that case, risk abstraction. There was a nice talk two days ago. So if you're abstracting the chain, you're abstracting the risk. And that's, well, that's dangerous. So at some point, funds might be lost. And then you can no longer abstract the chain. And you need to tell the user, oh, sorry, your funds are lost. But there are even more risks that even L2beat does not cover. Yesterday there was a great presentation by James showing, and again just referring to the presentation, that on any chain with a centralized sequencer, that's again, for example, Base, Arbitrum, Optimism, if you have funds as a user on a money market like Aave and Compound, the sequencer can indefinitely and at zero cost prevent you from accessing your funds, from withdrawing your funds. And you might think, yeah, but you can force include a transaction via L1. No, that won't help. So roughly the idea is you create this withdraw request, and because the sequencer ultimately can control the exact transaction ordering, they can alter the state of that money market. So they can just, for a sub-second, borrow too many funds out of that money market so that your request to get your own funds out fails, and then kind of directly after they repay, so they pay zero interest because it's the same time, it's in the same timestamp. So just kind of those kind of things, yeah, in my view, again, truly show if you build on one of those L2 systems, you're far from inheriting or getting the security that Ethereum promises. So what does Ethereum security actually mean? It means many things, it's a culture, it's a process, it's much more than code.
So Ethereum security means public core developer calls, public discussions, multiple client implementations, rigorous testing, highest level of promise of backwards compatibility. And kind of those things cannot be derived or exported automatically to an L2. Just because you kind of deploy some code on Ethereum, that process and that culture cannot be derived. Of course, there can be efforts to kind of promote that culture. And to some extent, Ethereum is doing that. And kind of Vitalik is going around and saying, yeah, if you don't do those things, then I won't call you an L2 anymore. So, yeah, that culture can try to promote this culture. And L2beat is, of course, doing a great job in kind of trying to export that culture. But again, it cannot be derived with code. So looking at another data point, again, the claim is assets are secured by Ethereum, but the reality is, the part of assets that is actually coming from Ethereum is getting less, or the fraction is getting smaller and smaller. So most assets, again, this is for example Base, but it's very similar on other chains, and on smaller chains it's even much worse. Most assets are actually not coming from Ethereum. So this here is the canonical, oh yeah, so back here in time, the canonical bridge was still the dominant one. So that is kind of the one, the assets that are actually kind of secured by Ethereum. Now the dominant part are native assets. So those are assets issued on the L2 or on that other chain directly. And they do not inherit, there's not a concept of exiting those to Ethereum. And there are reasons for that. So if you use the canonical bridge and you want to derive that security, and you currently want to say, let's say, send a message from Arbitrum to Optimism and get a message back, that transfer takes two weeks. So if you actually use the canonical bridge, it would take you two weeks to do that. So a pigeon is faster in transporting a message for most parts of the world. So yes, no one is using that mechanism. Instead we are using new kinds of bridges, or external bridges, that don't inherit the security of Ethereum. So if most assets are not natively bridged, and sequencing is also not done by Ethereum, so assets not bridged from Ethereum means not secured by Ethereum, sequencing not done by Ethereum, then the role of Ethereum kind of is reduced to being kind of this checkpointing system. So here again rollups could choose how they build blocks, and they can either choose to use their own sequencing, which means they can do super fast confirmations, they capture their own MEV. But the disadvantage is that they cannot synchronously read into Ethereum. And that is the rational choice to do if you optimize for connectiveness to other chains or for, yeah, for TradFi. Or you can go kind of what's called based, where you let Ethereum do the block building and the sequencing, and here you are really optimizing for connectiveness to Ethereum. But that's only rational if it's more important for you to be as closely connected to the kind of economic zone that Ethereum is. But the reality is only 1% of value is choosing to do that. So here's my proposal for Ethereum to fix that and to address those issues. And the proposal is that Ethereum itself should develop and deploy ZK-proven EVM rollups, and deploy 128 equal instances of that, that are highly interoperable. And that are truly kind of, if you build on those, you truly build on Ethereum. So what would that mean, being built by Ethereum?
It would mean that you don't even think about introducing a multisig. That would be unthinkable, that Ethereum deploys something and it has some multisig as the upgrade mechanism. It would mean that we at least have, well, we want to have multi-clients, so we would have at least two independent implementations of, let's say, the proof systems. That's the most important part here. Rigorous testing, thousands of eyes that look at the stuff and actually care about all the details that are often swept under the rug. So the idea is, yes, we can still have L2s, a wide range of designs. Within that we have based L2s, so L2s that use Ethereum for block production and for sequencing slash block production. But even a subset of those would be native L2s. So truly built by Ethereum, governed by Ethereum. And again, we can here see some things. So reads into L2 are synchronous here. But of course, also the economic perspective is important. So only in the native roll-up, essentially all value of that roll-up is captured by Ethereum itself. Now, one question could be, is that actually sharding what I'm proposing here? No, not exactly. So sharding would be a design where you would have, in a way, multiple L1 instances. And that was kind of still the promise a couple of years ago, that we would have 1024 shards. So essentially instances of Ethereum L1. And they would all kind of live on the same layer. What I'm proposing is not that. But instead, still something that tries to come as close as possible. Have the L1 and then have those 128 equivalent L2s. But still in this hierarchy. And looking more at it, yeah, some things. So on an L1, you can do synchronous reads and writes, and that's what we all love about Ethereum, this composability. The composability within this larger system would still be pretty strong. So from an L2, the idea is you can still do synchronous reads into Ethereum. So meaning you can execute some code on the L2. You can actually read a contract on L1 in that execution. So let's say a price oracle or anything that lives on Ethereum. And you can kind of even read the function, continue with that result in the L2 process and do things. So kind of the L2 can do things dependent on the state of the L1, so it has immediate transparent access. The other way around, from the L1, you cannot read immediately the state of the L2s. Those are asynchronous reads. But writes are actually synchronous. So you can very well do a transaction on L1 that, in the same transaction, so atomically, also creates a write into the L2 and affects the state. You will just not, in the context of L1, be able to immediately get kind of the result of that state change. So again, the read will be asynchronous. Same in communication between L2s. So you can synchronously make a transaction that affects the state of two L2s, but again, you cannot read from one L2 to the other. So the general idea is stuff that everyone should be able to read, a kind of contentious state, might live on the L1, and on all the L2s, they can access the state synchronously, but there can be much more non-contentious state. So all the things I have proposed so far would be possible today on Ethereum without any kind of upgrades or changes to Ethereum. It would essentially just take the Ethereum community to decide to build that. Of course, if we do that, we can actually make those L2s even more powerful. So two things I'm proposing here that can be done.
If we make those L2s native, we can make them also native to the economics of Ethereum. So Ethereum, of course, let's say, issues ETH to reward validators to participate in the consensus. In the same way, it could also kind of redirect or direct this issuance towards proving the correctness of all those L2s. Another one is that I'm suggesting that those L2s should have distinct namespaces, so meaning an address should be clearly attributable to one L2 or to one kind of, in a way, chain, either L1 or L2. And in my view, that is a big problem, that right now you have an address and the same address on various chains can mean very different things. So, for example, if you have a Safe, then actually the same address can exist on multiple chains, but can have completely different owners. So what I am suggesting here is that we shouldn't have this address collision. So each L2 kind of uses an additional salt to have its own address space. And if we have that, we could allow sending a message from the L2 with its unique address into the L1 and actually have the message sender on the transaction be that unique address that only exists on the L2. So that allows you to do things like, let's say, you hold a token on L1 by an address or a contract that only lives on an L2. But yeah, some technical details. So here's kind of the spiral I expect or I see already happening if we don't go this route. So if we don't go this route, it means the economic zone that is Ethereum and Ethereum block building becomes less relevant. And therefore it already makes it less attractive to use Ethereum as a block builder or as a sequencer. And again, we already see this today. 99% of the economic value of L2s choose not to be based and not to use Ethereum as a block builder. At the same time, more and more native assets are not coming from Ethereum and are not really secured by Ethereum. Either they are natively issued or they use external bridges. So for those, the economic security of Ethereum also matters less and less. So in total, that means that the relationship between roll-ups and Ethereum really becomes weaker and weaker. And yeah, I'm going so far as to say it just becomes a meme. So now, so essentially in my view, that's the crossroad we are in front of. With native roll-ups, we can kind of continue to have Ethereum as the most relevant economic zone. I would claim that should be the goal, to be the most relevant economic zone even in the world. That's a place where prices are... There's also the other perspective. Some believe Ethereum should be a meme or is a meme. And they promote this idea of, yeah, let's say Ether is money. And their perspective might be, if we do native roll-ups, we kind of damage the meme. We damage our collective maybe religion or kind of, and because we create tension within this nice family that we all are. So we disenfranchise those roll-ups, so therefore the ETH meme gets weaker, and all that economic stuff doesn't really matter. It's just about saying ETH is money and spreading ETH, and therefore it's better to not do native rollups. All right. So final slide. For whom is it to decide what route we go? Absolutely.
For all of us that are here, for everyone who is Ethereum, and we are all Ethereum. It's not just, let's say, some core developers or the Ethereum Foundation. Essentially, it is, yeah, Ethereum, and anyone can essentially affect changes to Ethereum. Thank you very much. Thank you so much. And while we're still on that note, we have some questions from everyone here. And the first one is, there are 128 identical rollups. Which one should I deploy my dApp on? Yeah. So, I mean, the first important part would be that right now, if you wanted to deploy somewhere right now, well, you either use L1 Ethereum or you have to choose one of those many ecosystems, be it Superchain or AggLayer or something. And this would give you the opportunity to actually just say, okay, I want to be on Ethereum. Now, if you have to choose a particular one, then again, reads and writes within a particular L2 are synchronous. So if there are other applications you want to regularly interact with, then yes, it would be wise to choose that one. If you're fine to say, okay, I just want this close access to the L1, they are all equally close to the L1, then you really should just choose the rollup that is used the least, because that will be the cheapest. Why not implement this on Gnosis? Yeah, I mean, maybe, but I think really, I mean, well, Ethereum is the big thing. So Ethereum has that credible chance to be this most important economic zone. As much as I would like Gnosis to become that, we are not there. I mean, Ethereum is 100 times bigger. And again, I mean, saying that again, so there are all those ecosystems, and Gnosis is one of them, that also tries to do valuable things. And again, I think that's totally valid. I'm just saying, I'm not saying if you come to Gnosis, you'll come to Ethereum. No, you come to Gnosis. And we try to do valuable things there, and in the same way, I would say other ecosystems, again, they are great ecosystems and they bring great value, but you should understand that you are building on that ecosystem and not necessarily on Ethereum. Alright, why 128? Why not start with 4 or 16? Yeah, I mean, the idea is really to make it clear that building on Ethereum or building here is long-term viable. So I do think, well, I absolutely also want to share the vision to bring a billion people on chain, and for that Ethereum needs to be much more ambitious, and I think 100x increase in effective block space is really what should be aimed for, on a timeline of, let's say, two years. I would say it could be realistic for that, and not the 4X or something like that. Alright, the next one wasn't necessarily a question, it was more a compliment. They just agree completely and they're thanking you for stating the hard truth. But then we've got, do you consider this as rugging existing roll-ups? Yeah, I mean, well, rugging, yeah. So I think existing roll-ups, well, already have a big time advantage. They are live. They are already built. So that will take at least, again, I think, let's say another two years. But then, yes, if existing roll-ups will not offer anything other than kind of just provide EVM block space, then they will have a strong competition.
But I do absolutely think that all those existing roll-ups are great people, innovative people, and I think they will be able to kind of evolve and create something, or essentially have additional innovation on top. And if they do that, then yeah. There can just be much more kind of basic EVM block space as we know and love it since 10 years. That's what I'm proposing here, to just create much more of that. And then there can be those L2s that are more innovative, that do additional things, that do cutting edge, maybe built-in privacy, maybe some form of other sequencing that is much faster. There are all kinds of ways to innovate. So I think, yeah, again, both absolutely can live. All right. Unfortunately, we are out of time. Thank you, everyone, for your questions. And thank you once again, Martin, for your phenomenal talk.", "eventId": "devcon-7",
How do we best compete, and how do we ensure Ethereum values as we power more and more of the world outside crypto?", + "track": "Real World Ethereum", + "type": "Panel", + "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Cross-L2", "Ethereum Roadmap", - "Scalability" + "Use Cases", + "e/acc", + "case", + "use", + "e/acc", + "Ethereum Roadmap", + "Use Cases" ], "keywords": [ - "interoperability" + "stablecoins", + "real-world-use", + "use-cases" ], - "duration": 1619, + "duration": 3314, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "0744b811712e4fc06596318988aea1107a479258a4a14e81c272d0eefccfc715", + "sources_youtubeId": "3A0b9y7OmUI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736e7311b0f83434d194833", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736e7311b0f83434d194833.vtt", - "transcript_text": " The controversial presentation and I kind of expect to make well kind of everyone angry because I'm criticizing to some extent the existing L2s and to some extent the existing Ethereum core roadmap. But I think it needs to be done. So in this role, I'm founder of Gnosis, but in this capacity I'm just speaking as someone who has built for over 10 years now on Ethereum. So we started building on Ethereum when it was still a testnet, deployed one of the first apps a couple of weeks after Ethereum was released, and have continuously been builders in the space. So, let me make a few comments on the previous presentation. I have the absolutely highest respect for Jesse, for what BASE is doing, what Coinbase is doing in general, but I do think the claim to say we are bringing people to Ethereum Coinbase is doing in general. But I do think the claim to say we are bringing people to Ethereum, we are bringing the next billion people to Ethereum, is wrong. We are bringing the next billion people to base. And that is a significant difference. So just a few examples. So Jesse was also talking about the 30% fee that centralized platforms can charge from you, of course, referring to Apple, that is something, for example, Base can absolutely do as well. So if you are an app on Base, it's absolutely in their hands to control how much fees you're paying for a transaction. And of course, they can start charging a 30% cut. And this is typically something that doesn't happen early on. That happens much later. Once a platform is established, a lot of kind of lock-in effects happens. And it often even doesn't happen with the first generation. So ultimately, Coinbase is a regular company and yeah, they are great people. I absolutely, again, have highest respect for Jesse and Brian. But they are employees at the company and at some point they might leave. They might retire. They might do something else. And then another set of people comes. And ultimately, it's a company controlled by shareholders. There's a fiduciary duty to maximize shareholder value. So, oh, we have this asset base. Oh, we can spike up fees. We can take more fees from those applications. And in a way, it's even our duty to do it because we have to maximize value for our shareholders. So my claim is bringing people to those L2 ecosystems is quite different from being on Ethereum. The claim, of course, is always rollups inherit the security of Ethereum. And we see labels like it's secured by Ethereum, you kind of get what you get that you would get from Ethereum. 
But the reality is absolutely none of those top L2s actually, yeah, kind of truly inherits the security of Ethereum. So if you look into detail, funds can be stolen here, funds can be stolen here, funds can be stolen here, funds can be stolen here, funds can be stolen. And then on top of that, now we are saying, okay, let's do chain abstraction. So let's make it invisible to the user on what chain they actually are. But in a world where the underlying security is quite, well, different and opaque, chain abstraction actually means, in that case, risk abstraction. There was a nice talk two days ago. So if you're abstracting the chain, you're abstracting the risk. And that's, well, that's dangerous. So at some point, funds might be lost. And then you can no longer abstract the chain. And you need to tell the user, oh, sorry, your funds are lost. But there are even more risks that even L2beat does not cover. Yesterday there was a great presentation by James showing, and again I'm just referring to that presentation, that on any chain with a centralized sequencer, that's again, for example, Base, Arbitrum, Optimism, if you have funds as a user on a money market like Aave and Compound, the sequencer can indefinitely and at zero cost prevent you from accessing your funds, from withdrawing your funds. And you might think, yeah, but you can force include a transaction via L1. No, that won't help. So roughly the idea is you create this withdrawal request, and because the sequencer ultimately can control the exact transaction ordering, they can alter the state of that money market so they can just, for a sub-second, borrow too many funds out of that money market so that your request to get funds out or your own funds out fails and then kind of directly after they repay, so they pay zero interest because it's the same time, it's in the same timestamp. So just kind of those kinds of things, yeah, in my view, again, truly show if you build on one of those L2 systems, you're far from inheriting or getting the security that Ethereum promises. So what does Ethereum security actually mean? It means things, it's a culture, it's a process, it's much more than code. So Ethereum security means public core developer calls, public discussions, multiple client implementations, rigorous testing, highest level of promise of backwards compatibility. And kind of those things cannot be derived or exported automatically to an L2. Just because you kind of deploy some code on Ethereum, that process and that culture cannot be derived. Of course, there can be efforts to kind of promote that culture. And to some extent, Ethereum is doing that. And kind of Vitalik is going around and saying, yeah, if you don't do those things, then I won't call you an L2 anymore. So, yeah, you can try to promote this culture. And L2beat is, of course, doing a great job in kind of trying to export that culture. But again, it cannot be derived with code. So looking at another data point, again, the claim is assets are secured by Ethereum, but the reality is that the part of assets that is actually coming from Ethereum is getting less, or the fraction is getting smaller and smaller. So most assets, again, this is for example Base, but it's very similar on other chains and on smaller chains, it's even much worse. Most assets are actually not coming from Ethereum. 
So this is here the canonical, the, oh yeah, so back here in time, the canonical bridge was still the dominant one. So that is kind of the one, the assets that are actually kind of secured by Ethereum. Now the dominant part are native assets. So those are assets issued on the L2 or on that other chain directly. And they do not inherit, there's not a concept of exiting those to Ethereum. And there are reasons for that. So if you use the canonical bridge and you want to derive that security and you currently want to say, let's say, send a message from Arbitrum to Optimism and get a message back, that transfer takes two weeks. So if you actually use the canonical bridge, it would take you two weeks to do that. So a pigeon is faster in transporting a message for most parts of the world. So yes, no one is using that mechanism. Instead we are using new kinds of bridges, or external bridges, that don't inherit the security of Ethereum. So if most assets are not natively bridged, and sequencing is also not done by Ethereum, so assets not bridged from Ethereum means not secured by Ethereum, sequencing not done by Ethereum, then the role of Ethereum kind of is reduced to being kind of this checkpointing system. So here again rollups can choose how they build blocks, and they can either choose to use their own sequencing, which means they can do super fast confirmations, they capture their own MEV. But the disadvantage is that they cannot synchronously read into Ethereum. And that is the rational choice if you optimize for connectedness to other chains or for, yeah, for TradFi. Or you can go kind of what's called based, where you let Ethereum do the block building and the sequencing, and here you are really optimizing for connectedness to Ethereum. But that's only rational if it's more important for you to be as closely connected to the kind of economic zone that Ethereum is. But the reality is only 1% of value is choosing to do that. So here's my proposal for Ethereum to fix that and to address those issues. And the proposal is that Ethereum itself should develop and deploy ZK-proven EVM rollups, and deploy 128 equal instances of that, that are highly interoperable. And that are truly kind of, if you build on those, you truly build on Ethereum. So what would that mean, being built by Ethereum? It would mean that you don't even think about introducing a multisig. That would be unthinkable, that Ethereum deploys something and it has some multisig as the upgrade mechanism. It would mean that we at least have, well, we want to have multi-clients, so we would have at least two independent implementations of, let's say, the proof systems. That's the most important part here. Rigorous testing, thousands of eyes that look at the stuff and actually care about all the details that are often pulled under the rug. So the idea is, yes, we can still have L2s, a wide range of designs. Within that we have based L2s, so L2s that use Ethereum for block production and for sequencing slash block production. But even a subset of those would be native L2s. So truly built by Ethereum, governed by Ethereum. And again, we can here see some things. So reads into L2 are synchronous here. But of course, also the economic perspective is important. So only in the native roll-up, essentially all value of that roll-up is captured by Ethereum itself. Now, one question could be, is that actually sharding what I'm proposing here? No, not exactly. 
So sharding would be a design where you would have, in a way, multiple L2 instances. And that was kind of still the promise a couple of years ago, that we would have 1024 shards. So essentially instances of Ethereum L1. And they would all kind of live on the same layer. What I'm proposing is not that. But instead, but still something that comes, tries to come as close as possible. Have the L1 and then have those 128 equivalent L2s. But still in this hierarchy. And looking more at it, yeah, some things. So on an L1, you can do synchronous reads and writes, and that's what we all love about Ethereum, this composability. The composability within this larger system would still be pretty strong. So from an L2, the idea is you can still do synchronous reads into Ethereum. So meaning you can execute some code on the L2. You can actually read a contract on L1 in that execution. So let's say a price oracle or anything that lives on Ethereum. And you can kind of even read the function, continue with that result in the L2 process and do things. So kind of the L2 can do things dependent on the state of the L1, so it has immediate transparent access. The other way around, from the L1, you cannot read immediately the state of the L2s. That is asynchronous reads. But writes are actually synchronous. So you can very well do a transaction on L1 that in the same transaction, so atomically, also creates a write into the L2 and affects the state. You will just not, in the context of L1, be able to immediately get kind of the result of that state change. So again, the read will be asynchronous. Same in communication between L2s. So you can synchronously make a transaction that affects the state of two L2s, but again, you cannot read from one L2 to the other. So the general idea is stuff that everyone should be able to read, a kind of contentious state, might live on the L1, and on all the L2s, they can access the state synchronously, but there can be much more non-contentious state. So all the things I have proposed so far would be possible today on Ethereum without any kind of upgrades or changes to Ethereum. It would essentially just take the Ethereum community to decide to build that. Of course, if we do that, we can actually make those L2s even more powerful. So two things I'm proposing here that can be done. If we make those L2s native, we can make them also native to the economics of Ethereum. So Ethereum, of course, let's say, issues ether to reward validators to participate in the consensus. In the same way, it could also kind of redirect or direct this issuance towards proving the correctness of all those L2s. Another one is that I'm suggesting that those L2s should have distinct namespaces, so meaning an address should be clearly attributable to one L2 or to one kind of, in a way, chain, either L1 or L2. And in my view, that is a big problem, that right now you have an address and the same address on various chains can mean very different things. So, for example, if you have a Safe, then actually the same address can exist on multiple chains, but it can have completely different owners. So what I am suggesting here is that we shouldn't have this address collision. So each L2 kind of uses an additional salt to have its own address space. 
And if we have that, we could allow sending a message from the L2 with its unique address into the L1 and actually have the message sender on the transaction be that unique address that only exists on the L1. So that allows you to do things like, let's say, you hold a token on L1 by an address or a contract that only lives on an L2. But yeah, some technical details. So here's kind of the spiral I expect, or I see already happening, if we don't go this route. So if we don't go this route, it means the economic zone that is Ethereum and Ethereum block building becomes less relevant. And therefore it already makes it less attractive to use Ethereum as a block builder or as a sequencer. And again, we already see this today. 99% of the economic value of L2s choose not to be based and not to use Ethereum as a block builder. At the same time, more and more native assets are not coming from Ethereum and are not really secured by Ethereum. Either they are natively issued or they use external bridges. So for those, the economic security of Ethereum also matters less and less. So in total, that means that the relationship between roll-ups and Ethereum really becomes weaker and weaker. And yeah, I'm going so far as to say it just becomes a meme. So now, so essentially in my view, that's the crossroads we are in front of. With native roll-ups, we can kind of continue to have Ethereum as the most relevant economic zone. I would claim that should be the goal, to be the most relevant economic zone even in the world. That's a place where prices are... There's also the other perspective. Some believe Ethereum should be a meme or is a meme. And they promote this idea of, yeah, let's say Ether is money. And their perspective might be, if we do native roll-ups, we kind of damage the meme. We damage our collective, maybe, religion, kind of, because we create tension within this nice family that we all are. So we disenfranchise those roll-ups, so therefore the ETH meme gets weaker, and all that economic stuff doesn't really matter. It's just about saying ETH is money and spreading ETH, and therefore it's better to not do native rollups. All right. So final slide. For whom is it to decide what route we go? Absolutely for all of us that are here, for everyone who is Ethereum, and we are all Ethereum. It's not just, let's say, some core developers or the Ethereum Foundation. Essentially it is, yeah, Ethereum, and anyone can essentially affect changes to Ethereum. Thank you very much. [Applause] Thank you very much. Thank you so much. And while we're still on that note, we have some questions from everyone here. And the first one is, there are 128 identical rollups. Which one should I deploy my dapp on? Yeah. So, I mean, the first important part would be that right now, if you wanted to deploy somewhere right now, well, you either use L1 Ethereum or you have to choose one of those many ecosystems, be it Superchain or AggLayer or something. And this would give you the opportunity to actually just say, okay, I want to be on Ethereum. Now, if you have to choose a particular one, then again, reads and writes within a particular L2 are synchronous. So if there are other applications you want to regularly interact with, then yes, it would be wise to choose that one. If you're fine to say, okay, I just need to access, I want this close access to the L1, they are all equally close to the L1, then you really should just choose the rollup that is used the least, because that will be the cheapest. 
Why not implement this on Gnosis? Yeah, I mean, maybe, but I think really, I mean, well, Ethereum is the big thing. So Ethereum has that credible chance to be this most important economic zone. As much as I would like Gnosis to become that, we are not there. I mean, Ethereum is 100 times bigger. And again, I mean, saying that again, so there are all those ecosystems, and Gnosis is one of them, that also tries to do valuable things. And again, I think that's totally valid. I'm just saying, I'm not saying if you come to Gnosis, you'll come to Ethereum. No, you come to Gnosis. And we try to do valuable things there and in the same way I would say other ecosystems again, they are great ecosystems and they bring great value but you should understand that you are building on that ecosystem and not necessarily on Ethereum. Alright, why 128? Why not start with 4 or 16? Yeah, I mean, the idea is really to make it clear that building on Ethereum or building here is long-term viable and that, of addresses costs so I do think, well I absolutely also want to share the vision to bring a billion people on chain and for that Ethereum needs to be much more ambitious and I think 100x increase in effective block space is really what should be aimed for on a timeline for let's say two years, I would say it could be realistic for that and not the 4X or something like that. Alright, the next one wasn't necessarily a question, it was more a compliment. They just agree completely and they're thanking you for stating the hard truth. But then we've got, do you consider this as rugging existing roll-ups? Yeah, I mean, well, rugging, yeah. So I think existing roll-ups, well, already have big time advantage. They are live. They are already built. So that will take at least, again, I think, let's say another two years. But then, yes, if existing roll-ups will not offer anything other than kind of just provide EVM block space, then they will have a strong competition. But I do absolutely think that all those existing roll-ups are great people, innovative people and I think they will be able to kind of evolve and create something or essentially have additional innovation on top and if they do that then yeah they can just be much more kind of basic EVM block space as we know and love it since 10 years. That's what I'm proposing here, to just create much more of that. And then there can be those L2s that are more innovative, that do additional things, that do cutting edge, maybe built-in privacy, maybe some form of other sequencing that is much faster. There are all kind of ways to innovate. So I think, yeah, again, both absolutely can live. All right. Unfortunately, we are out of time. Thank you, everyone, for your questions. 
And thank you once again, Martin, for your phenomenal talk.", + "sources_streamethId": "6736bb429dbb7a90e12ebbde", "eventId": "devcon-7", - "slot_start": 1731643200000, - "slot_end": 1731645000000, + "slot_start": 1731571200000, + "slot_end": 1731574800000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1kNj2hbZYPECNuJmWk7WXk0CzL745n9QV5DwtBh6rF6A", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1UVP1zLQ1cszDLmjMKl61KN2rP616jJTze1YhwSPWhms", + "resources_slides": "https://drive.google.com/file/d/1LhECBOcENoagT8fgXyxbFrerlrSlKZd9/view", "speakers": [ - "koeppelmann" + "tim-beiko", + "dc-posch", + "liam-horne" ] }, "vector": [ @@ -313559,7 +312679,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -313699,6 +312818,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -313728,12 +312848,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -313745,6 +312859,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -313887,6 +313002,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -314384,6 +313500,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -314444,7 +313561,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -314461,6 +313577,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -314504,7 +313621,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -314621,6 +313737,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -314869,8 +313986,6 @@ 0, 2, 0, - 0, - 0, 2, 0, 0, @@ -314889,48 +314004,37 @@ }, { "session": { - "id": "ethereum-real-world-economy", - "sourceId": "JSYMFD", - "title": "Ethereum Real World Economy", - "description": "Ethereum’s role as universal settlement layer is growing fast. Tradfi companies like Stripe are building on-chain, while native projects like Polymarket are increasingly impactful in the real world.\r\n\r\nThis panel will debate the future of “Real-World Ethereum”. What does that mean? How do we maximize growth opportunities while avoiding capture? What can we learn from history? How do we best compete, and how do we ensure Ethereum values as we power more and more of the world outside crypto?", + "id": "ethereums-ultimate-gift-will-be-birthing-digital-matter", + "sourceId": "XSCFZR", + "title": "Ethereum's Ultimate Gift Will Be Birthing Digital Matter", + "description": "Bitcoin created Digital Gold, intangible yet valued like real gold. Ethereum will birth Digital Worlds which culture will treat as real. Unlike Bitcoin's scarce digital coins and tamper-proof IOUs, these worlds will have scarce digital matter and tamper-proof physics. 
Within them, inhabitants will use primitives like smart items to build economies and civilizations with society-shifting GDPs.", "track": "Real World Ethereum", - "type": "Panel", + "type": "Talk", "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Ethereum Roadmap", - "Use Cases", - "e/acc", - "case", - "use", - "e/acc", - "Ethereum Roadmap", + "Autonomous World", + "Gaming", "Use Cases" ], - "keywords": [ - "stablecoins", - "real-world-use", - "use-cases" - ], - "duration": 3314, + "keywords": [], + "duration": 1025, "language": "en", - "sources_swarmHash": "0744b811712e4fc06596318988aea1107a479258a4a14e81c272d0eefccfc715", - "sources_youtubeId": "3A0b9y7OmUI", + "sources_swarmHash": "b2b83d28ec83257b3cf45d5993e985e81d999f07117256d8a488907ff1599d39", + "sources_youtubeId": "ZwkGlbjT1SQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736bb429dbb7a90e12ebbde", + "sources_streamethId": "6734901b9dbb7a90e1916dd8", "eventId": "devcon-7", - "slot_start": 1731571200000, - "slot_end": 1731574800000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1UVP1zLQ1cszDLmjMKl61KN2rP616jJTze1YhwSPWhms", - "resources_slides": null, + "slot_start": 1731494400000, + "slot_end": 1731496200000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/15oxvM3TxOCUK4NDmYqvX1h3RKEylrnyt66ZdyLe_RR0", + "resources_slides": "https://drive.google.com/file/d/1AzSsLIGm8Zem7ED3gLXlLUIeifzZ7Dcy/view", "speakers": [ - "tim-beiko", - "dc-posch", - "liam-horne" + "dhrumil-shah" ] }, "vector": [ @@ -315011,6 +314115,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -315077,8 +314182,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -315120,7 +314223,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -315264,7 +314366,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -315795,6 +314896,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -315841,7 +314944,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -315877,7 +314979,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -316002,8 +315103,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -316270,37 +315369,49 @@ }, { "session": { - "id": "ethereums-ultimate-gift-will-be-birthing-digital-matter", - "sourceId": "XSCFZR", - "title": "Ethereum's Ultimate Gift Will Be Birthing Digital Matter", - "description": "Bitcoin created Digital Gold, intangible yet valued like real gold. Ethereum will birth Digital Worlds which culture will treat as real. Unlike Bitcoin's scarce digital coins and tamper-proof IOUs, these worlds will have scarce digital matter and tamper-proof physics. Within them, inhabitants will use primitives like smart items to build economies and civilizations with society-shifting GDPs.", - "track": "Real World Ethereum", - "type": "Talk", - "expertise": "Beginner", - "audience": "Engineering", + "id": "ethereums-values-and-ethos-alignment-pre-merge-to-now", + "sourceId": "UHAESN", + "title": "Ethereum's Values and Ethos Alignment: Pre-Merge to Now", + "description": "If you ask Ethereans to describe \"What is Ethereum?\" in 1 sentence, what would it be? Likely, you will get many different answers depending on who you're speaking to. Some visions have changed over time and some stayed true to the cypherpunk values such as decentralization, trustlessness & censorship-resistance. Or is it more important for us to focus on DA & scalability at L1? What should L1 actually be responsible for? Is local block building dead? Are timing games bad? 
What do we value today?", + "track": "Cypherpunk & Privacy", + "type": "Panel", + "expertise": "Intermediate", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Autonomous World", - "Gaming", - "Use Cases" + "Layer 1", + "Ethereum Roadmap", + "Coordination", + "alignment", + "Coordination", + "Ethereum Roadmap", + "Layer 1" ], - "keywords": [], - "duration": 1025, + "keywords": [ + "ethos", + "values", + "alignment" + ], + "duration": 3274, "language": "en", - "sources_swarmHash": "b2b83d28ec83257b3cf45d5993e985e81d999f07117256d8a488907ff1599d39", - "sources_youtubeId": "ZwkGlbjT1SQ", + "sources_swarmHash": "48570ff43a87a9358ca409423d50e18a75c02e7c2ac81f5234dbbc3fa5501617", + "sources_youtubeId": "0-plV0zJbxs", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734901b9dbb7a90e1916dd8", + "sources_streamethId": "6735a04c9dbb7a90e1a35460", "eventId": "devcon-7", - "slot_start": 1731494400000, - "slot_end": 1731496200000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/15oxvM3TxOCUK4NDmYqvX1h3RKEylrnyt66ZdyLe_RR0", - "resources_slides": null, + "slot_start": 1731564000000, + "slot_end": 1731567600000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1pDeSitEvmVhEFya_w3q8q2Uq4_YVvfaQsg5BA5nTUaI", + "resources_slides": "https://drive.google.com/file/d/1hJkWPBPStt_lVLOdvAn_9M752WGCqWgx/view", "speakers": [ - "dhrumil-shah" + "peter-szilagyi", + "ahmad-bitar", + "phil-ngo", + "nixo", + "mark-tyneway" ] }, "vector": [ @@ -316309,7 +315420,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -316356,6 +315466,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -316381,11 +315492,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -316477,6 +315583,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -316572,6 +315679,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -316637,6 +315745,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -317068,6 +316178,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -317134,7 +316245,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -317165,8 +316275,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -317210,6 +316318,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -317246,6 +316355,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -317370,6 +316480,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -317613,6 +316724,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -317620,7 +316732,6 @@ 0, 0, 0, - 2, 0, 2, 0, @@ -317630,64 +316741,54 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "ethereums-values-and-ethos-alignment-pre-merge-to-now", - "sourceId": "UHAESN", - "title": "Ethereum's Values and Ethos Alignment: Pre-Merge to Now", - "description": "If you ask Ethereans to describe \"What is Ethereum?\" in 1 sentence, what would it be? Likely, you will get many different answers depending on who you're speaking to. Some visions have changed over time and some stayed true to the cypherpunk values such as decentralization, trustlessness & censorship-resistance. Or is it more important for us to focus on DA & scalability at L1? What should L1 actually be responsible for? Is local block building dead? Are timing games bad? What do we value today?", - "track": "Cypherpunk & Privacy", - "type": "Panel", + "id": "ethersjs-api-hidden-gems", + "sourceId": "EG8ML8", + "title": "Ethers.js - API Hidden Gems", + "description": "There are many shortcuts and powerful API features in Ethers.js which go unnoticed or under-exploited. 
The goal of this talk is to raise awareness, provide examples and encourage usage of some of these useful APIs to unlock features which can improve user experience, user security and be more transparent to users.", + "track": "Developer Experience", + "type": "Talk", "expertise": "Intermediate", - "audience": "Community", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "Layer 1", - "Ethereum Roadmap", - "Coordination", - "alignment", - "Coordination", - "Ethereum Roadmap", - "Layer 1" + "DevEx", + "Testing", + "UI/UX", + "api", + "DevEx", + "Testing", + "UI/UX" ], "keywords": [ - "ethos", - "values", - "alignment" + "Ethers", + "API" ], - "duration": 3274, + "duration": 1192, "language": "en", - "sources_swarmHash": "48570ff43a87a9358ca409423d50e18a75c02e7c2ac81f5234dbbc3fa5501617", - "sources_youtubeId": "0-plV0zJbxs", + "sources_swarmHash": "6ee306a0ea634950b19333665c6e0f1af4c15ef985147faa92f247be8e7ba05d", + "sources_youtubeId": "zjApYb3mtAg", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735a04c9dbb7a90e1a35460", + "sources_streamethId": "673d954917a97b4f4dd733aa", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731564000000, - "slot_end": 1731567600000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1pDeSitEvmVhEFya_w3q8q2Uq4_YVvfaQsg5BA5nTUaI", - "resources_slides": null, + "slot_start": 1731646800000, + "slot_end": 1731648600000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1B_Zxh9JTKekXGn74kLQf28CCReGTzSYFG5ED2_8egac", + "resources_slides": "https://drive.google.com/file/d/1a2bYjTpqhMlCvM5mpJ7OdBG8g1HxQdkY/view", "speakers": [ - "peter-szilagyi", - "ahmad-bitar", - "phil-ngo", - "nixo", - "mark-tyneway" + "richard-moore" ] }, "vector": [ - 0, - 0, 0, 0, 0, @@ -317737,7 +316838,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -317854,7 +316954,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -317950,7 +317049,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -318017,14 +317115,14 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -318452,7 +317550,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -318470,6 +317567,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -318498,6 +317596,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -318592,7 +317691,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -318629,7 +317727,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -318684,6 +317781,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -318755,9 +317853,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -319006,10 +318104,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -319022,53 +318120,44 @@ }, { "session": { - "id": "ethersjs-api-hidden-gems", - "sourceId": "EG8ML8", - "title": "Ethers.js - API Hidden Gems", - "description": "There are many shortcuts and powerful API features in Ethers.js which go unnoticed or under-exploited. 
The goal of this talk is to raise awareness, provide examples and encourage usage of some of these useful APIs to unlock features which can improve user experience, user security and be more transparent to users.", - "track": "Developer Experience", - "type": "Talk", + "id": "ethos-dgen1-self-sovereign-os-hardware", + "sourceId": "TALWUM", + "title": "ethOS + dGEN1: Self sovereign OS + Hardware", + "description": "In this talk I will talk about ethOS, the dGEN1 and the concept of self sovereign software and hardware.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "DevEx", - "Testing", - "UI/UX", - "api", - "DevEx", - "Testing", + "DePIN", + "Mobile", "UI/UX" ], - "keywords": [ - "Ethers", - "API" - ], - "duration": 1192, + "keywords": [], + "duration": 482, "language": "en", - "sources_swarmHash": "6ee306a0ea634950b19333665c6e0f1af4c15ef985147faa92f247be8e7ba05d", - "sources_youtubeId": "zjApYb3mtAg", + "sources_swarmHash": "ea1cab369dd91136941e6ea8e57b72d60156dc26dbee471bcb30235241d5a2cb", + "sources_youtubeId": "Eb-wJh1PK5k", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673d954917a97b4f4dd733aa", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6735cae29dbb7a90e1adef32", "eventId": "devcon-7", - "slot_start": 1731646800000, - "slot_end": 1731648600000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1B_Zxh9JTKekXGn74kLQf28CCReGTzSYFG5ED2_8egac", - "resources_slides": null, + "slot_start": 1731577500000, + "slot_end": 1731578400000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1_547FFGifntr2F9NLRt6mgJnjr6QzNRpm-JcA8hqP_c", + "resources_slides": "https://drive.google.com/file/d/1gpkHojawFWW5S9r_sbheRTMrM-XQ8Yt4/view", "speakers": [ - "richard-moore" + "markus-haas" ] }, "vector": [ 0, + 6, 0, 0, - 6, 0, 0, 0, @@ -319835,6 +318924,32 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -319875,7 +318990,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -320060,7 +319174,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -320135,59 +319248,30 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -320383,9 +319467,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -320401,48 +319485,56 @@ }, { "session": { - "id": "ethos-dgen1-self-sovereign-os-hardware", - "sourceId": "TALWUM", - "title": "ethOS + dGEN1: Self sovereign OS + Hardware", - "description": "In this talk I will talk about ethOS, the dGEN1 and the concept of self sovereign software and hardware.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "Intermediate", + "id": "eve-frontier-challenges-lessons-and-future-of-building-an-autonomous-world-on-ethereum", + "sourceId": "QLK8UE", + "title": "EVE Frontier - challenges, lessons and future of building an autonomous 
world on Ethereum", + "description": "CCP Games—the creators of the legendary space-based MMO EVE Online, home to millions of space merchants, pirates, and explorers—is building a new world, and it is going to live onchain and run on the EVM.\r\n\r\nHear from the CCP team as they discuss challenges, learnings, and open questions of building massive virtual worlds onchain—what to put onchain first? What game mechanics are best suited onchain? What are the unlocks?—as well as what EVE Frontier might bring to the Ethereum ecosystem.", + "track": "Real World Ethereum", + "type": "Talk", + "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, + "keywords": [ + "MUD", + "EVE Frontier", + "EVE Online" + ], "tags": [ - "DePIN", - "Mobile", - "UI/UX" + "Gaming", + "Autonomous World", + "eve", + "online", + "Autonomous World", + "Gaming" ], - "keywords": [], - "duration": 482, "language": "en", - "sources_swarmHash": "ea1cab369dd91136941e6ea8e57b72d60156dc26dbee471bcb30235241d5a2cb", - "sources_youtubeId": "Eb-wJh1PK5k", + "sources_swarmHash": "fd9b8519e91577c8456926c1087173ad4da44d3aa5bfdad5333a9ed609273e33", + "sources_youtubeId": "B7DCH9FwlWw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735cae29dbb7a90e1adef32", - "eventId": "devcon-7", - "slot_start": 1731577500000, - "slot_end": 1731578400000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1_547FFGifntr2F9NLRt6mgJnjr6QzNRpm-JcA8hqP_c", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "markus-haas" - ] + "justin-glibert", + "hilmar-petursson" + ], + "eventId": "devcon-7", + "slot_start": 1731468600000, + "slot_end": 1731470400000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1mqLIgd8le45XgG2FPsR3vi1IafiikIiEzC9TaHmFCvk", + "resources_slides": "https://drive.google.com/file/d/1Xa8HcAom0MyYJsXPFDd7ou0BSgTerpO5/view" }, "vector": [ - 0, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -320618,6 +319710,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -321208,12 +320301,6 @@ 0, 0, 0, - 2, - 2, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -321245,7 +320332,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -321300,6 +320386,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -321507,6 +320595,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -321751,8 +320841,6 @@ 0, 2, 0, - 0, - 0, 2, 0, 0, @@ -321771,39 +320859,34 @@ }, { "session": { - "id": "eve-frontier-challenges-lessons-and-future-of-building-an-autonomous-world-on-ethereum", - "sourceId": "QLK8UE", - "title": "EVE Frontier - challenges, lessons and future of building an autonomous world on Ethereum", - "description": "CCP Games—the creators of the legendary space-based MMO EVE Online, home to millions of space merchants, pirates, and explorers—is building a new world, and it is going to live onchain and run on the EVM.\r\n\r\nHear from the CCP team as they discuss challenges, learnings, and open questions of building massive virtual worlds onchain—what to put onchain first? What game mechanics are best suited onchain? 
What are the unlocks?—as well as what EVE Frontier might bring to the Ethereum ecosystem.", - "track": "Real World Ethereum", - "type": "Talk", + "id": "eve-frontier-mud-day-demo", + "sourceId": "RMKJTL", + "title": "EVE Frontier - MUD Day Demo", + "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications.\r\n\r\nEVE Frontier, is a single-shard survival game from CCP Games—the creators of the legendary space-based MMO EVE Online.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", + "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Engineering", + "audience": "Product", "featured": false, - "doNotRecord": false, - "keywords": [ - "MUD", - "EVE Frontier", - "EVE Online" - ], + "doNotRecord": true, + "keywords": [], "tags": [ "Gaming", "Autonomous World", - "eve", - "online", "Autonomous World", "Gaming" ], "language": "en", "speakers": [ - "justin-glibert", - "hilmar-petursson" + "toniya-sundaram", + "scott-mccabe" ], "eventId": "devcon-7", - "slot_start": 1731468600000, - "slot_end": 1731470400000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1mqLIgd8le45XgG2FPsR3vi1IafiikIiEzC9TaHmFCvk" + "slot_start": 1731556500000, + "slot_end": 1731556800000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1uN2SOUzGZIHw0d3Pw3RkvvmxeEi6RqnN2J0-JbWUMHI", + "resources_slides": "" }, "vector": [ 0, @@ -321812,16 +320895,13 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -321960,6 +321040,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -321988,7 +321070,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -322141,7 +321222,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -322877,8 +321957,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -323124,11 +322202,11 @@ 0, 2, 0, - 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -323142,43 +322220,48 @@ }, { "session": { - "id": "eve-frontier-mud-day-demo", - "sourceId": "RMKJTL", - "title": "EVE Frontier - MUD Day Demo", - "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications.\r\n\r\nEVE Frontier, is a single-shard survival game from CCP Games—the creators of the legendary space-based MMO EVE Online.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "everything-you-need-to-know-about-state-expiry", + "sourceId": "MZXQKJ", + "title": "Everything you need to know about state expiry", + "description": "State growth is a ticking time bomb for Ethereum, yet concrete solutions remain elusive. While statelessness offers promise, it doesn't address the root cause. Enter state expiry – a compelling answer to our growing state problem. 
In this talk, I'll dive into the analysis of Ethereum's state growth problem down to the key-value pair level, the evolution of state expiry proposals, and the latest research on Ethereum's state expiry solutions.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, - "doNotRecord": true, - "keywords": [], + "doNotRecord": false, "tags": [ - "Gaming", - "Autonomous World", - "Autonomous World", - "Gaming" + "Core Protocol", + "Protocol Design", + "Verkle trees", + "state", + "expiry", + "Core Protocol", + "Protocol Design", + "Verkle trees" ], - "language": "en", - "speakers": [ - "toniya-sundaram", - "scott-mccabe" + "keywords": [ + "Statelessness", + "State expiry" ], + "duration": 1345, + "language": "en", + "sources_swarmHash": "a17917322f9b68b641f7a7bb0aff74f02310d39e0fe79821d91feb668a19936e", + "sources_youtubeId": "6j-7ZY2ITw8", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673427cc9dbb7a90e19eae0e", "eventId": "devcon-7", - "slot_start": 1731556500000, - "slot_end": 1731556800000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1uN2SOUzGZIHw0d3Pw3RkvvmxeEi6RqnN2J0-JbWUMHI" + "slot_start": 1731468600000, + "slot_end": 1731470400000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/18L4p0t-mR02cVw6JDvMHqUal5ARSQzWsubskb_x8FzA", + "resources_slides": "https://drive.google.com/file/d/12ZTllKMycuMXs7Kb8AXKCPsJSZPJArno/view", + "speakers": [ + "han" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -323322,26 +322405,6 @@ 0, 0, 0, - 6, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -323534,6 +322597,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -323965,6 +323029,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -323991,6 +323056,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -324032,8 +323098,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -324139,6 +323203,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -324266,6 +323331,20 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -324487,7 +323566,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -324497,6 +323575,14 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -324507,45 +323593,50 @@ }, { "session": { - "id": "everything-you-need-to-know-about-state-expiry", - "sourceId": "MZXQKJ", - "title": "Everything you need to know about state expiry", - "description": "State growth is a ticking time bomb for Ethereum, yet concrete solutions remain elusive. While statelessness offers promise, it doesn't address the root cause. Enter state expiry – a compelling answer to our growing state problem. In this talk, I'll dive into the analysis of Ethereum's state growth problem down to the key-value pair level, the evolution of state expiry proposals, and the latest research on Ethereum's state expiry solutions.", + "id": "evm-charts-2024-whats-hot-whats-not", + "sourceId": "R3UPGT", + "title": "EVM Charts 2024: What's hot? What's not?", + "description": "Thanks to the openness and transparency of blockchain we can study how developers actually use it. In this session we will compare the usage of EVM on mainnet from the last Devcon to this Devcon. 
Including questions like:\r\n* Which opcodes have become more/less popular?\r\n* Which precompiles have become more/less popular?\r\n* Has average memory consumption increased/decreased?\r\n* How actively are new features being used?\r\n* Are transactions getting more complicated?", "track": "Core Protocol", - "type": "Talk", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ "Core Protocol", - "Protocol Design", - "Verkle trees", - "state", - "expiry", + "Architecture", + "Gas", + "EVM", + "trend", + "usage", + "Architecture", "Core Protocol", - "Protocol Design", - "Verkle trees" + "Gas" ], "keywords": [ - "Statelessness", - "State expiry" + "Opcodes", + "Precompiles", + "EVM Metrics", + "Protocol Optimization", + "Statistics", + "evm usage trends" ], - "duration": 1345, + "duration": 445, "language": "en", - "sources_swarmHash": "a17917322f9b68b641f7a7bb0aff74f02310d39e0fe79821d91feb668a19936e", - "sources_youtubeId": "6j-7ZY2ITw8", + "sources_swarmHash": "557c6cca7b7b2b59d76ce07897cfbc711c0cf196474cb55b4bf76b0106349118", + "sources_youtubeId": "m1tdQfaKt7Q", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673427cc9dbb7a90e19eae0e", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731468600000, - "slot_end": 1731470400000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/18L4p0t-mR02cVw6JDvMHqUal5ARSQzWsubskb_x8FzA", - "resources_slides": null, + "slot_start": 1731471000000, + "slot_end": 1731471600000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1jchtIsIrvcgl2q1AJ62ke7MdCNqf6zK1fAUfSJtbTac", + "resources_slides": "https://drive.google.com/file/d/1LCV95XfblA3Uh2QXTVa9UmKy4zTpown4/view", "speakers": [ - "han" + "dominic-bruetsch" ] }, "vector": [ @@ -325316,9 +324407,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -325346,7 +324434,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -325363,6 +324450,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -325493,7 +324581,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -325508,6 +324595,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -325527,6 +324615,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -325860,15 +324949,12 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -325885,58 +324971,36 @@ }, { "session": { - "id": "evm-charts-2024-whats-hot-whats-not", - "sourceId": "R3UPGT", - "title": "EVM Charts 2024: What's hot? What's not?", - "description": "Thanks to the openness and transparency of blockchain we can study how developers actually use it. In this session we will compare the usage of EVM on mainnet from the last Devcon to this Devcon. Including questions like:\r\n* Which opcodes have become more/less popular?\r\n* Which precompiles have become more/less popular?\r\n* Has average memory consumption increased/decreased?\r\n* How actively are new features being used?\r\n* Are transactions getting more complicated?", - "track": "Core Protocol", + "id": "evm-memory-repricing-and-gentest", + "sourceId": "MTWH38", + "title": "EVM Memory Repricing & Gentest", + "description": "Memory is a critical resource that enables complex computations within the Ethereum Virtual Machine (EVM). The cost of using memory, designed to prevent its abuse, has not been revised since the inception of Ethereum. However, efficiency gains from hardware advancements and client code optimizations warrants periodic repricing of this cost. 
We explore possible ways to make memory more accessible.", + "track": "[CLS] EPF Day", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Research", "featured": false, - "doNotRecord": false, + "doNotRecord": true, + "keywords": [], "tags": [ - "Core Protocol", - "Architecture", - "Gas", - "EVM", - "trend", - "usage", - "Architecture", - "Core Protocol", - "Gas" - ], - "keywords": [ - "Opcodes", - "Precompiles", - "EVM Metrics", - "Protocol Optimization", - "Statistics", - "evm usage trends" + "EVM-equivalent" ], - "duration": 445, "language": "en", - "sources_swarmHash": "557c6cca7b7b2b59d76ce07897cfbc711c0cf196474cb55b4bf76b0106349118", - "sources_youtubeId": "m1tdQfaKt7Q", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731471000000, - "slot_end": 1731471600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1jchtIsIrvcgl2q1AJ62ke7MdCNqf6zK1fAUfSJtbTac", - "resources_slides": null, "speakers": [ - "dominic-bruetsch" - ] + "raxhvl" + ], + "eventId": "devcon-7", + "slot_start": 1731468600000, + "slot_end": 1731469500000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1e6KETyrkOalajDAo2_dDl3Cg0oUXzpb7Ehl8aawzChY", + "resources_slides": "" }, "vector": [ 0, 0, 0, 0, - 6, 0, 0, 0, @@ -325948,6 +325012,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -326702,7 +325767,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -326745,7 +325809,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -326890,7 +325953,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -326910,7 +325972,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -327007,10 +326068,9 @@ 0, 0, 0, - 2, - 2, 0, 0, + 2, 0, 0, 0, @@ -327268,35 +326328,50 @@ }, { "session": { - "id": "evm-memory-repricing-and-gentest", - "sourceId": "MTWH38", - "title": "EVM Memory Repricing & Gentest", - "description": "Memory is a critical resource that enables complex computations within the Ethereum Virtual Machine (EVM). The cost of using memory, designed to prevent its abuse, has not been revised since the inception of Ethereum. However, efficiency gains from hardware advancements and client code optimizations warrants periodic repricing of this cost. We explore possible ways to make memory more accessible.", - "track": "[CLS] EPF Day", - "type": "Lightning Talk", + "id": "evm-object-format-eof-history-and-motivation", + "sourceId": "SEUZGU", + "title": "EVM Object Format (EOF) - History and motivation", + "description": "EOF is one of the important parts of the upcoming Pectra upgrade, delivering long-standing feature requests to the EVM. 
This talk aims to provide insight into its history, significance, and role in Ethereum and EVM improvement, and explore the rationale for including it in the next upgrade, its potential impacts and implications, as well as long-term advantages and possible challenges.", + "track": "Core Protocol", + "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, - "doNotRecord": true, - "keywords": [], + "doNotRecord": false, "tags": [ - "EVM-equivalent" + "Core Protocol", + "developer", + "experience", + "Core", + "Protocol" ], - "language": "en", - "speakers": [ - "raxhvl" + "keywords": [ + "EVM", + "Developer Experience" ], + "duration": 1503, + "language": "en", + "sources_swarmHash": "f05d6e3e2b2f1bbc704ce9e98664b8dac849778797f474d69fa7dd09e007a496", + "sources_youtubeId": "X2mlptWzphc", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731468600000, - "slot_end": 1731469500000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1e6KETyrkOalajDAo2_dDl3Cg0oUXzpb7Ehl8aawzChY" + "slot_start": 1731400200000, + "slot_end": 1731402000000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1V-NCCshtl60AkHuWhlJDZ4XszkThsUvFQ_L8gM-hAro", + "resources_slides": "https://drive.google.com/file/d/1zJLa4mjTTFCHx2tL0Xu-pAbtfy2GyRiV/view", + "speakers": [ + "danno-ferrin" + ] }, "vector": [ 0, 0, 0, 0, + 6, 0, 0, 0, @@ -327308,7 +326383,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -328060,6 +327134,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -328215,6 +327290,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -328346,13 +327422,8 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, + 2, 0, 0, 0, @@ -328605,13 +327676,10 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, - 0, 2, 0, 0, @@ -328624,48 +327692,53 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "evm-object-format-eof-history-and-motivation", - "sourceId": "SEUZGU", - "title": "EVM Object Format (EOF) - History and motivation", - "description": "EOF is one of the important parts of the upcoming Pectra upgrade, delivering long-standing feature requests to the EVM. This talk aims to provide insight into its history, significance, and role in Ethereum and EVM improvement, and explore the rationale for including it in the next upgrade, its potential impacts and implications, as well as long-term advantages and possible challenges.", + "id": "evm-object-format-eof-managing-the-bytecode-chaos", + "sourceId": "UU9BTK", + "title": "EVM Object Format (EOF): Managing the Bytecode Chaos", + "description": "Currently, EVM bytecode, while being powerful and simple, is lacking structure. This leads to many complexities when introducing new EIPs and maintaining backwards compatibility.\r\n\r\nIn this talk, we illustrate some use cases of the EVM Object Format (EOF). Next, we provide a quick overview of the main changes introduced by the EOF and related EIPs, along with code examples. 
Finally, we discuss potential benefits and drawbacks that could arise with the introduction of EOF", "track": "Core Protocol", "type": "Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Core Protocol", - "developer", - "experience", - "Core", - "Protocol" - ], "keywords": [ - "EVM", - "Developer Experience" + "EOF", + "EIP", + "upgrades" + ], + "tags": [ + "Ethereum Roadmap", + "Protocol Design", + "Security", + "upgrade", + "Ethereum Roadmap", + "Protocol Design", + "Security" ], - "duration": 1503, "language": "en", - "sources_swarmHash": "f05d6e3e2b2f1bbc704ce9e98664b8dac849778797f474d69fa7dd09e007a496", - "sources_youtubeId": "X2mlptWzphc", + "sources_swarmHash": "9cde575870d589f4c0332dd64baf91d16d9b34b3e7ce8ae9eb17a332db3219a9", + "sources_youtubeId": "WKVgCoNp39g", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731400200000, - "slot_end": 1731402000000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1V-NCCshtl60AkHuWhlJDZ4XszkThsUvFQ_L8gM-hAro", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "danno-ferrin" - ] + "alex-murashkin" + ], + "eventId": "devcon-7", + "slot_start": 1731552300000, + "slot_end": 1731554100000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/11DBlWa1M4JLbQS2Ik4OU6nxzNoPj1nINFepAqbY2qIk", + "resources_slides": "https://drive.google.com/file/d/1F38P9FcBhg245q1oDEN1fk90xsPVNBVC/view" }, "vector": [ 0, @@ -329416,6 +328489,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -329438,8 +328512,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -329463,6 +328535,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -329594,8 +328667,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -329610,6 +328681,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -329727,8 +328799,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -329980,8 +329050,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -330004,39 +329072,43 @@ }, { "session": { - "id": "evm-object-format-eof-managing-the-bytecode-chaos", - "sourceId": "UU9BTK", - "title": "EVM Object Format (EOF): Managing the Bytecode Chaos", - "description": "Currently, EVM bytecode, while being powerful and simple, is lacking structure. This leads to many complexities when introducing new EIPs and maintaining backwards compatibility.\r\n\r\nIn this talk, we illustrate some use cases of the EVM Object Format (EOF). Next, we provide a quick overview of the main changes introduced by the EOF and related EIPs, along with code examples. Finally, we discuss potential benefits and drawbacks that could arise with the introduction of EOF", + "id": "evmmax-fast-modular-arithmetic-in-evm", + "sourceId": "7CWEHH", + "title": "EVMMAX. Fast Modular Arithmetic in EVM", + "description": "On the top of EVM Object Format we build an extension which allows contract developers to implement optimized advanced cryptography functions. This feature allows us to implement existing and future ECC precompiles counterparts directly in EVM. Adding new ECC functions (i.e. bls precompiles or functions based on a new, unknown yet, elliptic curve) to the protocol won't require introducing new precompiles. 
It can be achieved more easily and without any risk to the consensus.", "track": "Core Protocol", "type": "Talk", "expertise": "Intermediate", "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ "Cryptography", "EVM", "Cryptography" ], "keywords": [ "EOF", "EVM" ], "duration": 1500, "language": "en", "sources_swarmHash": "", "sources_youtubeId": "", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67357dc39dbb7a90e1f50a68", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67357dc39dbb7a90e1f50a68.vtt", "transcript_text": " I'm Radek from IPSIN Long Research and Development team at Ethereum Foundation and today I want to talk to you about some extension we're gonna add to the to the Ethereum virtual machine in the near future. So the idea has a couple of years already, and it was initiated by some core devs to improve the way how the cryptography is handled or cryptography-related functions are handled in the EVM. The main reason was to avoid adding, or limit the need of adding, new precompiles. But before we get into the details, I would like to make a simple introduction and explain the reasoning behind EVM Max. So what's the EVM Max? So EVM Max translates to Ethereum Virtual Machine Modular Arithmetic Extension. It's a set of new modular arithmetic instructions which support any size-capped, which is important, odd modulus. At the current spec, we define addition, subtraction, and multiplication. We also consider adding exponentiation, which is helpful for calculating some more advanced modular arithmetic functions like modular inversion or square root. One very important thing which should be noticed here is that this proposal is built on top of the EVM object format, which we just had a presentation of a minute ago. And also there was one presentation made by Danno on Tuesday. So EVM Max makes usage of EOF immediate arguments and the validation of the immediate bytecode also. And it makes it possible to validate EVM Max code before the deployment of the contract to the chain. But I'm not going to get into details of this validation, because it's not the main topic of this presentation. But it's worth mentioning that EOF is a crucial dependency which makes EVM Max easier to implement in an efficient way. It doesn't mean that EOF is a dependency which has to come first. It just makes the implementation of EVM Max much easier in an efficient way. So, but where exactly is EVM Max located in the cryptography-related EVM stack? So we have basic operations of the modular arithmetic on the bottom level, which are used to implement the second level, the elliptic curve cryptography primitives like point addition, multiplication and more advanced ones like pairing verification. And these primitives are used to implement ECC algorithms like signature verification and ZK-related functions. But EVM Max implements only the bottom level of this diagram. 
In the Epsilon team we also use EVM max to implement the second level to make sure that the set of instructions of EVM max is offered and the API offers right write abstractions, write abstraction and efficiency. So, sorry, too fast. What is going on? Yeah, okay. So we now know what's EVM Maxis in general. So one of the reasons I already mentioned at the beginning of the presentation, but there are a couple more reasons I want to list. So in the Ethereum community, there is a need to make the implementation of the cryptography in EVM much easier and efficient. I'm just kidding. Easier and efficient. So. Yeah. So with EVM, we won't need to wait for a specific precompile to be delivered in a fork. We also would like EVMX to be a tool which allows to avoid adding new precompiles in the future. It will make the core devs' lives much easier because they won't need to maintain the very specialized cryptographic libraries in the EVM so they they will not have a headache like what exactly this function do I'm not the cryptographer exactly or we don't have a specialized cryptographer in the team so why do we need to maintain this complicated libraries, which we take basically very often from some external libraries, which are already implemented. But so EVM Max will deliver a tool which should make precompile for some reason will be still needed to be implemented and to use by the EVM, we can imagine that the EVM max bytecode can define the specification of the precompiles, how they should be implemented exactly. But let's get a little bit deeper into the EVM Max instruction details. So, we can split it into three different parts. So, the first part is responsible for setting up the EVM Max context. The second one is just a set of modular arithmetic instructions I already mentioned. And the third one is responsible for EVM, EVM max context communication. So let's get into details of these three parts. So first, SetupX creates the EVM max context if it doesn't exist yet, which means it initializes modulus values and allocates EVM max value slots in the dedicated for EVM max memory only these slots can be only accessed by the EVM max of code of course and Also initialize some specific constant values like for example R squared which must be which is used for Which is used by Montgomery form. In this context, if the context is already defined, so the setup just only switches to this already defined context. Second part are the arithmetic instructions. So basically, they perform the arithmetic operation according to their names, as you can see so everybody who who know a little bit about the arithmetic modular arithmetic should be able to know what's what they exactly do but it's worth noticing that they operate only on indexes of the slots in the EVM Max context. 
So the indexes are static and can be validated on the deployment before deployment of the contract to the mainnet, which allows to validate them exactly the same way as all the other", "eventId": "devcon-7", - "slot_start": 1731552300000, - "slot_end": 1731554100000, + "slot_start": 1731556800000, + "slot_end": 1731558600000, "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/11DBlWa1M4JLbQS2Ik4OU6nxzNoPj1nINFepAqbY2qIk" + "resources_presentation": "https://docs.google.com/presentation/d/1fh8W3duOjm-uN-PLpwXQdH39CtC5VtYT9yOjlpTE8hk", + "resources_slides": "https://drive.google.com/file/d/10qL1fOoK-38pPgEH0jC_QpqqrsCvzhrX/view", + "speakers": [ + "rodiazet" + ] }, "vector": [ 0, @@ -330790,10 +329862,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -330804,6 +329872,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -330836,7 +329905,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -330982,7 +330050,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -330999,6 +330066,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -331118,7 +330186,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -331357,9 +330424,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -331375,49 +330442,44 @@ }, { "session": { - "id": "evmmax-fast-modular-arithmetic-in-evm", - "sourceId": "7CWEHH", - "title": "EVMMAX. Fast Modular Arithmetic in EVM", - "description": "On the top of EVM Object Format we build an extension which allows contract developers to implement optimized advanced cryptography functions. This feature allows us to implement existing and future ECC precompiles counterparts directly in EVM. Adding new ECC functions (i.e. bls precompiles or functions based on a new, unknown yet, elliptic curve) to the protocol won't require introducing new precompiles. It can be achieved easier and without any risk for the consensus.", - "track": "Core Protocol", - "type": "Talk", + "id": "evolution-of-scams", + "sourceId": "WZWPE9", + "title": "Evolution of Scams", + "description": "The goal of this talk will be to give a quick history of the evolution of scams and the new techniques employed to combat them. I was previously the co-founder of Wallet Guard, which has since been acquired by Consensys. I now am responsible for the research and development of the security engine employed by MetaMask to protect its users.", + "track": "Security", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Cryptography", - "EVM", - "Cryptography" + "metamask", + "Hacks", + "Security" ], "keywords": [ - "EOF", - "EVM" + "Security", + "Drainers", + "MetaMask" ], - "duration": 1500, + "duration": 558, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "10b2a71955b16582577039f8e25b02ba983b4c003975aa7d0a9f7e11ca72f537", + "sources_youtubeId": "SgkEwSDkBnI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67357dc39dbb7a90e1f50a68", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67357dc39dbb7a90e1f50a68.vtt", - "transcript_text": " I'm Radek from IPSIN Long Research and Development team at Ethereum Foundation and today I want to talk to you about some extension we're gonna add to the to the Ethereum virtual machine in the near future. So the idea has a couple of years already, and it was initiated by some core devs to improve the way how the cryptography is handled or cryptography-related functions are handled in the EVM. 
The main reason was to avoid adding or limit the need to adding new precompiles. But before we get into the details, I would like to make some simple introduction and explain the reasoning behind EVM Max. So what's the EVM Max? So EVM Max translates to EVMterior Virtual Machine Modular Arithmetic Extension. It's a set of new modular arithmetic instructions which support every sized-capped, which is important, odd modules. At the current spec, we define addition, subtractions, and multiplication. We also consider adding exponentiation, which is helpful for calculation some more advanced modular arithmetic functions like modular inversion or square root. One every important thing which should be noticed here is that this proposal is built on the top of the EVM object format, which we had just a presentation of in a minute. And also there was one presentation made by Dano on Tuesday. So EVM Max makes a usage of EOF immediate arguments and the validation of the immediate bytecode also. And it makes it possible to validate EVM Max code before the deployment of the contract to the chain. But I'm not going to get into details of this validation, because it's not the main topic of this presentation. But it's worth mentioning that EOF is a crucial dependency which makes EVM Max easier to implement in an efficient way. It doesn't mean that EOF is a dependency which has to be before. It just makes the implementation of EVX much easier in an efficient way. before, it just makes the implementation of EVX much easier in an efficient way. So, but where exactly EVM Max is located in the cryptography related EVM stack? So we have basic operations in the modular arithmetic on the bottom level, which are used to implement the second level, the elliptic curve cryptography primitives like point addition, multiplication and more advanced like pairing verification. And these primitives are used to implement ECC algorithms like signature verification and Zika related functions. But EVM-Max implements only the bottom one level on this diagram. In the Epsilon team we also use EVM max to implement the second level to make sure that the set of instructions of EVM max is offered and the API offers right write abstractions, write abstraction and efficiency. So, sorry, too fast. What is going on? Yeah, okay. So we now know what's EVM Maxis in general. So one of the reasons I already mentioned at the beginning of the presentation, but there are a couple more reasons I want to list. So in the Ethereum community, there is a need to make the implementation of the cryptography in EVM much easier and efficient. I'm just kidding. Easier and efficient. So. Yeah. So with EVM, we won't need to wait for a specific precompile to be delivered in a fork. We also would like EVMX to be a tool which allows to avoid adding new precompiles in the future. It will make the core devs' lives much easier because they won't need to maintain the very specialized cryptographic libraries in the EVM so they they will not have a headache like what exactly this function do I'm not the cryptographer exactly or we don't have a specialized cryptographer in the team so why do we need to maintain this complicated libraries, which we take basically very often from some external libraries, which are already implemented. 
But so EVM Max will deliver a tool which should make precompile for some reason will be still needed to be implemented and to use by the EVM, we can imagine that the EVM max bytecode can define the specification of the precompiles, how they should be implemented exactly. But let's get a little bit deeper into the EVM Max instruction details. So, we can split it into three different parts. So, the first part is responsible for setting up the EVM Max context. The second one is just a set of modular arithmetic instructions I already mentioned. And the third one is responsible for EVM, EVM max context communication. So let's get into details of these three parts. So first, SetupX creates the EVM max context if it doesn't exist yet, which means it initializes modulus values and allocates EVM max value slots in the dedicated for EVM max memory only these slots can be only accessed by the EVM max of code of course and Also initialize some specific constant values like for example R squared which must be which is used for Which is used by Montgomery form. In this context, if the context is already defined, so the setup just only switches to this already defined context. Second part are the arithmetic instructions. So basically, they perform the arithmetic operation according to their names, as you can see so everybody who who know a little bit about the arithmetic modular arithmetic should be able to know what's what they exactly do but it's worth noticing that they operate only on indexes of the slots in the EVM Max context. So the indexes are static and can be validated on the deployment before deployment of the contract to the mainnet, which allows to validate them exactly the same way as all the other", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731556800000, - "slot_end": 1731558600000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1fh8W3duOjm-uN-PLpwXQdH39CtC5VtYT9yOjlpTE8hk", - "resources_slides": null, + "slot_start": 1731408600000, + "slot_end": 1731409200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1fLuDyHluumURppoq7gyTD9d7Z-wKLdy5qsCg-Tytso0", + "resources_slides": "https://drive.google.com/file/d/1Vz1SLhZKIwhKSL27b_6HrlSDrJTI1Vfu/view", "speakers": [ - "rodiazet" + "ohm" ] }, "vector": [ - 0, - 0, - 0, - 0, 6, 0, 0, @@ -331755,13 +330817,11 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -332168,6 +331228,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -332178,7 +331239,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -332372,7 +331432,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -332423,6 +331482,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -332496,6 +331556,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -332733,7 +331794,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -332745,52 +331805,56 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "evolution-of-scams", - "sourceId": "WZWPE9", - "title": "Evolution of Scams", - "description": "The goal of this talk will be to give a quick history of the evolution of scams and the new techniques employed to combat them. I was previously the co-founder of Wallet Guard, which has since been acquired by Consensys. I now am responsible for the research and development of the security engine employed by MetaMask to protect its users.", - "track": "Security", + "id": "exploring-auction-mechanisms-in-protocol-design", + "sourceId": "WAKEL9", + "title": "Exploring Auction Mechanisms in Protocol Design", + "description": "Auction mechanisms are fascinating, and so are protocol designs. 
When you put both together, things get really interesting. In this talk, we'll dive into various auction mechanisms and see how they shape protocol design choices. We'll cover key aspects like the timing game, MEV burn, and participant trusts. Then we will look at case studies: Ethereum, Optimism, and Arbitrum. For each case, we'll conclude how protocol impacts auction or vice versa.", + "track": "Cryptoeconomics", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Research", + "expertise": "Beginner", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "metamask", - "Hacks", - "Security" - ], "keywords": [ - "Security", - "Drainers", - "MetaMask" + "Auction" + ], + "tags": [ + "Core Protocol", + "Economics", + "MEV", + "auction", + "Core Protocol", + "Economics", + "MEV" ], - "duration": 558, "language": "en", - "sources_swarmHash": "10b2a71955b16582577039f8e25b02ba983b4c003975aa7d0a9f7e11ca72f537", - "sources_youtubeId": "SgkEwSDkBnI", + "sources_swarmHash": "312aa12503f300c3636ada438111a3dccdf8d3fac350ae974219de7171eb5568", + "sources_youtubeId": "rd7f7697yoY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "terence" + ], "eventId": "devcon-7", - "slot_start": 1731408600000, - "slot_end": 1731409200000, + "slot_start": 1731485400000, + "slot_end": 1731486000000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1fLuDyHluumURppoq7gyTD9d7Z-wKLdy5qsCg-Tytso0", - "resources_slides": null, - "speakers": [ - "ohm" - ] + "resources_presentation": "https://docs.google.com/presentation/d/1SW7qjLygGhslLlaFTINPgoKm5AVWrY8ItfsnIvPnUq4", + "resources_slides": "https://drive.google.com/file/d/1nM63U0oMNehJSp9rBYhZSRDjjLiWBJE4/view" }, "vector": [ - 6, 0, 0, + 6, 0, 0, 0, @@ -333538,7 +332602,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -333556,6 +332619,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -333571,6 +332635,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -333793,10 +332858,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -334104,9 +333165,6 @@ 0, 2, 0, - 0, - 0, - 0, 2, 0, 0, @@ -334119,47 +333177,51 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "exploring-auction-mechanisms-in-protocol-design", - "sourceId": "WAKEL9", - "title": "Exploring Auction Mechanisms in Protocol Design", - "description": "Auction mechanisms are fascinating, and so are protocol designs. When you put both together, things get really interesting. In this talk, we'll dive into various auction mechanisms and see how they shape protocol design choices. We'll cover key aspects like the timing game, MEV burn, and participant trusts. Then we will look at case studies: Ethereum, Optimism, and Arbitrum. 
For each case, we'll conclude how protocol impacts auction or vice versa.", - "track": "Cryptoeconomics", + "id": "exploring-mud-worlds-with-blockscout", + "sourceId": "QTLXWL", + "title": "Exploring MUD worlds with Blockscout", + "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications.\r\nShowcase of the Blockscout features that help users and developers explore any MUD world on-chain.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", "type": "Lightning Talk", "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, "keywords": [ - "Auction" + "Block", + "Explorers" ], "tags": [ - "Core Protocol", - "Economics", - "MEV", - "auction", - "Core Protocol", - "Economics", - "MEV" + "Appchains", + "Interface" ], "language": "en", + "sources_swarmHash": "841831dffa918db69a90da87a1750660825bfa2fe51a60e3a0df71c3ebe110e1", + "sources_youtubeId": "rB9lwWtNgtI", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "terence" + "kirill-fedoseev" ], "eventId": "devcon-7", - "slot_start": 1731485400000, - "slot_end": 1731486000000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1SW7qjLygGhslLlaFTINPgoKm5AVWrY8ItfsnIvPnUq4" + "slot_start": 1731555300000, + "slot_end": 1731555600000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1K-pTNAyptFNuvxYIVpjPCZK8H_NM7O6asR23AlFcIro", + "resources_slides": "" }, "vector": [ 0, 0, - 6, 0, 0, 0, @@ -334170,6 +333232,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -334910,7 +333973,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -334927,7 +333989,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -334943,7 +334004,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -335050,6 +334110,9 @@ 0, 0, 0, + 2, + 0, + 0, 0, 0, 0, @@ -335204,43 +334267,38 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, 0, 0, 0, @@ -335493,41 +334551,42 @@ }, { "session": { - "id": "exploring-mud-worlds-with-blockscout", - "sourceId": "QTLXWL", - "title": "Exploring MUD worlds with Blockscout", - "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications.\r\nShowcase of the Blockscout features that help users and developers explore any MUD world on-chain.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Lightning Talk", - "expertise": "Beginner", + "id": "exploring-proof-of-personhood-privacy-biometrics-and-why-it-needs-ethereum", + "sourceId": "TVSAZU", + "title": "Exploring Proof of Personhood: Privacy, Biometrics, and Why It Needs Ethereum.", + "description": "In this session, Remco Bloemen will explore the urgent need for proof of personhood and privacy in a digital-first world. Using insights from Vitalik’s blogpost 07/23, Remco explains why Ethereum’s trustless infrastructure is key to achieving privacy-preserving identity solutions through technologies like zero-knowledge proofs (ZK) and multi-party computation (MPC). 
This talk is designed to educate developers on creating equitable digital identity solutions without compromising user privacy.", + "track": "Real World Ethereum", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Block", - "Explorers" - ], "tags": [ - "Appchains", - "Interface" + "Identity", + "Privacy", + "Zero-Knowledge" ], - "language": "en", - "speakers": [ - "kirill-fedoseev" + "keywords": [ + "N/A" ], + "duration": 1479, + "language": "en", + "sources_swarmHash": "cefd278367af0d091b677cea10e548bc18dedd7bdc45fcbc3702cd2f211fcf46", + "sources_youtubeId": "q3rpu8aDRA8", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731555300000, - "slot_end": 1731555600000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1K-pTNAyptFNuvxYIVpjPCZK8H_NM7O6asR23AlFcIro" + "slot_start": 1731409200000, + "slot_end": 1731411000000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1tSAo9i2l_HRD2OBB2F6SjjF1Z6zy8MLucrc8FO5rWQs", + "resources_slides": "https://drive.google.com/file/d/1xFOoVR6Ynp_k7aaex4BPgCsc3uswcpvF/view", + "speakers": [ + "remco-bloeman" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -335866,13 +334925,13 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -336288,6 +335347,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -336324,6 +335384,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -336390,6 +335451,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -336415,7 +335477,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -336604,7 +335665,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -336836,9 +335896,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 2, 0, @@ -336858,39 +335918,45 @@ }, { "session": { - "id": "exploring-proof-of-personhood-privacy-biometrics-and-why-it-needs-ethereum", - "sourceId": "TVSAZU", - "title": "Exploring Proof of Personhood: Privacy, Biometrics, and Why It Needs Ethereum.", - "description": "In this session, Remco Bloemen will explore the urgent need for proof of personhood and privacy in a digital-first world. Using insights from Vitalik’s blogpost 07/23, Remco explains why Ethereum’s trustless infrastructure is key to achieving privacy-preserving identity solutions through technologies like zero-knowledge proofs (ZK) and multi-party computation (MPC). 
This talk is designed to educate developers on creating equitable digital identity solutions without compromising user privacy.", - "track": "Real World Ethereum", + "id": "exploring-the-future-of-account-abstraction", + "sourceId": "S7NYUJ", + "title": "Exploring the Future of Account Abstraction", + "description": "Discover the journey of Ethereum's Account Abstraction (AA) from inception to its current state, challenges tackled by ERC-4337, and future roadmap: modular native AA approach for L2 and L1, and EOA improvement (EIP-7702).", + "track": "Core Protocol", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", - "featured": false, + "audience": "Developer", + "featured": true, "doNotRecord": false, "tags": [ - "Identity", - "Privacy", - "Zero-Knowledge" + "Ethereum Roadmap", + "In-protocol Account Abstraction", + "Account Abstraction", + "aa", + "roadmap", + "Account Abstraction", + "Ethereum Roadmap", + "In-protocol Account Abstraction" ], "keywords": [ - "N/A" + "AA", + "roadmap" ], - "duration": 1479, + "duration": 1607, "language": "en", - "sources_swarmHash": "cefd278367af0d091b677cea10e548bc18dedd7bdc45fcbc3702cd2f211fcf46", - "sources_youtubeId": "q3rpu8aDRA8", + "sources_swarmHash": "7113fad0f81b154b61afb9bb5436b692012893f3198e31f3a13aa0c6f220fad1", + "sources_youtubeId": "63Wd5mPla-M", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "67358f729dbb7a90e1afa28e", "eventId": "devcon-7", - "slot_start": 1731409200000, - "slot_end": 1731411000000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1tSAo9i2l_HRD2OBB2F6SjjF1Z6zy8MLucrc8FO5rWQs", - "resources_slides": null, + "slot_start": 1731560400000, + "slot_end": 1731562200000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1-B8ZzQJNuc1_e9BR0rIfLQYc9lXZ8nuO1aV56lK7dKM", + "resources_slides": "https://drive.google.com/file/d/1R4q2zvwtGX3OUwpEgYCUr1YKAx9BGbZk/view", "speakers": [ - "remco-bloeman" + "yoav-weiss" ] }, "vector": [ @@ -336898,9 +335964,9 @@ 0, 0, 0, + 6, 0, 0, - 6, 0, 0, 0, @@ -337657,7 +336723,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -337761,14 +336826,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -337843,6 +336900,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -337869,6 +336927,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -337980,6 +337039,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -338212,9 +337273,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -338230,45 +337291,44 @@ }, { "session": { - "id": "exploring-the-future-of-account-abstraction", - "sourceId": "S7NYUJ", - "title": "Exploring the Future of Account Abstraction", - "description": "Discover the journey of Ethereum's Account Abstraction (AA) from inception to its current state, challenges tackled by ERC-4337, and future roadmap: modular native AA approach for L2 and L1, and EOA improvement (EIP-7702).", - "track": "Core Protocol", - "type": "Talk", + "id": "exploring-various-approaches-to-achieve-effective-decentralization-for-intent-based-protocols", + "sourceId": "LGZYYW", + "title": "Exploring various approaches to achieve effective decentralization for Intent-Based protocols", + "description": "Intents are emerging as the gold standard for transacting on-chain. However, they do come with decentralization trade-offs. In this talk, I'd like to present the status quo, various architectures, and new tradeoffs in terms of where they fit in the trilemma of fees, execution speed, and execution guarantees. 
The objective is to achieve maximum decentralization while maintaining a great UX and efficiency.", + "track": "Usability", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", - "featured": true, + "audience": "Engineering", + "featured": false, "doNotRecord": false, "tags": [ - "Ethereum Roadmap", - "In-protocol Account Abstraction", - "Account Abstraction", - "aa", - "roadmap", - "Account Abstraction", - "Ethereum Roadmap", - "In-protocol Account Abstraction" + "TEE", + "Decentralization", + "Homomorphic Encryption", + "Intents", + "MPC", + "ZKP" ], "keywords": [ - "AA", - "roadmap" + "TEE" ], - "duration": 1607, + "duration": 529, "language": "en", - "sources_swarmHash": "7113fad0f81b154b61afb9bb5436b692012893f3198e31f3a13aa0c6f220fad1", - "sources_youtubeId": "63Wd5mPla-M", + "sources_swarmHash": "e19db64e8f8dd4b52315070de1744acf9726b9198101d6cdbd72062edc4ee5b1", + "sources_youtubeId": "Vy_-uON0FTg", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67358f729dbb7a90e1afa28e", + "sources_streamethId": "67385ce81b0f83434dae2a14", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67385ce81b0f83434dae2a14.vtt", + "transcript_text": " Fees, execution speed, and execution guarantees. So welcome to the stage, Munir. Everyone. Okay. Everyone? Everyone? Okay. I'm glad to be here. It seems like this is an intense morning, and a very intense one. Sam Munir from Lata Labs. We're known for building PowerSwap, a five-year-old protocol, and recently Portugus, that allowed us to launch parasoft delta so the first time paraswap is now in the intent space and and it's a great thing um so i guess you heard a lot about intense i believe it's now uh something that is second nature uh so i'm gonna jump directly to what are intense in the context of trading on chain. So it's basically one way of expressing an outcome without describing the details of that outcome in simple terms. So if you are a user, you want to swap a token A for a B, well, what you do, you sign an order or an intent. You send it to a backend API in general, like 99.99 of the time. But behind the scenes, there are what you call solvers, resolvers, market makers, agents, how we call them, that will compete for user flow and will bid on an auction. And the best bidder is going to be the winner. So we have two entities here. We have the user who have a desire, like I want to swap token A for B. We have solvers, agents, you name it, competing for user flow. And as I said, there is an API in between that does the whole work. Users still are mastering their key. The system is like work as an EDFI protocol, but the thing is that API, which acts as a central point of failure, if it stops working, the whole system is not useful, is not working at all. So that's a problem. And this is what we're working on in Porticus in trying to create an alternative. But the challenge is, and what we are also proposing, is a way so that we can still preserve the user experience, because if we introduce a fancy decentralized layer, let's say an app chain, well, we won't ask users now to interact with yet and yet another chain. That's not going to be great. It's not going to be good for UX. So the thing is, we keep that backend API, that centralized back-end API. But the thing is, now it's optional. So you can use it. You can run it yourself. So the user and the relayer can be the same thing. 
And that allows us to still give the user exactly the same experience, but still having a neutral network that can run this system of intent interactions. So the system looks like this. We have on the top left the user interaction. So it means user wants to do action XYZ. I talked about swap, but it can be I want to provide liquidity to Uniswap. I want to do a cross-chain liquidity providing on another AMM, or maybe I want to deposit my USDC on Aave and earn yield on another chain, or maybe completely abstracting that chain. I use a relayer. I can be my own relayer. It can run literally on my laptop. So I publish the auction or my intents on the network. We have an auction manager. This is the heart of the system that you're going to zoom into. The rest of the process is the same. We have a competition. The winner is going to win. Get to execute the user trade. Rest is not new. So we have multiple ways for achieving that. Unfortunately, we don't have a lot of time to get really deep. But there is the possibility of running the auction manager on an app chain. Something that's not new, we have like DYDX who are running the whole order book and the matching engine on an app chain. It's working well. The big limitation here is privacy, because we may be pre-licking MEV on the user side, but also on the agent side, on the solver side, like all the bits would be public. There are solutions yet, but here we get into more complexity, like commit and reveal schemes that can be a bit challenging as well, but it's one of the approaches. The other one is one I like a lot, is the TE, maybe, or MPC, or a combination of both, where we can have a bunch of nodes who are running on, if we go with TE, running on a trusted execution environment, so it means like a closed environment, a private environment, so we solve the privacy issue of like pre-licking of MEV, and the issue with TE is we rely on a specialized hardware, issue with MPC is a bit slower than TE. So maybe a combination of both could do the job. It's something that's still a work in progress or a research in progress. The other one is like the holy grail for computing on encrypted data that can be a perfect solution, but unfortunately, this is FHE, a full homomorphic encryption, where the nodes can compute on data fully blindly without having any idea of what's inside and getting to select, in this case, select the winning agents based on the best bidder in terms of price, gas, you name it. But the problem is FHE latency is not acceptable yet. It's way, way too slow. It can work on a batch computations and not in real time, where it's very important in the case of trading on chain and many other use cases. So that's maybe we still maybe a few years ahead. There is some work on building some ASICs, again, specialized hardware, which would become a limitation. But still, yeah, this could be the best solution at some point. But we're still a few years ahead. Thank you, guys. That's all I had. Happy to answer any questions. Thank you, Munir. All right. We have time for questions. Raise your hand if you want to ask one. You've been amazing in terms of asking questions so far. Let's keep it up. Let's keep going. Yes. All right. One, two, three. Is there a specific service which solves this intent problem usable by many DEXs, cross-chain swaps, or each build their own intent solvers? You mean on the product side or on the agent side? On the product side. 
Does it make sense to have a scalable product here, which is serving this intent services for many, many cross chain solutions? Yeah, for cross chain, I think it's becoming more and more like a normal thing between codes. So we're seeing more and more protocols normalizing cross-chain interactions. So if that's the question, I would say that's just a matter of time. We're not far away from having, making cross-chain is like the normal thing to do and thinking beyond cross-chain. If it's a question about having generalized purpose intents, it's something I believe less that it's going to be the case, because the complexity of each protocols makes them so different to have a general purpose solving mechanism. So if that's the question, I hope I answered. All right. Time for another question over there. So as someone who has not explored Intents yet, I want to ask how to participate as an agent or a solver in this network. Like, what exactly is a solver? Is it a piece of code or like a market maker bidding for the prices or such? Yeah, it's definitely too broad. It's always a piece of code anyways. Like someone has to write a code that will answer a user intent between code, like user desire to do something. The simplest case is I want to swap token A for B. And the one who provides the best price is going to be like coding and competing with others. Like you may run a fancy algorithm and connect it to many exchanges, the Texas buyer, of course, and running this algorithm that will answer the user intent. But it can be other things. Like nobody has a integration of Aave that will let any user put USDC cross-chain on another chain and earn a better APY. That's also a use case that can be fulfilled by an agent. All right.", "eventId": "devcon-7", - "slot_start": 1731560400000, - "slot_end": 1731562200000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1-B8ZzQJNuc1_e9BR0rIfLQYc9lXZ8nuO1aV56lK7dKM", - "resources_slides": null, + "slot_start": 1731558000000, + "slot_end": 1731558600000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1LaXJZlFuHU9E1WzvaA5EEprE3pUrcWOtPgm0VCJNCN8", + "resources_slides": "https://drive.google.com/file/d/1zrsTtk7_Z99jlv7r-ZWdtJychKaQS8dq/view", "speakers": [ - "yoav-weiss" + "mounir-benchemled" ] }, "vector": [ @@ -338276,11 +337336,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -339074,6 +338134,7 @@ 0, 0, 0, + 0, 2, 0, 0, @@ -339094,6 +338155,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -339107,6 +338169,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -339118,6 +338182,15 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -339215,7 +338288,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -339242,7 +338314,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -339311,54 +338382,38 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 2, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, 0, 0, 0, @@ -339590,9 +338645,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -339608,56 +338663,59 @@ }, { "session": { - "id": "exploring-various-approaches-to-achieve-effective-decentralization-for-intent-based-protocols", - "sourceId": "LGZYYW", - "title": "Exploring various approaches to achieve effective decentralization for Intent-Based 
protocols", - "description": "Intents are emerging as the gold standard for transacting on-chain. However, they do come with decentralization trade-offs. In this talk, I'd like to present the status quo, various architectures, and new tradeoffs in terms of where they fit in the trilemma of fees, execution speed, and execution guarantees. The objective is to achieve maximum decentralization while maintaining a great UX and efficiency.", - "track": "Usability", - "type": "Lightning Talk", + "id": "fair-combinatorial-auction-for-trade-intents-how-to-design-mechanisms-without-a-numeraire", + "sourceId": "AAYWGY", + "title": "Fair combinatorial auction for trade intents: how to design mechanisms without a numeraire", + "description": "When designing mechanisms on the blockchain, there may be no single asset that can be used to reallocate the benefits of participating in the mechanism among its participants. Hence, the designer cannot separately address achieving an objective and sharing the resulting gains, as the objective affects how/whether these gains can be shared. This raises fairness concerns. We discuss the relevance of this issue for trade intent auctions and propose a novel mechanism: the fair combinatorial auction.", + "track": "Cryptoeconomics", + "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Academic", "featured": false, "doNotRecord": false, "tags": [ - "TEE", - "Decentralization", - "Homomorphic Encryption", + "Mechanism design", "Intents", - "MPC", - "ZKP" + "research", + "Intents", + "Mechanism design" ], "keywords": [ - "TEE" + "Batch auctions", + "dutch auctions", + "auctions", + "CoW Swap", + "research" ], - "duration": 529, + "duration": 1599, "language": "en", - "sources_swarmHash": "e19db64e8f8dd4b52315070de1744acf9726b9198101d6cdbd72062edc4ee5b1", - "sources_youtubeId": "Vy_-uON0FTg", + "sources_swarmHash": "98494810a7c4921a159b0c29002e4cf7c8682f01a667f1ad0195bce4f11e623e", + "sources_youtubeId": "Y0ix2IuIJGk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67385ce81b0f83434dae2a14", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67385ce81b0f83434dae2a14.vtt", - "transcript_text": " Fees, execution speed, and execution guarantees. So welcome to the stage, Munir. Everyone. Okay. Everyone? Everyone? Okay. I'm glad to be here. It seems like this is an intense morning, and a very intense one. Sam Munir from Lata Labs. We're known for building PowerSwap, a five-year-old protocol, and recently Portugus, that allowed us to launch parasoft delta so the first time paraswap is now in the intent space and and it's a great thing um so i guess you heard a lot about intense i believe it's now uh something that is second nature uh so i'm gonna jump directly to what are intense in the context of trading on chain. So it's basically one way of expressing an outcome without describing the details of that outcome in simple terms. So if you are a user, you want to swap a token A for a B, well, what you do, you sign an order or an intent. You send it to a backend API in general, like 99.99 of the time. But behind the scenes, there are what you call solvers, resolvers, market makers, agents, how we call them, that will compete for user flow and will bid on an auction. And the best bidder is going to be the winner. So we have two entities here. We have the user who have a desire, like I want to swap token A for B. We have solvers, agents, you name it, competing for user flow. 
And as I said, there is an API in between that does the whole work. Users still are mastering their key. The system is like work as an EDFI protocol, but the thing is that API, which acts as a central point of failure, if it stops working, the whole system is not useful, is not working at all. So that's a problem. And this is what we're working on in Porticus in trying to create an alternative. But the challenge is, and what we are also proposing, is a way so that we can still preserve the user experience, because if we introduce a fancy decentralized layer, let's say an app chain, well, we won't ask users now to interact with yet and yet another chain. That's not going to be great. It's not going to be good for UX. So the thing is, we keep that backend API, that centralized back-end API. But the thing is, now it's optional. So you can use it. You can run it yourself. So the user and the relayer can be the same thing. And that allows us to still give the user exactly the same experience, but still having a neutral network that can run this system of intent interactions. So the system looks like this. We have on the top left the user interaction. So it means user wants to do action XYZ. I talked about swap, but it can be I want to provide liquidity to Uniswap. I want to do a cross-chain liquidity providing on another AMM, or maybe I want to deposit my USDC on Aave and earn yield on another chain, or maybe completely abstracting that chain. I use a relayer. I can be my own relayer. It can run literally on my laptop. So I publish the auction or my intents on the network. We have an auction manager. This is the heart of the system that you're going to zoom into. The rest of the process is the same. We have a competition. The winner is going to win. Get to execute the user trade. Rest is not new. So we have multiple ways for achieving that. Unfortunately, we don't have a lot of time to get really deep. But there is the possibility of running the auction manager on an app chain. Something that's not new, we have like DYDX who are running the whole order book and the matching engine on an app chain. It's working well. The big limitation here is privacy, because we may be pre-licking MEV on the user side, but also on the agent side, on the solver side, like all the bits would be public. There are solutions yet, but here we get into more complexity, like commit and reveal schemes that can be a bit challenging as well, but it's one of the approaches. The other one is one I like a lot, is the TE, maybe, or MPC, or a combination of both, where we can have a bunch of nodes who are running on, if we go with TE, running on a trusted execution environment, so it means like a closed environment, a private environment, so we solve the privacy issue of like pre-licking of MEV, and the issue with TE is we rely on a specialized hardware, issue with MPC is a bit slower than TE. So maybe a combination of both could do the job. It's something that's still a work in progress or a research in progress. The other one is like the holy grail for computing on encrypted data that can be a perfect solution, but unfortunately, this is FHE, a full homomorphic encryption, where the nodes can compute on data fully blindly without having any idea of what's inside and getting to select, in this case, select the winning agents based on the best bidder in terms of price, gas, you name it. But the problem is FHE latency is not acceptable yet. It's way, way too slow. 
It can work on a batch computations and not in real time, where it's very important in the case of trading on chain and many other use cases. So that's maybe we still maybe a few years ahead. There is some work on building some ASICs, again, specialized hardware, which would become a limitation. But still, yeah, this could be the best solution at some point. But we're still a few years ahead. Thank you, guys. That's all I had. Happy to answer any questions. Thank you, Munir. All right. We have time for questions. Raise your hand if you want to ask one. You've been amazing in terms of asking questions so far. Let's keep it up. Let's keep going. Yes. All right. One, two, three. Is there a specific service which solves this intent problem usable by many DEXs, cross-chain swaps, or each build their own intent solvers? You mean on the product side or on the agent side? On the product side. Does it make sense to have a scalable product here, which is serving this intent services for many, many cross chain solutions? Yeah, for cross chain, I think it's becoming more and more like a normal thing between codes. So we're seeing more and more protocols normalizing cross-chain interactions. So if that's the question, I would say that's just a matter of time. We're not far away from having, making cross-chain is like the normal thing to do and thinking beyond cross-chain. If it's a question about having generalized purpose intents, it's something I believe less that it's going to be the case, because the complexity of each protocols makes them so different to have a general purpose solving mechanism. So if that's the question, I hope I answered. All right. Time for another question over there. So as someone who has not explored Intents yet, I want to ask how to participate as an agent or a solver in this network. Like, what exactly is a solver? Is it a piece of code or like a market maker bidding for the prices or such? Yeah, it's definitely too broad. It's always a piece of code anyways. Like someone has to write a code that will answer a user intent between code, like user desire to do something. The simplest case is I want to swap token A for B. And the one who provides the best price is going to be like coding and competing with others. Like you may run a fancy algorithm and connect it to many exchanges, the Texas buyer, of course, and running this algorithm that will answer the user intent. But it can be other things. Like nobody has a integration of Aave that will let any user put USDC cross-chain on another chain and earn a better APY. That's also a use case that can be fulfilled by an agent. All right.", + "sources_streamethId": "6736fa1d1b0f83434deba3c9", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736fa1d1b0f83434deba3c9.vtt", + "transcript_text": " Hello everyone. All right, so yeah, today I'd like to discuss with you a new auction design that we plan to implement at Kao Protocol and a little bit of kind of the thinking process behind it. So how did we get to this specific auction design? Which has to do with the fact that, you know, blockchain is an environment where there may not be a numeraire. And, of course, I'll explain what it is in a second. All right, so many talks that have to do with DeFi start by saying, okay, look, we have all this mechanism from traditional finance, but they don't necessarily quite apply to our environment because, say, you know, in the context of blockchain, transaction can be reordered, and therefore we have MEV. 
I'm going to follow a similar template, but I'm not going to focus so much on the usual suspects, and instead focus on a different element, which is the absence of a numeraire. First of all, what is a numeraire? A numeraire is an asset that in your market or in your mechanism, your environment, you can assume that everybody likes and is happy to receive and can be used to share value between people. Now, off-chain, there is usually a numeraire in the environments you consider. If we want to look at financial markets, well, typically all stocks are traded against the national currency. So the national currency is the numeraire. A possible exception could be forex markets where people exchange national currencies. But when you look carefully that's not quite an exception in my opinion, because first of all there are very few assets exchanged and then essentially the vast majority of these trades is anyway set against the US dollar so even in that environment there is theoretically there is no numerare, anybody may be demanding different assets and exchanging any currency for any currency, really most of the volume goes through the dollar anyway now on chain things are very different there may be specific environments where you can assume that there is a numeraire but I think generally this is not it may not be the case because we have anonymous participants that could live anywhere in the world. When you look at financial markets on-chain, people may want to swap any asset for any other asset. There are like thousands. I don't know if there is a number out there. I know that ChaosSwap in its history has traded more than 11,000 different tokens. So there are at least that many different tokens on chain at the moment, and probably even more, which makes it very different than in any kind of the Forex market, for example. Also because here what matters is not the number of tokens, it's the number of token pairs. So that kind of blows up the complexity of the problem. So why does this matter? Because the numeraire and the absence of the numeraire determines how easy it is for people to share the benefit of collaborations. This is something that we know from a branch of gaming theory called cooperative gaming theory, which is the branch of game theory that has nothing to do with auction, in fact. But it turns out that it also matters when you're designing certain type of auctions, such as trade intent auctions. Now, what is a trade intent? A trade intent is essentially an order in which a user specifies a sell token and a buy token and might specify also a limit price or a slippage tolerance. And then it delegates to another agent, a solver, the exact execution of that order. And this is done in the context of an auction, where solvers are going to propose prices for each order, for the different orders. And let's say the details are important, but for the moment, let's just say that the solver that proposes the best price wins the auction and then has the right to complete this order and actually execute it on-chain. Now, this is an environment where you can have separate auction for different trade intents. An order comes in, you run an auction. Another order comes in, you run an auction. And this is how protocol like 1-inch fusion and Uniswap X work. However, there are typically additional efficiencies when orders are executed together. This could be in the form of coincidence of wants. Maybe people can trade directly with each other without really having to access an external market. 
It could be in the form of gas savings. Coincidence of wants can also arise as an intermediate hop of trades anyway. Generally, you can squeeze out additional efficiencies by executing trades together. But of course, the problem becomes in which assets those efficiencies materialize. Maybe it's extra ETH because it's gas savings. Maybe it's something else. But then not everybody may actually want the assets in which those efficiencies are actually generated and the question becomes then, how do you share those extra efficiency if it's in an asset that not everybody is demanding? And then, yeah, then the question becomes, well, do you want to execute the two orders together even if there are efficiency because you cannot really share them?", "eventId": "devcon-7", - "slot_start": 1731558000000, - "slot_end": 1731558600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1LaXJZlFuHU9E1WzvaA5EEprE3pUrcWOtPgm0VCJNCN8", - "resources_slides": null, + "slot_start": 1731650400000, + "slot_end": 1731652200000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1LquF7sJyYCfQkhUppmol316cCEsxjzZbhvuKG4u7QnU", + "resources_slides": "https://drive.google.com/file/d/15rKUV-nLrt_X7cUmhjr7ummR7-HJj2LC/view", "speakers": [ - "mounir-benchemled" + "andrea-canidio" ] }, "vector": [ 0, 0, + 6, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -340404,6 +339462,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -340451,13 +339510,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -340475,7 +339532,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -340489,8 +339545,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -340502,7 +339556,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -340674,6 +339727,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -340734,7 +339788,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -340967,7 +340020,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -340977,6 +340029,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -340985,54 +340038,48 @@ }, { "session": { - "id": "fair-combinatorial-auction-for-trade-intents-how-to-design-mechanisms-without-a-numeraire", - "sourceId": "AAYWGY", - "title": "Fair combinatorial auction for trade intents: how to design mechanisms without a numeraire", - "description": "When designing mechanisms on the blockchain, there may be no single asset that can be used to reallocate the benefits of participating in the mechanism among its participants. Hence, the designer cannot separately address achieving an objective and sharing the resulting gains, as the objective affects how/whether these gains can be shared. This raises fairness concerns. We discuss the relevance of this issue for trade intent auctions and propose a novel mechanism: the fair combinatorial auction.", - "track": "Cryptoeconomics", - "type": "Talk", + "id": "farcaster-frames-building-embeddable-ethereum-apps", + "sourceId": "NPGET3", + "title": "Farcaster frames: building embeddable Ethereum apps", + "description": "Frames are an open standard for creating embeddable, interactive apps in social media feeds and on the web. They help solve one of the hardest problems for Ethereum dapp developers: distribution. Although frames originated on Farcaster, it's now possible to build cross-platform frames that work on Farcaster, Lens, XMTP, and the open web. 
In this hands on workshop we'll introduce the core concepts behind frames and build a simple frame app that interacts with a smart contract.", + "track": "Developer Experience", + "type": "Workshop", "expertise": "Intermediate", - "audience": "Academic", - "featured": false, + "audience": "Engineering", + "featured": true, "doNotRecord": false, "tags": [ - "Mechanism design", - "Intents", - "research", - "Intents", - "Mechanism design" + "Developer Infrastructure", + "Social", + "farcaster", + "Developer Infrastructure", + "Social" ], "keywords": [ - "Batch auctions", - "dutch auctions", - "auctions", - "CoW Swap", - "research" + "Farcaster" ], - "duration": 1599, + "duration": 5086, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "8e0e0c17254242e8c66955524eb158e4655137ffbc89bd6592179981209be316", + "sources_youtubeId": "LnEpR575FRA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736fa1d1b0f83434deba3c9", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736fa1d1b0f83434deba3c9.vtt", - "transcript_text": " Hello everyone. All right, so yeah, today I'd like to discuss with you a new auction design that we plan to implement at Kao Protocol and a little bit of kind of the thinking process behind it. So how did we get to this specific auction design? Which has to do with the fact that, you know, blockchain is an environment where there may not be a numeraire. And, of course, I'll explain what it is in a second. All right, so many talks that have to do with DeFi start by saying, okay, look, we have all this mechanism from traditional finance, but they don't necessarily quite apply to our environment because, say, you know, in the context of blockchain, transaction can be reordered, and therefore we have MEV. I'm going to follow a similar template, but I'm not going to focus so much on the usual suspects, and instead focus on a different element, which is the absence of a numeraire. First of all, what is a numeraire? A numeraire is an asset that in your market or in your mechanism, your environment, you can assume that everybody likes and is happy to receive and can be used to share value between people. Now, off-chain, there is usually a numeraire in the environments you consider. If we want to look at financial markets, well, typically all stocks are traded against the national currency. So the national currency is the numeraire. A possible exception could be forex markets where people exchange national currencies. But when you look carefully that's not quite an exception in my opinion, because first of all there are very few assets exchanged and then essentially the vast majority of these trades is anyway set against the US dollar so even in that environment there is theoretically there is no numerare, anybody may be demanding different assets and exchanging any currency for any currency, really most of the volume goes through the dollar anyway now on chain things are very different there may be specific environments where you can assume that there is a numeraire but I think generally this is not it may not be the case because we have anonymous participants that could live anywhere in the world. When you look at financial markets on-chain, people may want to swap any asset for any other asset. There are like thousands. I don't know if there is a number out there. I know that ChaosSwap in its history has traded more than 11,000 different tokens. 
So there are at least that many different tokens on chain at the moment, and probably even more, which makes it very different than in any kind of the Forex market, for example. Also because here what matters is not the number of tokens, it's the number of token pairs. So that kind of blows up the complexity of the problem. So why does this matter? Because the numeraire and the absence of the numeraire determines how easy it is for people to share the benefit of collaborations. This is something that we know from a branch of gaming theory called cooperative gaming theory, which is the branch of game theory that has nothing to do with auction, in fact. But it turns out that it also matters when you're designing certain type of auctions, such as trade intent auctions. Now, what is a trade intent? A trade intent is essentially an order in which a user specifies a sell token and a buy token and might specify also a limit price or a slippage tolerance. And then it delegates to another agent, a solver, the exact execution of that order. And this is done in the context of an auction, where solvers are going to propose prices for each order, for the different orders. And let's say the details are important, but for the moment, let's just say that the solver that proposes the best price wins the auction and then has the right to complete this order and actually execute it on-chain. Now, this is an environment where you can have separate auction for different trade intents. An order comes in, you run an auction. Another order comes in, you run an auction. And this is how protocol like 1-inch fusion and Uniswap X work. However, there are typically additional efficiencies when orders are executed together. This could be in the form of coincidence of wants. Maybe people can trade directly with each other without really having to access an external market. It could be in the form of gas savings. Coincidence of wants can also arise as an intermediate hop of trades anyway. Generally, you can squeeze out additional efficiencies by executing trades together. But of course, the problem becomes in which assets those efficiencies materialize. Maybe it's extra ETH because it's gas savings. Maybe it's something else. But then not everybody may actually want the assets in which those efficiencies are actually generated and the question becomes then, how do you share those extra efficiency if it's in an asset that not everybody is demanding? 
And then, yeah, then the question becomes, well, do you want to execute the two orders together even if there are efficiency because you cannot really share them?", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731650400000, - "slot_end": 1731652200000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1LquF7sJyYCfQkhUppmol316cCEsxjzZbhvuKG4u7QnU", - "resources_slides": null, + "slot_start": 1731400800000, + "slot_end": 1731406200000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1liuvnLXBUAB0kNGDh3VePfZNkfZ-ECHpzPYsSrv_d-M", + "resources_slides": "https://drive.google.com/file/d/1JiBcsKGvRN0eGITIl7aEACqQ5QDOhBJv/view", "speakers": [ - "andrea-canidio" + "horsefacts" ] }, "vector": [ 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -341787,10 +340834,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -341961,6 +341004,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -342053,9 +341097,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -342117,6 +341158,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -342347,6 +341389,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -342356,7 +341399,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -342365,50 +341407,52 @@ }, { "session": { - "id": "farcaster-frames-building-embeddable-ethereum-apps", - "sourceId": "NPGET3", - "title": "Farcaster frames: building embeddable Ethereum apps", - "description": "Frames are an open standard for creating embeddable, interactive apps in social media feeds and on the web. They help solve one of the hardest problems for Ethereum dapp developers: distribution. Although frames originated on Farcaster, it's now possible to build cross-platform frames that work on Farcaster, Lens, XMTP, and the open web. In this hands on workshop we'll introduce the core concepts behind frames and build a simple frame app that interacts with a smart contract.", - "track": "Developer Experience", - "type": "Workshop", - "expertise": "Intermediate", + "id": "financial-nihilism-vs-foss-culture-the-battle-for-ethereums-soul", + "sourceId": "SSXAMG", + "title": "Financial Nihilism vs FOSS Culture: The Battle for Ethereum’s Soul", + "description": "In recent years, the Ethereum ecosystem has witnessed a stark dichotomy: the rise of financial nihilism through memecoins and rampant speculation on one side, and the foundational principles of the FOSS (Free and Open Source Software) community, emphasising public goods, interdependence, and intrinsic rewards, on the other. 
\r\n\r\nThis talk will delve into the experiences of interacting with FOSS developers, shedding light on their views and concerns regarding Ethereum’s current trajectory.", + "track": "Cypherpunk & Privacy", + "type": "Talk", + "expertise": "Beginner", "audience": "Engineering", - "featured": true, + "featured": false, "doNotRecord": false, "tags": [ - "Developer Infrastructure", - "Social", - "farcaster", - "Developer Infrastructure", - "Social" + "Values", + "FOSS", + "Decentralization", + "culture", + "Decentralization", + "FOSS", + "Values" ], "keywords": [ - "Farcaster" + "Culture" ], - "duration": 5086, + "duration": 1584, "language": "en", - "sources_swarmHash": "8e0e0c17254242e8c66955524eb158e4655137ffbc89bd6592179981209be316", - "sources_youtubeId": "LnEpR575FRA", + "sources_swarmHash": "d2fa049d664484b158c36db2c05b9ff461267f4ee44787a45a7ba182dabe07fc", + "sources_youtubeId": "Q_bsYpVfhHs", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731400800000, - "slot_end": 1731406200000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1liuvnLXBUAB0kNGDh3VePfZNkfZ-ECHpzPYsSrv_d-M", - "resources_slides": null, + "slot_start": 1731400200000, + "slot_end": 1731402000000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1Qlvu4fLzJTTaotuNmKf4QNZRg5u7Im3WjKq6D2kS0Vw", + "resources_slides": "https://drive.google.com/file/d/1i7CzB6NvxJVj1UN__gl7M6rTgJ3PVW6U/view", "speakers": [ - "horsefacts" + "eleftherios-diakomichalis" ] }, "vector": [ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -343208,7 +342252,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -343254,6 +342297,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -343275,6 +342319,21 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -343334,7 +342393,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -343372,6 +342430,14 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -343457,39 +342523,14 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, 0, 0, 0, @@ -343719,8 +342760,6 @@ 0, 2, 0, - 0, - 0, 2, 0, 0, @@ -343739,44 +342778,37 @@ }, { "session": { - "id": "financial-nihilism-vs-foss-culture-the-battle-for-ethereums-soul", - "sourceId": "SSXAMG", - "title": "Financial Nihilism vs FOSS Culture: The Battle for Ethereum’s Soul", - "description": "In recent years, the Ethereum ecosystem has witnessed a stark dichotomy: the rise of financial nihilism through memecoins and rampant speculation on one side, and the foundational principles of the FOSS (Free and Open Source Software) community, emphasising public goods, interdependence, and intrinsic rewards, on the other. 
\r\n\r\nThis talk will delve into the experiences of interacting with FOSS developers, shedding light on their views and concerns regarding Ethereum’s current trajectory.", - "track": "Cypherpunk & Privacy", + "id": "financialization-in-games", + "sourceId": "EF3P9X", + "title": "Financialization in Games", + "description": "This talk will cover different financialization strategies we explored while building Project Mirage, and our lessons and learnings throughout the journey.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", "type": "Talk", - "expertise": "Beginner", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Values", - "FOSS", - "Decentralization", - "culture", - "Decentralization", - "FOSS", - "Values" - ], "keywords": [ - "Culture" + "n/a" ], - "duration": 1584, + "tags": [], "language": "en", - "sources_swarmHash": "d2fa049d664484b158c36db2c05b9ff461267f4ee44787a45a7ba182dabe07fc", - "sources_youtubeId": "sNPxhnznfEA", + "sources_swarmHash": "5c8a8fb6d0d12eb090c158168905777b8ac1977d8de29e41317e18172fc20ebe", + "sources_youtubeId": "49oV_zgRoN0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731400200000, - "slot_end": 1731402000000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1Qlvu4fLzJTTaotuNmKf4QNZRg5u7Im3WjKq6D2kS0Vw", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "eleftherios-diakomichalis" - ] + "y77cao" + ], + "eventId": "devcon-7", + "slot_start": 1731579300000, + "slot_end": 1731580800000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/15r4rPTnKvKjpyxmg1BaFjdTPs2MB_Up2KIg320EKBjc", + "resources_slides": "https://drive.google.com/file/d/1645nYcNYuXS59sQqQ8JfuhvLuTTWhqs0/view" }, "vector": [ 0, @@ -343784,7 +342816,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -343792,6 +342823,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -344632,7 +343664,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -344654,7 +343685,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -344765,7 +343795,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -344866,8 +343895,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -345093,9 +344120,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 2, 0, @@ -345115,29 +344142,26 @@ }, { "session": { - "id": "financialization-in-games", - "sourceId": "EF3P9X", - "title": "Financialization in Games", - "description": "This talk will cover different financialization strategies we explored while building Project Mirage, and our lessons and learnings throughout the journey.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "find-yourself-on-the-mat", + "sourceId": "PYKTTA", + "title": "Find Yourself on the Mat", + "description": "By master Aoei \r\n- Self-tune\r\n- Find yourself along the journey with Oracle Cards\r\n - Gentle yoga flow & Stretching for Office Syndrome\r\n \r\nNov 12 16:45 - 17:30", + "track": "Entertainment", + "type": "Mixed Formats", + "expertise": "Beginner", + "audience": "Hobby", "featured": false, "doNotRecord": false, - "keywords": [ - "n/a" - ], + "keywords": [], "tags": [], "language": "en", - "speakers": [ - "y77cao" - ], + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731579300000, - "slot_end": 1731580800000, - "slot_roomId": "classroom-a", - "resources_presentation": 
"https://docs.google.com/presentation/d/15r4rPTnKvKjpyxmg1BaFjdTPs2MB_Up2KIg320EKBjc" + "slot_start": 1731404700000, + "slot_end": 1731407400000, + "slot_roomId": "decompression-room", + "resources_presentation": "https://docs.google.com/presentation/d/1TFFR57Pxj41MY1aoKmiTItEaSPtPRaK4-BbRLFZTnQQ", + "resources_slides": "" }, "vector": [ 0, @@ -345149,9 +344173,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -345491,8 +344512,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -346454,8 +345473,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 2, @@ -346467,6 +345484,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -346476,36 +345495,54 @@ }, { "session": { - "id": "find-yourself-on-the-mat", - "sourceId": "PYKTTA", - "title": "Find Yourself on the Mat", - "description": "By master Aoei \r\n- Self-tune\r\n- Find yourself along the journey with Oracle Cards\r\n - Gentle yoga flow & Stretching for Office Syndrome\r\n \r\nNov 12 16:45 - 17:30", - "track": "Entertainment", - "type": "Mixed Formats", + "id": "finding-bugs-42-tips-from-4-security-researchers", + "sourceId": "AZNENK", + "title": "Finding Bugs: 42 Tips from 4 Security Researchers", + "description": "Billions of dollars are at risk, and protocols spend millions on security through audits and bug bounties. Have you ever wondered how you can become a top security researcher securing these billions?\r\n\r\nIn this workshop, 4 recognized security researchers share their experiences on smart contract security with practical tools & techniques to find & report vulnerabilities. Security researchers, even aspirational ones, can take away some key advice to improve their smart contract security skills.", + "track": "Security", + "type": "Workshop", "expertise": "Beginner", - "audience": "Hobby", + "audience": "Research", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Security", + "Auditing", + "Bug", + "Bounties", + "smart", + "contracts", + "Auditing", + "Bounties", + "Bug", + "Security" + ], + "keywords": [ + "Education", + "Hacks", + "Smart Contract Security" + ], + "duration": 5654, "language": "en", - "speakers": [], + "sources_swarmHash": "5115b9b314e63c202aea765f7fc8025db430ff8d7f370ddddc28e16273af4e24", + "sources_youtubeId": "8d2UuzEBVdM", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731404700000, - "slot_end": 1731407400000, - "slot_roomId": "decompression-room", - "resources_presentation": "https://docs.google.com/presentation/d/1TFFR57Pxj41MY1aoKmiTItEaSPtPRaK4-BbRLFZTnQQ" + "slot_start": 1731465900000, + "slot_end": 1731471300000, + "slot_roomId": "classroom-b", + "resources_presentation": "https://docs.google.com/presentation/d/1HZSm9H-PuHEKe3mrj7Wl9hDODP3kP9iQMoLjxXQ81Iw", + "resources_slides": "https://drive.google.com/file/d/1G8I9OkSEix5bqs0J6xrSDPkrVxLyCh0e/view", + "speakers": [ + "0xrajeev", + "joran-honig", + "nat-chin", + "tincho" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -346761,6 +345798,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -346783,6 +345821,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -346855,6 +345894,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -347250,6 +346291,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -347401,6 +346443,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -347509,6 +346552,9 @@ 0, 0, 0, + 2, + 2, + 2, 0, 0, 0, @@ -347522,12 +346568,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -347816,13 +346857,13 @@ 2, 0, 0, + 2, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -347833,59 +346874,52 @@ }, { "session": { - "id": 
"finding-bugs-42-tips-from-4-security-researchers", - "sourceId": "AZNENK", - "title": "Finding Bugs: 42 Tips from 4 Security Researchers", - "description": "Billions of dollars are at risk, and protocols spend millions on security through audits and bug bounties. Have you ever wondered how you can become a top security researcher securing these billions?\r\n\r\nIn this workshop, 4 recognized security researchers share their experiences on smart contract security with practical tools & techniques to find & report vulnerabilities. Security researchers, even aspirational ones, can take away some key advice to improve their smart contract security skills.", - "track": "Security", - "type": "Workshop", + "id": "finding-rough-consensus-on-issuance", + "sourceId": "GSKJK8", + "title": "Finding rough consensus on issuance", + "description": "lido and ef researchers agree on far more than people think. this talk is an attempt to synthesize and explain my take on the big picture as simply as possible with plenty of humour.", + "track": "Core Protocol", + "type": "Talk", "expertise": "Beginner", - "audience": "Research", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Security", - "Auditing", - "Bug", - "Bounties", - "smart", - "contracts", - "Auditing", - "Bounties", - "Bug", - "Security" + "ethereum", + "Economics", + "Ethereum Roadmap", + "Politics" ], "keywords": [ - "Education", - "Hacks", - "Smart Contract Security" + "Issuance", + "Lido", + "Ethereum" ], - "duration": 5654, + "duration": 1540, "language": "en", - "sources_swarmHash": "5115b9b314e63c202aea765f7fc8025db430ff8d7f370ddddc28e16273af4e24", - "sources_youtubeId": "8d2UuzEBVdM", + "sources_swarmHash": "606060e792644aaedb6856326c95c995d3a5181cdd1ed28148e53af3a8009506", + "sources_youtubeId": "7TM2YL4ZRNI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6736eee41b0f83434d94c564", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731465900000, - "slot_end": 1731471300000, - "slot_roomId": "classroom-b", - "resources_presentation": "https://docs.google.com/presentation/d/1HZSm9H-PuHEKe3mrj7Wl9hDODP3kP9iQMoLjxXQ81Iw", - "resources_slides": null, + "slot_start": 1731555000000, + "slot_end": 1731556800000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1UZfs00-12fFWsIVRmhuoFq4kD-ulyhRRnI5VXPFmdeQ", + "resources_slides": "https://drive.google.com/file/d/1G_ebeSAhDqQtnNGD04MxXfLQUPfqeRED/view", "speakers": [ - "0xrajeev", - "joran-honig", - "nat-chin", - "tincho" + "sacha" ] }, "vector": [ - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -348135,7 +347169,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -348160,7 +347193,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -348233,11 +347265,10 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -348632,7 +347663,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -348668,6 +347698,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -348784,7 +347815,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -348825,6 +347855,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -348893,9 +347924,6 @@ 0, 0, 0, - 2, - 2, - 2, 0, 0, 0, @@ -348910,7 +347938,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -348946,6 +347973,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -348971,6 +347999,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -349200,11 +348229,11 @@ 2, 0, 0, - 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -349217,51 +348246,50 @@ }, { "session": { - "id": "finding-rough-consensus-on-issuance", - "sourceId": "GSKJK8", - "title": "Finding rough consensus on issuance", - 
"description": "lido and ef researchers agree on far more than people think. this talk is an attempt to synthesize and explain my take on the big picture as simply as possible with plenty of humour.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Beginner", - "audience": "Community", + "id": "firefly-build-your-own-hardware-wallet", + "sourceId": "LMZKZS", + "title": "Firefly - Build your own hardware wallet", + "description": "Build your own Firefly hardware wallet and write your first custom firmware in a short interactive session. All parts provided, just bring a laptop and USB-C cable.", + "track": "Developer Experience", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "ethereum", - "Economics", - "Ethereum Roadmap", - "Politics" + "DevEx", + "Hacks", + "Hardware wallets", + "arduino", + "DevEx", + "Hacks", + "Hardware wallets" ], "keywords": [ - "Issuance", - "Lido", - "Ethereum" + "DIY", + "Arduino" ], - "duration": 1540, + "duration": 564, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "22e79a1778a0d016c579c6d3bff0ed86601dc90b1a2f896324503482136f2c30", + "sources_youtubeId": "NWdMDKMZdpQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736eee41b0f83434d94c564", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731555000000, - "slot_end": 1731556800000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1UZfs00-12fFWsIVRmhuoFq4kD-ulyhRRnI5VXPFmdeQ", - "resources_slides": null, + "slot_start": 1731473400000, + "slot_end": 1731474000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/12mlEi-XhwS1335VqCql4XOq2MN1ZU6WJeQLvyAc-QHU", + "resources_slides": "https://drive.google.com/file/d/1-ljAhJeqAvbFjYFghgSnECvbfwng3v6d/view", "speakers": [ - "sacha" + "richard-moore" ] }, "vector": [ 0, 0, 0, - 0, 6, 0, 0, @@ -349591,6 +348619,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -349612,7 +348641,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -350037,6 +349065,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -350044,7 +349073,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -350201,8 +349229,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -350263,6 +349289,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -350320,7 +349347,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -350347,6 +349373,7 @@ 0, 0, 2, + 2, 0, 0, 0, @@ -350569,6 +349596,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -350581,10 +349609,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -350594,51 +349618,49 @@ }, { "session": { - "id": "firefly-build-your-own-hardware-wallet", - "sourceId": "LMZKZS", - "title": "Firefly - Build your own hardware wallet", - "description": "Build your own Firefly hardware wallet and write your first custom firmware in a short interactive session. All parts provided, just bring a laptop and USB-C cable.", - "track": "Developer Experience", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Developer", + "id": "folding-starks-with-the-mova-folding-scheme", + "sourceId": "J78CHZ", + "title": "Folding STARKs with the Mova folding scheme", + "description": "We will present a new folding scheme that is 5 to 10 times more efficient than Nova, and 2.5 to 4 times more efficient than Hypernova. 
We will then explain how to use the scheme so as to construct a folding scheme for STARK proofs.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Expert", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "DevEx", - "Hacks", - "Hardware wallets", - "arduino", - "DevEx", - "Hacks", - "Hardware wallets" + "ZKP", + "Zero-Knowledge", + "STARK", + "post-quantum", + "STARK", + "Zero-Knowledge", + "ZKP" ], "keywords": [ - "DIY", - "Arduino" + "Folding", + "Post-Quantum" ], - "duration": 564, + "duration": 1385, "language": "en", - "sources_swarmHash": "22e79a1778a0d016c579c6d3bff0ed86601dc90b1a2f896324503482136f2c30", - "sources_youtubeId": "NWdMDKMZdpQ", + "sources_swarmHash": "075645407ecaef5d20b741c3c2a15c9d02b76d18802bd15d67e8612f19039826", + "sources_youtubeId": "psSr045sdso", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673833811b0f83434dc2c6ef", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673833811b0f83434dc2c6ef.vtt", + "transcript_text": " This Friday morning. So yeah, I will talk about how to do folding in the context of Starks. So let's start with the basics. Wait one second. All right. So what is folding? So in folding we have a relation R, the relation of interest, which consists of pairs X, W. X is an instance or a statement of a problem, and W is a witness or a solution to the problem. And a folding scheme for this relation R is an interactive protocol between a prover and a verifier, where the prover and the verifier have two instances, X1 and X2, and the prover also has two witnesses, W1, W2, and they are valid witnesses for the instances. And then the prover and the verifier interact, and at the end of this interaction they output a new instance witness pair, X3, W3, with this key property: if this output instance witness pair is valid, so it is in the relation, then the original two instance witness pairs are in the relation as well, except with negligible probability. So this is the key property of a folding scheme. So basically what is going on here is that we have two tasks. We have to prove one instance witness pair and another one. And we have applied an interactive argument that reduces these two tasks to a single task. So if this folding step is very cheap, then you are basically gaining work, right? You have to do less work because now you only have to prove one instance witness pair. Okay, so commitments play a crucial role in this type of scheme, at least in the modern ones. In reality, in practice, all instances include a commitment to the witness. So in reality, in practice, the instance witness pairs look like this. So the instance is a true instance, XI prime, and then it also includes a commitment to the witness. Okay? So a folding scheme looks like this. We have two instance witness pairs where the instance contains a commitment to the witness. You fold, you get a new instance witness pair. Okay, and the commitment most of the time is homomorphic, and this is a crucial point. And homomorphic means that the commitment to the sum of two vectors is the sum of the commitments to the vectors.
Okay, so if you've never seen folding, this is everything you need to know to follow this talk. Let's look at how folding looks from 5,000 kilometers away. So as I said, in folding you have two instance witness pairs of this form. There's a prover and a verifier. The prover knows the instances and the witnesses. The verifier knows the instances. And now they exchange messages; it doesn't really matter what's going on here for the purposes of this talk. And at the end, the verifier sends a uniformly sampled challenge. And then the prover and verifier output a new instance witness pair. The witness is only known to the prover. And crucially, the new instance and the new witness is a linear combination of the initial two using the last challenge sent by the verifier. So we have these formulas in here, x3 equals x1 plus alpha x2 and so on. And the verifier computes the... So the verifier needs to get the whole instance, right? At the end of folding. So it's easy for the verifier to get X3 because it knows X1 and X2. But how does the verifier get the commitment to W3? Well, here you can use the homomorphic property of the commitment scheme. The verifier knows the commitments to W1 and W2, and because of the homomorphic property of the commitment scheme it can obtain a commitment to W3 without any interaction with the prover, just by performing this linear combination of the commitments. So this is how folding looks from 5,000 kilometers away. Many folding schemes look like this. OK, let's discuss commitments in folding schemes in more depth. So usually, the commitment scheme is a Pedersen commitment or a KZG commitment. So it's an elliptic curve-based commitment, or some variation of Pedersen and KZG. For example, in Nova, HyperNova, Protostar, ProtoGalaxy, et cetera, this is the commitment of choice. The issue is that the vector has large entries, and you have to prove statements about elliptic curves: you have to prove that the folding is done correctly and so on. So you have to prove statements about elliptic curve operations, which are quite a headache. And yeah, because of this setting, you are kind of bound to using a KZG-based SNARK to prove a folded instance, right? Because you are using large fields, you have elliptic curve-based commitments, and so on. But maybe you want to use a different proof system, right? So that's an inconvenience. Okay. So, yeah, let's say we want to use a stark to prove our folded instance witness pairs. So by Stark, I loosely mean a snark that uses codes and Merkle tree-based commitments. For example, the Stark protocol from StarkWare, Plonky 2 and 3, Boojum, RISC Zero, and so on. These protocols are configured on small fields, and they are getting smaller and smaller. For example, Goldilocks for Plonky 2, Baby Bear for Plonky 3, and M31 for Circle STARK.
The attractiveness of these fields is that you get smaller arithmetizations due to special properties of the primes of these fields. You also get cheaper computations because field elements never get very large. If you invert a 32-bit field element, the inverse has at most 32 bits. But there are some problems in the context of folding. The first problem is that Merkle trees are not homomorphic. So if you want to do folding, you don't have the homomorphic commitment right away. And since we are working on small fields, we cannot rely on elliptic curves here. And also, depending on how you are going to do folding, even if Merkle trees were homomorphic, it might not be worth it in the context of Starks, and I will explain why now. So let's look at the cost breakdown of creating Stark proofs. And yeah, let's look first at how a FRI-based Stark works. So yeah, all these proving systems I mentioned before work as follows. So first the prover computes the trace or the circuit values, the values in the wires of the circuit. Then it encodes the trace using an error-correcting code, and for this it interpolates the columns of the trace and then evaluates the polynomials on a larger domain using inverse FFTs and FFTs. This is called computing the low degree extension of the trace. Then the prover commits to this low degree extension using a Merkle tree. And finally the prover takes some quotients and applies FRI and so on. Okay, what's the cost breakdown of all these steps? I'm citing Eli Ben-Sasson from a talk at SBC this summer. So the trace generation, in the case of Stwo, costs 46% of the total proof cost. Computing the low-degree extension costs 28%, Merkle trees cost about 13%, and the rest also costs 13%. So, yeah. And if we are in the context of folding, if we are thinking of using folding, it means that somehow we want to use recursion. So we are probably not using Merkle trees full of Keccaks or SHA-256 or classic hashes. We are probably using Merkle trees that include some type of algebraic hash, in which case the third step would be much more expensive. Yeah, so here's an important observation. Even if the Merkle trees were homomorphic, you don't want to do the folding with the commitments to the low-degree extension of the trace. Because at each folding step you would perform this step, this step, and this step, and you would only save this part in here. And you would still need to do folding. So in the absolute best-case scenario, you would save only a small part of the cost. We don't want to commit to the low degree extension with Merkle trees. We are just going to commit to the trace in the cheapest way possible. Yeah, so this is a key difference with approaches like accumulation without homomorphism or the ARC protocol, from papers from this year. Okay, so let's recap what we want to do.
We want to commit to the trace instead of the low-degree extension. We want the scheme to be compatible with Starks, and this means that we have to work over a small field. We want the folded instance to be provable in a reasonable manner with a stark. And yeah, since we want an instance to be provable with a stark, here we think of instances as being AIRs or Plonkish instances. And we look at these as CCSs. A CCS is basically an R1CS constraint, but generalized with more terms. Okay, so here is the general framework of what we could try to do if we want to fold STARKs, and afterward I will talk about actual instantiations. So let's say we have two instance witness pairs, two AIRs. The framework is as follows. The prover would commit to the trace, not to the low degree extension of the trace, so the trace, the witness, with a homomorphic commitment scheme. Okay, so now we have these two instance witness pairs, and then somehow we would fold in a way that the folded instance is still an AIR or somewhat similar to an AIR. And now say that we want to prove a folded instance. So we have done folding, and now we want to prove the folded instance witness pair. So what would the prover and the verifier do? The prover computes the low-degree extension. So we want to use starks, right, to prove the folded instance. And for starks, if you are using starks, you have to encode the witness because you are going to use error-correcting codes. So the prover computes the low-degree extension of the folded witness, W3, and commits to it with a Merkle tree, as if it was just using the stark protocol, and then proves with his favorite stark that the instance witness pair is in the relation. Okay, so we have to keep in mind that we'll have to do this step in here when we want to prove a folded instance witness pair. So this will inform how we choose the commitment scheme, to not make this part in here too expensive. Okay, so let's instantiate the framework now. We need a commitment scheme that is homomorphic and that is compatible with a stark field, so it somehow works over small fields. And the candidate here is the Ajtai commitment scheme from lattice-based cryptography. If we look at AIR or Plonkish instances as CCSs, so let's say R1CSs or relaxed R1CSs, then the first thing that comes to mind is Nova, because Nova folds relaxed R1CSs into relaxed R1CSs. So this is our candidate, Nova. However, there's no Nova-type folding scheme that works over lattices. When it comes to folding over lattices, the only work available right now is LatticeFold.
But there's no Nova analog over lattices, so one of our main results is designing such a folding scheme. Okay, so a bit on lattices and LatticeFold. So LatticeFold and many lattice-based schemes work over so-called cyclotomic rings. Cyclotomic rings basically look like field extensions, right? You take a ring of polynomials over a field, a finite field, and quotient it by an ideal generated by a polynomial. However, here, the quotient polynomial may not be irreducible. And in this case, if the polynomial is not irreducible, it means that this ring splits as a direct product of several field extensions. If F is irreducible, then here there's just one factor and you have a Galois extension, the standard field extension. But if F is not irreducible, then you have this direct product. But in any case, these rings are quite nice because they are just direct products of field extensions. So you could in principle, and we show that you can, configure R in a nice way while at the same time choosing F to be a stark prime field. So we can choose F to be the Goldilocks field. And the parameters of this commitment scheme are as follows. It's just a matrix of ring elements sampled uniformly at random. It commits to vectors of length m of ring elements. And the commitment is simply the matrix-vector multiplication, A times the vector. So let's discuss the efficiency of this commitment scheme first. So say, and this is an example configuration we can deal with, we choose the base field to be the Goldilocks field, which has 64 bits, and we can configure R so that R splits as eight factors, and each factor is a degree three extension of the Goldilocks field. Here there's an important remark on how to work with cyclotomic rings, and this is that because of this isomorphism in here, this is the number theoretic transform, because of this isomorphism, you can potentially store eight trace cells in a single ring element. So if you want to store eight trace elements, and each element is in the field, you can put each element in one of these components, and then these eight elements become just one ring element. So a vector of size 2 to the n with ring elements can store 2 to the n plus 3 trace cells. And this is quite relevant for performance. And this is some benchmarks of our implementation of the Ajtai commitment scheme using this configuration. So say we want to commit to a vector of size 2 to the 16 ring elements, and this means 2 to the 19 field elements. So this costs 65 milliseconds. This should be the commitment time, I don't know why it says field. And this is for comparison what you would get if you do Merkle tree commitments to the vectors: if you commit to 2 to the 16 field elements with a Blake function, so a classic hash, this is 14 milliseconds. But as I said, this is just 2 to the 16 field elements, while this is 2 to the 19 field elements. Also, if you are using Merkle trees, you probably have encoded the witness, which means that here you have an extra one in the exponent, which is not giving you anything when you want to encode the witness. Maybe the overhead is even larger if the rate of the code is less than one half. Yeah, so if we compare 2 to the 19 and 2 to the 19, we get similar performance.
And if we put algebraic hashes in the Merkle trees, which we probably have to if we are in the context of folding, because it means we are interested in using some kind of recursion, then the difference, of course, is dramatic. How much time do I have? Five minutes? Okay. So yeah, we are almost... there's one or two slides left. So recall, the Ajtai commitment is just matrix-vector multiplication of ring elements. There's a big issue with this commitment scheme. I said what is good about it, and now what's bad about it. This commitment scheme is only binding when the vector you're committing to has small norm, where the norm is the largest coefficient of an entry of the vector when you look at the elements of the ring as polynomials. And yeah, remember that the folded witness in our folding schemes is a linear combination of the two initial witnesses. So there's two issues here because of this caveat. One is that even if W1 and W2 have small norm, because W3 is a random linear combination of W1 and W2, it is possible that W3 has large norm. In general, W3 might have arbitrarily large norm, in which case the commitment to W3 with the Ajtai commitment would not be binding. And there's another issue, which is that even if issue one does not happen, even if W3 has low norm for some reason, when you prove knowledge soundness of the folding scheme, you need to make sure that the extractor gets witnesses of small norm. Because of this caveat, we only commit to witnesses of small norm, so we end up requiring that the prover commits to witnesses of small norm. So when you prove knowledge soundness, you need to show that the extractor only gets witnesses of small norm, and this is quite difficult to enforce. Okay, so since there's five minutes less than I had planned, I will skip how LatticeFold and how we address these two issues, and just jump to the Q&A part. Thank you. Thank you very much. We have one question. Why is M31 a WIP, and because of the...? The thing is that there's a lot of factors coming into play when configuring these rings. We have to configure them in a way that when they split as field extensions, they have at least 128 bits to ensure soundness during sumcheck. And at the same time, we have to ensure that there's a subset of small norm elements that is large enough for the soundness in another part of the protocol. And this places a lot of constraints on how you can choose the cyclotomic polynomial. Yes, but the factorization of P minus one is completely different. The way you can configure the ring is highly constrained by the factorization of p minus one, on how it factors.
Do we have any other questions?", "eventId": "devcon-7", - "slot_start": 1731473400000, - "slot_end": 1731474000000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/12mlEi-XhwS1335VqCql4XOq2MN1ZU6WJeQLvyAc-QHU", - "resources_slides": null, + "slot_start": 1731638700000, + "slot_end": 1731640500000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/190Nsmxqio3tQ_4Rk6RPoyEf0-2DbZVoOuYNvY9It1YM", + "resources_slides": "https://drive.google.com/file/d/1N38NGMTpDb1V9lLFZWKepuFeBU5UKsZI/view", "speakers": [ - "richard-moore" + "albert-garreta" ] }, "vector": [ - 0, - 0, - 0, - 6, 0, 0, 0, @@ -350649,6 +349671,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -350968,7 +349991,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -350993,6 +350015,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -351398,6 +350421,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -351416,7 +350440,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -351461,6 +350484,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -351596,6 +350620,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -351640,7 +350665,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -351725,8 +350749,6 @@ 0, 0, 2, - 2, - 0, 0, 0, 0, @@ -351949,11 +350971,9 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, + 2, 0, 2, 0, @@ -351966,51 +350986,48 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "folding-starks-with-the-mova-folding-scheme", - "sourceId": "J78CHZ", - "title": "Folding STARKs with the Mova folding scheme", - "description": "We will present a new folding scheme that is 5 to 10 times more efficient than Nova, and 2.5 to 4 times more efficient than Hypernova. We will then explain how to use the scheme so as to construct a folding scheme for STARK proofs.", - "track": "Applied Cryptography", - "type": "Talk", - "expertise": "Expert", - "audience": "Research", + "id": "for-the-kingdom-mud-day-demo", + "sourceId": "FM3LCK", + "title": "For The Kingdom - MUD Day Demo", + "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications.\r\n\r\nFor The Kingdom (https://forthekingdom.xyz/) is a web-based MMORPG featuring a player-driven economy and worldbuilding, empowering players to be anyone they want to be.\r\n\r\nThe game is fully onchain, and currently live on the Redstone Garnet Testnet, using the MUD framework.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "ZKP", - "Zero-Knowledge", - "STARK", - "post-quantum", - "STARK", - "Zero-Knowledge", - "ZKP" + "Autonomous World", + "Gaming" ], "keywords": [ - "Folding", - "Post-Quantum" + "fully", + "onchain", + "game" ], - "duration": 1385, + "duration": 350, "language": "en", - "sources_swarmHash": "075645407ecaef5d20b741c3c2a15c9d02b76d18802bd15d67e8612f19039826", - "sources_youtubeId": "psSr045sdso", + "sources_swarmHash": "f72a715fadd0d0f0678b493f07a5e8630a9eba13d93187911740ef6769a6d966", + "sources_youtubeId": "PRVgtqUH6_U", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673833811b0f83434dc2c6ef", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673833811b0f83434dc2c6ef.vtt", - "transcript_text": " This Friday morning. So yeah, I will talk about how to folding in the context of Starks. So let's start with the basics. Wait one second. All right. So what is folding? So in folding we have a relation R, relation of interest, which consists of pairs X, W. 
X is an instance or a statement of a problem, and W is a witness or a solution to the problem. And a folding scheme for this relation R is an interactive protocol between a prover and a verifier, where the prover and the verifier have two instances, X1 and X2, and the prover also has two witnesses, W1, W2, and they are valid witnesses for the instances. And then the prover and the verifier interact, and at the end of this interaction they output a new instance witness pair, X3, W3, with this key property: if this output instance witness pair is valid, so it is in the relation, then the original two instance witness pairs are in the relation as well, except with negligible probability. So this is the key property of a folding scheme. So basically what is going on here is that we have two tasks. We have to prove one instance witness pair and another one. And we have applied an interactive argument that reduces these two tasks to a single task. So if this folding step is very cheap, then you are basically gaining work, right? You have to do less work because now you only have to prove one instance witness pair. Okay, so commitments play a crucial role in this type of scheme, at least in the modern ones. In reality, in practice, all instances include a commitment to the witness. So in reality, in practice, the instance witness pairs look like this. So the instance is a true instance, XI prime, and then it also includes a commitment to the witness. Okay? So a folding scheme looks like this. We have two instance witness pairs where the instance contains a commitment to the witness. You fold, you get a new instance witness pair. Okay, and the commitment most of the time is homomorphic, and this is a crucial point. And homomorphic means that the commitment to the sum of two vectors is the sum of the commitments to the vectors. Okay, so if you've never seen folding, this is everything you need to know to follow this talk. Let's look at how folding looks from 5,000 kilometers away. So as I said, in folding you have two instance witness pairs of this form. There's a prover and a verifier. The prover knows the instances and the witnesses. The verifier knows the instances. And now they exchange messages; it doesn't really matter what's going on here for the purposes of this talk. And at the end, the verifier sends a uniformly sampled challenge. And then the prover and verifier output a new instance witness pair. The witness is only known to the prover. And crucially, the new instance and the new witness is a linear combination of the initial two using the last challenge sent by the verifier. So we have these formulas in here, x3 equals x1 plus alpha x2 and so on. And the verifier computes the... So the verifier needs to get the whole instance, right? At the end of folding. So it's easy for the verifier to get X3 because it knows X1 and X2. But how does the verifier get the commitment to W3? Well, here you can use the homomorphic property of the commitment scheme.
The verifier knows the commitments to W1 and W2, and because of the homomorphic property of the commitment scheme it can obtain a commitment to W3 without any interaction with the prover, just by performing this linear combination of the commitments. So this is how folding looks from 5,000 kilometers away. Many folding schemes look like this. OK, let's discuss commitments in folding schemes in more depth. So usually, the commitment scheme is a Pedersen commitment or a KZG commitment. So it's an elliptic curve-based commitment, or some variation of Pedersen and KZG. For example, in Nova, HyperNova, Protostar, ProtoGalaxy, et cetera, this is the commitment of choice. The issue is that the vector has large entries, and you have to prove statements about elliptic curves: you have to prove that the folding is done correctly and so on. So you have to prove statements about elliptic curve operations, which are quite a headache. And yeah, because of this setting, you are kind of bound to using a KZG-based SNARK to prove a folded instance, right? Because you are using large fields, you have elliptic curve-based commitments, and so on. But maybe you want to use a different proof system, right? So that's an inconvenience. Okay. So, yeah, let's say we want to use a stark to prove our folded instance witness pairs. So by Stark, I loosely mean a snark that uses codes and Merkle tree-based commitments. For example, the Stark protocol from StarkWare, Plonky 2 and 3, Boojum, RISC Zero, and so on. These protocols are configured on small fields, and they are getting smaller and smaller. For example, Goldilocks for Plonky 2, Baby Bear for Plonky 3, and M31 for Circle STARK. The attractiveness of these fields is that you get smaller arithmetizations due to special properties of the primes of these fields. You also get cheaper computations because field elements never get very large. If you invert a 32-bit field element, the inverse has at most 32 bits. But there are some problems in the context of folding. The first problem is that Merkle trees are not homomorphic. So if you want to do folding, you don't have the homomorphic commitment right away. And since we are working on small fields, we cannot rely on elliptic curves here. And also, depending on how you are going to do folding, even if Merkle trees were homomorphic, it might not be worth it in the context of Starks, and I will explain why now. So let's look at the cost breakdown of creating Stark proofs. And yeah, let's look first at how a FRI-based Stark works. So yeah, all these proving systems I mentioned before work as follows. So first the prover computes the trace or the circuit values, the values in the wires of the circuit.
Then it encodes the trace using an error-correcting code, and for this it interpolates the columns of the trace and then evaluates the polynomials on a larger domain using inverse FFTs and FFTs. This is called computing the low degree extension of the trace. Then the prover commits to this low degree extension using a Merkle tree. And finally the prover takes some quotients and applies FRI and so on. Okay, what's the cost breakdown of all these steps? I'm citing Eli Ben-Sasson from a talk at SBC this summer. So the trace generation, in the case of Stwo, costs 46% of the total proof cost. Computing the low-degree extension costs 28%, Merkle trees cost about 13%, and the rest also costs 13%. So, yeah. And if we are in the context of folding, if we are thinking of using folding, it means that somehow we want to use recursion. So we are probably not using Merkle trees full of Keccaks or SHA-256 or classic hashes. We are probably using Merkle trees that include some type of algebraic hash, in which case the third step would be much more expensive. Yeah, so here's an important observation. Even if the Merkle trees were homomorphic, you don't want to do the folding with the commitments to the low-degree extension of the trace. Because at each folding step you would perform this step, this step, and this step, and you would only save this part in here. And you would still need to do folding. So in the absolute best-case scenario, you would save only a small part of the cost. We don't want to commit to the low degree extension with Merkle trees. We are just going to commit to the trace in the cheapest way possible. Yeah, so this is a key difference with approaches like accumulation without homomorphism or the ARC protocol, from papers from this year. Okay, so let's recap what we want to do. We want to commit to the trace instead of the low-degree extension. We want the scheme to be compatible with Starks, and this means that we have to work over a small field. We want the folded instance to be provable in a reasonable manner with a stark. And yeah, since we want an instance to be provable with a stark, here we think of instances as being AIRs or Plonkish instances. And we look at these as CCSs. A CCS is basically an R1CS constraint, but generalized with more terms. Okay, so here is the general framework of what we could try to do if we want to fold STARKs, and afterward I will talk about actual instantiations. So let's say we have two instance witness pairs, two AIRs. The framework is as follows. The prover would commit to the trace, not to the low degree extension of the trace, so the trace, the witness, with a homomorphic commitment scheme. Okay, so now we have these two instance witness pairs, and then somehow we would fold in a way that the folded instance is still an AIR or somewhat similar to an AIR. And now say that we want to prove a folded instance. So we have done folding, and now we want to prove the folded instance witness pair. So what would the prover and the verifier do?
The prover computes the low-degree extension. So we want to use starks, right, to prove the folded instance. And for starks, if you are using starks, you have to encode the witness because you are going to use error-correcting codes. So the prover computes the low-degree extension of the folded witness, W3, and commits to it with a Merkle tree, as if it was just using the stark protocol, and then proves with his favorite stark that the instance witness pair is in the relation. Okay, so we have to keep in mind that we'll have to do this step in here when we want to prove a folded instance witness pair. So this will inform how we choose the commitment scheme, to not make this part in here too expensive. Okay, so let's instantiate the framework now. We need a commitment scheme that is homomorphic and that is compatible with a stark field, so it somehow works over small fields. And the candidate here is the Ajtai commitment scheme from lattice-based cryptography. If we look at AIR or Plonkish instances as CCSs, so let's say R1CSs or relaxed R1CSs, then the first thing that comes to mind is Nova, because Nova folds relaxed R1CSs into relaxed R1CSs. So this is our candidate, Nova. However, there's no Nova-type folding scheme that works over lattices. When it comes to folding over lattices, the only work available right now is LatticeFold. But there's no Nova analog over lattices, so one of our main results is designing such a folding scheme. Okay, so a bit on lattices and LatticeFold. So LatticeFold and many lattice-based schemes work over so-called cyclotomic rings. Cyclotomic rings basically look like field extensions, right? You take a ring of polynomials over a field, a finite field, and quotient it by an ideal generated by a polynomial. However, here, the quotient polynomial may not be irreducible. And in this case, if the polynomial is not irreducible, it means that this ring splits as a direct product of several field extensions. If F is irreducible, then here there's just one factor and you have a Galois extension, the standard field extension. But if F is not irreducible, then you have this direct product. But in any case, these rings are quite nice because they are just direct products of field extensions. So you could in principle, and we show that you can, configure R in a nice way while at the same time choosing F to be a stark prime field. So we can choose F to be the Goldilocks field. And the parameters of this commitment scheme are as follows. It's just a matrix of ring elements sampled uniformly at random.
It commits to vectors of length m of ring elements. And the commitment is simply the matrix-vector multiplication, A times the vector. So let's discuss the efficiency of this commitment scheme first. So say, and this is an example configuration we can deal with, we choose the base field to be the Goldilocks field, which has 64 bits, and we can configure R so that R splits as eight factors, and each factor is a degree three extension of the Goldilocks field. Here there's an important remark on how to work with cyclotomic rings, and this is that because of this isomorphism in here, this is the number theoretic transform, because of this isomorphism, you can potentially store eight trace cells in a single ring element. So if you want to store eight trace elements, and each element is in the field, you can put each element in one of these components, and then these eight elements become just one ring element. So a vector of size 2 to the n with ring elements can store 2 to the n plus 3 trace cells. And this is quite relevant for performance. And this is some benchmarks of our implementation of the Ajtai commitment scheme using this configuration. So say we want to commit to a vector of size 2 to the 16 ring elements, and this means 2 to the 19 field elements. So this costs 65 milliseconds. This should be the commitment time, I don't know why it says field. And this is for comparison what you would get if you do Merkle tree commitments to the vectors: if you commit to 2 to the 16 field elements with a Blake function, so a classic hash, this is 14 milliseconds. But as I said, this is just 2 to the 16 field elements, while this is 2 to the 19 field elements. Also, if you are using Merkle trees, you probably have encoded the witness, which means that here you have an extra one in the exponent, which is not giving you anything when you want to encode the witness. Maybe the overhead is even larger if the rate of the code is less than one half. Yeah, so if we compare 2 to the 19 and 2 to the 19, we get similar performance. And if we put algebraic hashes in the Merkle trees, which we probably have to if we are in the context of folding, because it means we are interested in using some kind of recursion, then the difference, of course, is dramatic. How much time do I have? Five minutes? Okay. So yeah, we are almost... there's one or two slides left. So recall, the Ajtai commitment is just matrix-vector multiplication of ring elements. There's a big issue with this commitment scheme. I said what is good about it, and now what's bad about it. This commitment scheme is only binding when the vector you're committing to has small norm, where the norm is the largest coefficient of an entry of the vector when you look at the elements of the ring as polynomials. And yeah, remember that the folded witness in our folding schemes is a linear combination of the two initial witnesses. So there's two issues here because of this caveat. One is that even if W1 and W2 have small norm, because W3 is a random linear combination of W1 and W2, it is possible that W3 has large norm. In general, W3 might have arbitrarily large norm, in which case the commitment to W3 with the Ajtai commitment would not be binding. And there's another issue, which is that even if issue one does not happen, even if W3 has low norm for some reason, when you prove knowledge soundness of the folding scheme, you need to make sure that the extractor gets witnesses of small norm.
Because of this caveat, we only commit to witnesses of small norm, so we end up requiring that the prover commits to witnesses of small norm. So when you prove knowledge soundness, you need to show that the extractor only gets witnesses of small norm, and this is quite difficult to enforce. Okay, so since there's five minutes less than I had planned, I will skip how LatticeFold and our scheme address these two issues and just jump to the Q&A part. Thank you. Thank you very much. We have one question: why is M31 still WIP, and because of what? The thing is that there's a lot of factors coming into play when configuring these rings. We have to configure them in a way that when they split as field extensions, they have at least 128 bits to ensure soundness during sumcheck. And at the same time, we have to ensure that there's a subset of small-norm elements that is large enough for the soundness in another part of the protocol. And this places a lot of constraints on how you can choose the cyclotomic polynomial. Yes, but the factorization of p minus one is completely different. The way you can configure the ring is highly constrained by the factorization of p minus one, by how it factors. Do we have any other questions?", "sources_streamethId": "6735897b9dbb7a90e1550f24", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735897b9dbb7a90e1550f24.vtt", "transcript_text": " Translator's note: Trinh will be demoing For The Kingdom, a browser-based RPG currently on testnet. Hello, okay, thanks guys. So yeah, super great, presenting our game right after E-Frontier, like just super. So okay, hi everyone. Thanks a lot for joining. My name is Tuyen, I'm the founder of Constant Maker Studio, and we're making a fully on-chain game called For the Kingdom. So For the Kingdom is an MMO that aims to become a community-owned world with a player-driven economy and world-building that empowers players to be anyone they want to be. Okay, so can you repeat the video for me? Thanks a lot. Okay, so our vision is to craft an expansive and persistent game world where players can freely experience it and take an active role in shaping the game's values. The core gameplay loop begins by first dropping you into a vast open world where you can explore and fight monsters for valuable materials, fight other players to steal their items, or simply lead a peaceful life by chopping away some wood. The true goal of the game is becoming the best at what you do and contributing the most to your kingdom. Imagine you're the best blacksmith in the whole game, and only you can craft the most powerful weapon. It means your kingdom's warriors have an edge over their opponents, or you can even sell your weapons to other kingdoms as well for a profit. And so here I'll show you the first three minutes of our game via a recording, but you can definitely already play the game, even on your smartphone. So the first thing you do is to choose one of four available kingdoms and start building your character. So you can see here that that's basically me creating a character called Defcon for the win. I'll be able to customize the character's story by answering some of the random questions about the character's childhood or their personality. Your choices here determine your character's starting stats; like choosing that you stole a trinket from a rich woman when you were 10 will get you a boost in agility.
The first thing you do in the game is claim a welcome package where you'll get all of the beginner items necessary to start, you know, doing anything. We have like some text to support onboarding, but I'll just, you know, like skip it to get to the fun part. So right after that, the next thing you can do is to basically go into your character profile and equip a weapon so that you can prevent yourself from punching some monsters barehanded. So the next thing you can do is to browse some available quest lines. We have daily quests as well as story lines that you can explore through kingdom specifics. Next up, you can move from your starting point to different parts of the map. Like here I'm moving to a grassland outside of my starting city. Moving right now kind of costs 15 seconds, to compensate that right now we don't have, like, you know, a zoom-in map or an Intel map like DOFUS or ROCFU yet, but we'll get there sometime. You can see during that time that I've also equipped a battle skill. Next, you'll be able to engage in PvE in each tile. Like right now, I'm fighting a slime enemy. As the game is an auto battle and fully on-chain, it means that right now the outcome is deterministic and the result of the battle is already decided once you click on the button. But the animation here is kind of like a good representation. Here you can also engage in PvP with different players in this game. For example, I can challenge literally the best player right now. And probably we'll get decimated in one move. Yep, that's because Soulmate, or like Dudendi, I think that he's here. Thanks a lot for coming. Like you know, he is stoked. The map is right now very huge. And the main goal of this demo, basically, is to defeat Ignis, our first raid boss. It's a powerful dragon that can drop the best loot right now once defeated. So battling monsters, bosses, and, you know, farming resources will enable you to craft various items to get stronger. In the future, these items can be tokenized and sold to other players that cannot craft them. So, that's our demo. I'm just going to skip to... I have like five seconds left. I can't, like, you know, can you change the, I mean, yeah, somehow I can't change it to the next slide. Like, can you manually change it, like, you know, I don't have enough time left. So if you can, like, you know, go to the last slide. It's OK. Just five seconds on this. This is what we've been doing for the next build as well. We'll add more co-op features and basically a card demo game that we kind of stole from Final Fantasy right now. And yeah, just can you move on to the last slide, please? So yeah, thank you, guys. The game is playable right now.
Cross your eyes on mobile.", "eventId": "devcon-7", - "slot_start": 1731638700000, - "slot_end": 1731640500000, + "slot_start": 1731556800000, + "slot_end": 1731557100000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/190Nsmxqio3tQ_4Rk6RPoyEf0-2DbZVoOuYNvY9It1YM", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/19JLbZ-yVksBM4TM3ftOIccuAC6UgKAKtM0nVGAdWdQ4", + "resources_slides": "https://drive.google.com/file/d/1QzU_GBZ2S1lsvtJWqpHYQkGn5ZJu7blZ/view", "speakers": [ - "albert-garreta" + "tuyen-dinh" ] }, "vector": [ @@ -352024,9 +351041,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -352777,7 +351794,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -352840,6 +351856,40 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, 2, 0, 0, @@ -352976,7 +352026,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -353105,57 +352154,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -353331,7 +352329,24 @@ 0, 0, 0, - 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, 0, 2, 0, @@ -353342,50 +352357,47 @@ 0, 0, 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "for-the-kingdom-mud-day-demo", - "sourceId": "FM3LCK", - "title": "For The Kingdom - MUD Day Demo", - "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications.\r\n\r\nFor The Kingdom (https://forthekingdom.xyz/) is a web-based MMORPG featuring a player-driven economy and worldbuilding, empowering players to be anyone they want to be.\r\n\r\nThe game is fully onchain, and currently live on the Redstone Garnet Testnet, using the MUD framework.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "fork-choice-enforced-inclusion-lists-focil", + "sourceId": "CDTX78", + "title": "Fork-Choice enforced Inclusion Lists (FOCIL)", + "description": "A direct consequence of centralized block production is a deterioration of Ethereum's censorship resistance properties. In this talk, we introduce FOCIL, a simple committee-based design improving upon previous inclusion list and co-created block mechanisms. 
We present the benefits of (1) relying on a committee to address issues related to bribing/extortion attacks, and (2) having attesters enforce the IL as part of the block validity condition to prevent IL equivocation.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Autonomous World", - "Gaming" + "Design", + "mechanism" ], "keywords": [ - "fully", - "onchain", - "game" + "Censorship Resistance", + "Inclusion Lists", + "Mechanism Design" ], - "duration": 350, + "duration": 1535, "language": "en", - "sources_swarmHash": "f72a715fadd0d0f0678b493f07a5e8630a9eba13d93187911740ef6769a6d966", - "sources_youtubeId": "PRVgtqUH6_U", + "sources_swarmHash": "ab48939dc7d01fcc84cab502b6e1268be91cf99ff863083e4e7e1b121ae75e13", + "sources_youtubeId": "75aQTuZkDvE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735897b9dbb7a90e1550f24", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735897b9dbb7a90e1550f24.vtt", - "transcript_text": " Translator's note Trinh will be demoing For The Kingdom, a browser-based RPG currently on testnet. Hello, okay, thanks guys. So yeah, super great, presenting our game right after E-Frontier, like just super. So okay, hi everyone. Thanks a lot for joining. My name is Tuyen, I'm the founder of Constant Maker Studio, and we're making a fully on-chain game called For the Kingdom. So For the Kingdom is an MMO that aims to become a community-owned world with a player-driven economy and world-building that empowers players to be anyone they want to be. Okay, so can you repeat the video for me? Thanks a lot. Okay, so our vision is to craft an expansive and persistent game world where players can freely experience it and take an active role in shaping the game's values. The core gameplay loop begins by first dropping you into a vast open world where you can explore and fight monsters for valuable materials, for other players to steal their items, or simply lead a peaceful life by chopping away some woods. The true goal of the game is becoming the best at what you do and contribute the most to your kingdom. Imagine you're the best blacksmith in the whole game, and only you can craft the most powerful weapon. It means your kingdom's warriors have an edge over their opponents, or you can even sell your weapons to other kingdoms as well for a profit. And so here I'll show you the first three minutes of our game via a recording, but you can definitely already play the game, even on your smartphone. So the first thing you do is to choose one of four available kingdoms and start building your character. So you can see here that that's basically me creating a character called Defcon for the win. I'll be able to customize the character's story by answering some of the random questions about the character's childhood or their personality. Your choices here determine your character's starting stats, like choosing that you stole a trinket from a rich woman when you were 10 will get you a boost in agility. The first thing you do in the game is claim a welcome package where you'll get all of the beginner items necessary to start to create, you know, doing anything. We have like some text to support onboarding but I'll just, you know, like skip it to get to the fun part. 
So right after that, the next thing you can do is to basically go into your character profile and equip a weapon so that you can prevent yourself from punching some monsters barehanded. So the next thing you can do is to browse some available quest lines. We have daily quests as well as story lines that you can explore through kingdom specifics. Next up, you can move from your starting point to different parts of the map. Like here I'm moving to a grassland outside of my starting city. Moving right now kind of costs 15 seconds, to compensate that right now we don't have, like, you know, a zoom-in map or an Intel map like DOFUS or ROCFU yet, but we'll get there sometime. You can see during that time that I've also equipped a battle skill. Next, you'll be able to engage in PvE in each tile. Like right now, I'm fighting a slime enemy. As the game is an auto battle and fully on-chain, it means that right now the outcome is deterministic and the result of the battle is already decided once you click on the button. But the animation here is kind of like a good representation. Here you can also engage in PvP with different players in this game. For example, I can challenge literally the best player right now. And probably we'll get decimated in one move. Yep, that's because Soulmate, or like Dudendi, I think that he's here. Thanks a lot for coming. Like you know, he is stoked. The map is right now very huge. And the main goal of this demo, basically, is to defeat Ignis, our first raid boss. It's a powerful dragon that can drop the best loot right now once defeated. So battling monsters, bosses, and, you know, farming resources will enable you to craft various items to get stronger. In the future, these items can be tokenized and sold to other players that cannot craft them. So, that's our demo. I'm just going to skip to... I have like five seconds left. I can't, like, you know, can you change the, I mean, yeah, somehow I can't change it to the next slide. Like, can you manually change it, like, you know, I don't have enough time left. So if you can, like, you know, go to the last slide. It's OK. Just five seconds on this. This is what we've been doing for the next build as well. We'll add more co-op features and basically a card demo game that we kind of stole from Final Fantasy right now. And yeah, just can you move on to the last slide, please? So yeah, thank you, guys. The game is playable right now. Cross your eyes on mobile.", "sources_streamethId": "6736c9979dbb7a90e11fece0", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736c85374749a4b89b3ae2c.vtt", "transcript_text": " Good morning everyone. So today I'll talk about fuzzing for zero-knowledge infrastructure, and this is joint work with my collaborators from the Technical University of Vienna. So just to kind of make sure we are all on the same page, let's have a short definition of what I understand by zero-knowledge infrastructure. So for this talk I'll define zero-knowledge infrastructure as software components that are used for compiling, executing, proving and verifying ZK circuits. So examples would be the processing pipelines that are commonly used by DSLs for describing ZK circuits, or maybe in the future we'll also look at entire ZK EVMs. So with this out of the way, let's look at why this is an important topic and why more people should be doing this.
Well, first, zero-knowledge infrastructure is highly complex and highly critical. For instance, it's used in several L2 chains, so bugs there could have catastrophic financial and reputational impact, and we should really make sure these components are as bulletproof as they can get. While we haven't seen a really catastrophic incident in this field, perhaps comparable to the DAO hack in 2016, it's really important that we have rigorous testing for these components and that we use, you know, the best engineering discipline that we can have, right? Because these components are really complex, and getting them right is not easy. So what is fuzzing? I don't think I have to explain this after the last talk, but fuzzing is widely used in industry. For instance, at Microsoft, Google, Meta, they're fuzzing a lot of their infrastructure to catch bugs before hackers can actually exploit them in practice. So in this talk, I'll give you an overview of our fuzzer for finding critical bugs in processing pipelines for zero-knowledge circuits. The fuzzer is called Circus, and it already supports four of these pipelines, namely Circom, Corsair, Gnark, and Noir. And there's a gazillion different DSLs popping up, it seems. Like, you walk around here and you see another one in the corner. And we've already found 16 bugs in total for the four pipelines we looked at. 15 have already been fixed, so that kind of shows that these bugs are really taken seriously, and developers sometimes respond within hours to actually get them fixed. You can also see a kind of breakdown of the different bugs and where we found them. And as you can see, they're kind of very evenly spread across the different pipelines, so it's not like there's one pipeline that's responsible for all the bugs that we found. Now, kind of to make sure that everybody understands, since I assume not all people are aware of what these processing pipelines look like, here's a short summary. You can see in the yellow box there is a circuit that the user writes that goes into the compiler, and then the compiler hands some output to the witness generator, and the witness generator can then take the input, so the user input to the circuit, and generate a witness that can then be used by the prover, and then the prover generates a proof that can then be verified to essentially check that the circuit was actually executed. So now that we have a bit of an understanding of what these pipelines look like, let's dive a bit deeper to understand how we find these bugs. So first, I wanna be very clear: we are not cryptographers. This is not my background. My background is in software security, program analysis. And for this reason, I can't really, you know, try to understand the intricate logic in many of these components, which is why we treat these essentially as a black box, so much like what a typical user would do, we just view them as a black box. So how can we still find all these bugs? Well, first, we have a lot of experience in testing complex software for complex domains and building fuzzers for them. So we've done quite a bit of work on fuzzing for smart contracts. It's probably the work that's closest to the audience that's here. And for instance, we created the Harvey fuzzer many years ago, and we're still kind of maintaining it. We also did some work on testing ML models and testing program analysis tools. If you want to know more, check out the papers that are online.
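As a rough illustration of the pipeline just described, the sketch below treats a toolchain as a black box and records the observable behavior of each stage, so that two runs can be compared later. The method names (compile, generate_witness, prove, verify, outputs) are invented placeholders for whatever a concrete pipeline exposes; this is a hedged sketch, not the actual Circus code.

```python
from dataclasses import dataclass
from typing import Any

@dataclass(frozen=True)
class PipelineResult:
    compiled: bool      # did the compiler accept the circuit?
    witness_ok: bool    # could a witness be generated for the input?
    output: Any         # circuit outputs extracted from the witness, if any
    verified: bool      # did proving followed by verification succeed?

def run_pipeline(pipeline, circuit, inputs) -> PipelineResult:
    """Run circuit through compile -> witness -> prove -> verify, black-box style."""
    try:
        prog = pipeline.compile(circuit)
    except Exception:
        return PipelineResult(False, False, None, False)
    try:
        witness = pipeline.generate_witness(prog, inputs)
    except Exception:
        return PipelineResult(True, False, None, False)
    proof = pipeline.prove(prog, witness)
    return PipelineResult(True, True, pipeline.outputs(witness),
                          pipeline.verify(prog, proof))

def behaviors_differ(r1: PipelineResult, r2: PipelineResult) -> bool:
    """Any divergence between two semantically equal circuits flags a bug."""
    return r1 != r2
```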
And the second reason why we were able to find these bugs is that we have a not-so-secret weapon, which is called metamorphic testing. So before explaining in a bit more detail what metamorphic testing is, let me just give you kind of a short summary of how we got here, how we got interested in this topic. So we were, as I said, working on fuzzing for smart contracts mainly, for many years, and then at some point, I guess maybe like a year ago roughly, Consensys released the Linea blockchain. That is one of the first ZK EVMs. And we were like, well, this is really complex. We started looking into it a bit. And we saw that there are these components, like the Gnark library and the Corsair language, for processing zero-knowledge circuits. So we were like, oh, what can we do to test these? What can we do to make sure that there are no bugs in these components? And that's when we started building kind of a predecessor to this fuzzer I'm presenting today, which is called Rio. So Rio is a fuzzer for the Gnark library, specifically. And then this Circus fuzzer I'm presenting today is essentially an evolution of this fuzzer that is a bit more general and can target multiple DSLs. All right. Now back to metamorphic testing. So what is metamorphic testing? I think the shortest way to summarize it: it's kind of a way to define test oracles in a pretty elegant and concise way. So to illustrate, I've collected a few examples that try to explain what metamorphic testing is. So the first example is how to actually test that the sorting function, sort, for an input, let's say an array of integers x, actually does the right thing. Well, there's many ways to check this, but here's a way to check it using metamorphic testing. So you sort the input x, and then you also sort x, but you shuffle it randomly first. And these two invocations of the function should have the same output. So pretty nice and concise specification. So let's look at another example. Here, we want to test essentially some procedure for computing the shortest path in a graph G between the nodes n and m. So how can we do this with metamorphic testing? Well, one way to do it is to say, you know, the shortest path between n and m in the graph G is at most the shortest path between n and m in the graph where we take G but remove some random edge, right? Because that edge might have been on the shortest path, and then the shortest path could become longer, right? So we can also apply the same kind of principle to more complex components, like an entire compiler, right? So here's an example of how to do this with metamorphic testing. When you take a program P and you compile it and then run it, you should essentially see the same behavior as when you take the program P but you add some dead code somewhere randomly, and you compile it and you run it. Shouldn't matter. It should give you the same output, essentially. So with this, let's look at how we can apply the same reasoning principle to zero-knowledge circuits. So the fuzzer, what it does is it first generates a random circuit, C1, and then it applies a random transformation to C1 to get a new circuit, C2. So now we have two circuits that are syntactically perhaps completely different, but semantically they should have the same behavior. And now the fuzzer generates an input, I, and invokes the processing pipeline for both of these circuits.
And if there's any difference in the output or the behavior, then that's a bug somewhere in this processing pipeline. So for instance, for one circuit, we might not be able to generate a witness, but for the other one, we would. Then there's probably a bug somewhere in the compiler or in the witness generator. So let's look at a few of these transformations to kind of understand how the fuzzer does this. So here's a very simple transformation. If you have somewhere in your circuit an expression E, you can always multiply the expression by 1, right? That should not change anything. Or you can also divide by 1. Again, shouldn't have any effect on the circuits. Another transformation is to basically negate the expression E twice. Or you can also apply other transformations, like swapping the two operands of a multiplication. And then we also have some transformations that use essentially new randomly generated expressions. So here, for instance, we replace an expression e with e minus some random expression plus that same random expression. That should, again, not have any effect. And we have a small DSL for describing these circuits, so there's many more of these transformations. So currently we support roughly 90 such rules, but you can easily add more of them and think of new ones if you would like to. So we also found a number of bugs in the different pipelines, and let's look at a few of them just to kind of give you the idea and give you a concrete example that you can look at. So here on the left, we see a small example circuit in the Circom language. You can also look up the GitHub issue if you'd like to. But this is actually a minimized, cleaned-up version of the circuit. And as you can see, there's a variable P. In practice, this is essentially just a constant. It just didn't fit on the slide. So I put it at the bottom. But think of P just as a normal constant in your program. And now what the fuzzer does is it generates a transformation of this circuit, which is the one that's shown on the right. And here it applies a number of metamorphic transformations. So, for instance, it seems like the fuzzer first multiplied the expression P by 1, then subtracted 0 from it, and then divided that expression by 1. And when we execute these two circuits, we can see that the outputs out1 and out2 actually are not the same. So this is bad, and this was a bug that we found. It seemed like there was a bug in the witness generation part. And here's another bug that we found in Gnark. So at the top you can again see the original circuit that the fuzzer generated. And then we can also see the transformed circuit, where the fuzzer essentially changed the expression zero to just zero-or-zero, which should be equivalent. And here, we observed that for C1, there was no witness that was generated, whereas for C2, there was. So this, again, is a bug somewhere in this pipeline. And yeah, now that you've hopefully got an overview of the tool and saw a bit what these bugs look like in practice, I hope more people will start looking into this problem, because I think it's a very important problem. We need to really make sure that these components are bulletproof, because otherwise pretty bad things can happen. And it's better to do this before some, you know, attacker does it, right? And you don't need to be a cryptographer to actually find these bugs.
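Sketching a few of the rewrite rules just listed over a toy expression representation (nested tuples standing in for the fuzzer's real circuit DSL; every name here is invented for illustration):

```python
import random

# Expressions are nested tuples, e.g. ("mul", ("var", "P"), ("const", 1)).
def mul_by_one(e):    return ("mul", e, ("const", 1))
def div_by_one(e):    return ("div", e, ("const", 1))
def double_negate(e): return ("neg", ("neg", e))

def sub_then_add(e, r):
    # e  ->  (e - r) + r  for a randomly generated sub-expression r
    return ("add", ("sub", e, r), r)

RULES = [mul_by_one, div_by_one, double_negate, sub_then_add]

def transform(e, rng=random):
    """Apply one randomly chosen semantics-preserving rewrite to e."""
    rule = rng.choice(RULES)
    if rule is sub_then_add:
        return rule(e, ("const", rng.randrange(1, 1 << 16)))
    return rule(e)

# Stacking several rewrites mimics the 'multiply by 1, subtract 0, divide by 1'
# chain seen in the Circom bug described above.
expr = ("var", "P")
for _ in range(3):
    expr = transform(expr)
```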
So, you know, that gives us a lot more people that can actually look into this. Because I think so far we've only really scratched the surface, so there's more that needs to be done to test these components. Also, there's components like this popping up every now and then. So we really need to make sure they're safe. And we also should do continuous fuzzing of all these components. So for the Gnark team, we actually implemented a continuous fuzzing setup, where whenever they commit the latest version to master, we start a new fuzzing campaign. We run for 24 hours. We tell them if something is wrong. And then the next day, the same thing happens. We're also planning to do the same for Corsair. And if you're interested in us taking a look at your ZK infrastructure, please reach out. And if you want to know more about what's going on in the fuzzer, then please check out our paper. You can scan the QR code there if you want to take a look. Yeah. Fantastic. Thank you, Valentin, for this presentation. Let's get to the questions, shall we? Yeah. All right. So I've sorted the questions. Again, you have a QR code here. You can ask your questions, and I'll ask the ones that are the most upvoted. So let's go to the first one. When you identify a bug, can you just say, hey, there's a bug, or do you provide a path to fix it? Do you identify where the bug exists within the circuit? We don't directly identify where the bug is, but when we generate the test inputs, like the circuits, we try to minimize them. So we try to keep them as small as possible so the developers can really, hopefully quite easily, identify where things go wrong. So far, I think the developers were very happy with the bugs reported. And I think there were no issues finding the bug once they had the input. Makes sense. Thank you. Second question. Do you fuzz logical expressions during coding in circuits? Yeah, we do. I think, yeah, so for instance, we apply some, you know, some common transformations like applying De Morgan's rule and so on. So there's a bunch of these transformations that we use in the fuzzer. Wonderful. Third, where is the bug nest? Where do bugs usually reside in your historical fuzzing, from what you've seen? I think it's hard to... can you really derive enough data from... you found 17, right? Yeah, yeah. I think it's probably too early to say, but we did observe that the fuzzer found bugs basically in different components. So we found bugs in the compilers, we found bugs in the witness generator, we found bugs in the prover as well. I don't think we found bugs in the verifier. It seems like, yeah, that's kind of towards the end of the pipeline. And I think many people are very concerned about getting the verifier right. So maybe that's also paying off here. But we'll keep trying. We'll see. Nice. Okay. When Cairo fuzzing? Yeah. I guess you answered it in your slide. If we're interested, people can talk to you. If somebody from Starkware wants to look more at Cairo, then yeah, please reach out. We're interested. It's definitely a good idea to do that, yeah. Wonderful. Well, I'll reach out. What do you think of concolic testing of ZK circuits? I had to ask ChatGPT what concolic testing is. Okay. I still don't understand it. Okay.
Yeah, so concolic testing, essentially, I mean, for the audience, maybe: it's essentially a way to, you know, generate new inputs by doing some kind of symbolic execution. But basically, some expressions might be really complex. You have non-linear expressions in these circuits. So there, you might want to concretize some inputs. And that's the concolic part. So it's concrete and symbolic. But yeah, so I think that's an interesting area. It's not what we focused on, because we're not so much interested in actually generating inputs for these circuits. That's something you would probably want to do if you have a specific circuit that you want to get right and you want to make sure it satisfies some properties; then you probably should think about concolic testing for that circuit. Yeah. Perfect. Thank you. How does it feel to find a bug? Like, literally, when you find one, is it scary? Is it exciting? I mean, you're looking for those, so it's probably a bit exciting, but it's also scary. There's probably money on the line. How does it feel? No, it's definitely exciting. I work a lot with students. You can see they're excited when they find a bug. They're happy they're making an impact. And also the reactions from the developers are a great motivation to find more bugs, because usually they're very... yeah, they have been very positive. I don't believe you. There's no way you go to people and say, I broke your shit. And they're like, oh yeah, amazing. Yeah. I mean, if you're an L2 and one bug like this can wreck your system, then in some sense your job is also on the line. So you should... you want to find those bugs. I'm not saying you don't want to. It's just a theory. When I think about bugs, I think, like, yes, I want to fix them. But the first thing that comes to mind is like, oh, yes, no. But you're right. It's better to be aware of it first than not. Okay, we can actually take the last... oh, one second's left. Let's wrap it up, or do you want to... Are you using property-based testing for choosing the input? I mean, you can call it property-based testing. We're generating the inputs right now completely randomly, so it's kind of black-box fuzzing that we're doing. And we're also considering doing more, you know, feedback-directed fuzzing, like you saw in the last talk. Yeah. Fantastic. Thank you, Valentin.
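For reference, the two warm-up metamorphic oracles from earlier in the talk (sorting under shuffling, and shortest paths under edge deletion) can be written out in a few lines. networkx is used here purely for brevity; any graph representation would do, and the function names are this sketch's own.

```python
import random
import networkx as nx

def check_sort_oracle(xs):
    """sort(x) and sort(shuffle(x)) must agree."""
    ys = list(xs)
    random.shuffle(ys)
    assert sorted(xs) == sorted(ys)

def check_shortest_path_oracle(g: nx.Graph, n, m):
    """Removing a random edge can only make the n-m shortest path longer."""
    base = nx.shortest_path_length(g, n, m)
    h = g.copy()
    h.remove_edge(*random.choice(list(h.edges)))
    try:
        assert nx.shortest_path_length(h, n, m) >= base
    except nx.NetworkXNoPath:
        pass  # deleting the edge may disconnect n and m entirely, which is fine
```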
Thank you.", "eventId": "devcon-7", - "slot_start": 1731556800000, - "slot_end": 1731557100000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/19JLbZ-yVksBM4TM3ftOIccuAC6UgKAKtM0nVGAdWdQ4", - "resources_slides": null, + "slot_start": 1731570000000, + "slot_end": 1731571800000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1MowR6E3eFzSs1jXPUxgTBxReXgDFk6pgjqMA7hnC7t8", + "resources_slides": "https://drive.google.com/file/d/11z833igJ75d0WUxo9T9c2lW-ud_eQcI1/view", "speakers": [ - "tuyen-dinh" + "thomas-thiery" ] }, "vector": [ @@ -353393,6 +352405,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -353401,7 +352414,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -354250,11 +353262,6 @@ 0, 0, 0, - 2, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -354285,6 +353292,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -354481,6 +353489,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -354701,6 +353710,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -354711,9 +353721,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -354725,49 +353732,52 @@ }, { "session": { - "id": "fork-choice-enforced-inclusion-lists-focil", - "sourceId": "CDTX78", - "title": "Fork-Choice enforced Inclusion Lists (FOCIL)", - "description": "A direct consequence of centralized block production is a deterioration of Ethereum's censorship resistance properties. In this talk, we introduce FOCIL, a simple committee-based design improving upon previous inclusion list and co-created block mechanisms. We present the benefits of (1) relying on a committee to address issues related to bribing/extortion attacks, and (2) having attesters enforce the IL as part of the block validity condition to prevent IL equivocation.", - "track": "Core Protocol", - "type": "Talk", + "id": "formal-verification-in-the-ethereum-protocol-current-status-and-future-directions", + "sourceId": "KQCGWV", + "title": "Formal Verification in the Ethereum Protocol: Current Status and Future Directions", + "description": "Vitalik believes \"ethereum's biggest technical risk probably is bugs in code, and anything that could significantly change the game on that would be amazing\". Formal verification is a key technology which many believe could significantly help. However, it has yet to see wide adoption for a variety of reasons. This panel will bring together formal verification experts working in blockchain to discuss the challenges faced in increasing the use of formal verification within the community.", + "track": "Security", + "type": "Panel", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Design", - "mechanism" - ], "keywords": [ - "Censorship Resistance", - "Inclusion Lists", - "Mechanism Design" + "model checking", + "theorem proving" + ], + "tags": [ + "Security", + "Formal Verification", + "Testing", + "proving", + "theorem", + "Formal Verification", + "Security", + "Testing" ], - "duration": 1535, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "d68ae03f4422ca57d79f914604ad876858505c2d4fddb19bfaaebd80f763b91f", + "sources_youtubeId": "J71rLx8dBXY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736c9979dbb7a90e11fece0", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736c85374749a4b89b3ae2c.vtt", - "transcript_text": " Good morning everyone. 
So today I'll talk about fuzzing for zero-knowledge infrastructure, and this is joint work with my collaborators from the Technical University of Vienna. So just to kind of make sure we are all on the same page, let's have a short definition of what I understand by zero-knowledge infrastructure. So for this talk I'll define zero-knowledge infrastructure as software components that are used for compiling, executing, proving and verifying ZK circuits. So examples would be the processing pipelines that are commonly used by DSLs for describing ZK circuits, or maybe in the future we'll also look at entire ZK EVMs. So with this out of the way, let's look at why this is an important topic and why more people should be doing this. Well, first, zero-knowledge infrastructure is highly complex and highly critical. For instance, it's used in several L2 chains, so bugs there could have catastrophic financial and reputational impact, and we should really make sure these components are as bulletproof as they can get. While we haven't seen a really catastrophic incident in this field, perhaps comparable to the DAO hack in 2016, it's really important that we have rigorous testing for these components and that we use, you know, the best engineering discipline that we can have, right? Because these components are really complex, and getting them right is not easy. So what is fuzzing? I don't think I have to explain this after the last talk, but fuzzing is widely used in industry. For instance, at Microsoft, Google, Meta, they're fuzzing a lot of their infrastructure to catch bugs before hackers can actually exploit them in practice. So in this talk, I'll give you an overview of our fuzzer for finding critical bugs in processing pipelines for zero-knowledge circuits. The fuzzer is called Circus, and it already supports four of these pipelines, namely Circom, Corsair, Gnark, and Noir. And there's a gazillion different DSLs popping up, it seems. Like, you walk around here and you see another one in the corner. And we've already found 16 bugs in total for the four pipelines we looked at. 15 have already been fixed, so that kind of shows that these bugs are really taken seriously, and developers sometimes respond within hours to actually get them fixed. You can also see a kind of breakdown of the different bugs and where we found them. And as you can see, they're kind of very evenly spread across the different pipelines, so it's not like there's one pipeline that's responsible for all the bugs that we found. Now, kind of to make sure that everybody understands, since I assume not all people are aware of what these processing pipelines look like, here's a short summary. You can see in the yellow box there is a circuit that the user writes that goes into the compiler, and then the compiler hands some output to the witness generator, and the witness generator can then take the input, so the user input to the circuit, and generate a witness that can then be used by the prover, and then the prover generates a proof that can then be verified to essentially check that the circuit was actually executed. So now that we have a bit of an understanding of what these pipelines look like, let's dive a bit deeper to understand how we find these bugs. So first, I wanna be very clear: we are not cryptographers. This is not my background. My background is in software security, program analysis.
And for this reason, I can't really, you know, try to understand the intricate logic in many of these components, which is why we treat these essentially as a black box, so much like what a typical user would do, we just view them as a black box. So how can we still find all these bugs? Well, first, we have a lot of experience in testing complex software for complex domains and building fuzzers for them. So we've done quite a bit of work on fuzzing for smart contracts. It's probably the work that's closest to the audience that's here. And for instance, we created the Harvey fuzzer many years ago, and we're still kind of maintaining it. We also did some work on testing ML models and testing program analysis tools. If you want to know more, check out the papers that are online. And the second reason why we were able to find these bugs is that we have a not-so-secret weapon, which is called metamorphic testing. So before explaining in a bit more detail what metamorphic testing is, let me just give you kind of a short summary of how we got here, how we got interested in this topic. So we were, as I said, working on fuzzing for smart contracts mainly, for many years, and then at some point, I guess maybe like a year ago roughly, Consensys released the Linea blockchain. That is one of the first ZK EVMs. And we were like, well, this is really complex. We started looking into it a bit. And we saw that there are these components, like the Gnark library and the Corsair language, for processing zero-knowledge circuits. So we were like, oh, what can we do to test these? What can we do to make sure that there are no bugs in these components? And that's when we started building kind of a predecessor to this fuzzer I'm presenting today, which is called Rio. So Rio is a fuzzer for the Gnark library, specifically. And then this Circus fuzzer I'm presenting today is essentially an evolution of this fuzzer that is a bit more general and can target multiple DSLs. All right. Now back to metamorphic testing. So what is metamorphic testing? I think the shortest way to summarize it: it's kind of a way to define test oracles in a pretty elegant and concise way. So to illustrate, I've collected a few examples that try to explain what metamorphic testing is. So the first example is how to actually test that the sorting function, sort, for an input, let's say an array of integers x, actually does the right thing. Well, there's many ways to check this, but here's a way to check it using metamorphic testing. So you sort the input x, and then you also sort x, but you shuffle it randomly first. And these two invocations of the function should have the same output. So pretty nice and concise specification. So let's look at another example. Here, we want to test essentially some procedure for computing the shortest path in a graph G between the nodes n and m. So how can we do this with metamorphic testing? Well, one way to do it is to say, you know, the shortest path between n and m in the graph G is at most the shortest path between n and m in the graph where we take G but remove some random edge, right? Because that edge might have been on the shortest path, and then the shortest path could become longer, right? So we can also apply the same kind of principle to more complex components, like an entire compiler, right? So here's an example of how to do this with metamorphic testing.
When you take a program P and you compile it and then run it, you should essentially see the same behavior as when you take the program P but you add some dead code somewhere randomly, and you compile it and you run it. Shouldn't matter. It should give you the same output, essentially. So with this, let's look at how we can apply the same reasoning principle to zero-knowledge circuits. So the fuzzer, what it does is it first generates a random circuit, C1, and then it applies a random transformation to C1 to get a new circuit, C2. So now we have two circuits that are essentially syntactically, perhaps completely different, but semantically they should have the same behavior. And now the fuzzer generates an input, I, and invokes the processing pipeline for both of these circuits. And if there's any difference in the output or the behavior, then that's a bug somewhere in this processing pipeline. So for instance, for one circuit, we might not be able to generate a witness, but for the other one, we would. Then there's probably a bug somewhere in the compiler or in the witness generator. So let's look at a few of these transformations to kind of understand how the fuzzer does this. So here's a very simple transformation. If you have somewhere in your circuit an expression E, you can always multiply the expression by 1, right? That should not change anything. Or you can also divide by 1. Again, shouldn't have any effect on the circuits. Another transformation is to basically negate the expression E twice. Or you can also apply other transformations, like swapping the two operands of a multiplication. And then we also have some transformations that use essentially new randomly generated expressions. So here, for instance, we replace an expression e with e minus some random expression plus that same random expression. That should, again, not have any effect. And we have a small DSL for describing these circuits, so there's many more of these transformations. So currently we support roughly 90 such rules, but you can easily add more of them and think of new ones if you would like to. So we also found a number of bugs in the different pipelines, and let's look at a few of them just to kind of give you the idea and give you a concrete example that you can look at. So here on the left, we see a small example circuit in the Circom language. You can also look up the GitHub issue if you'd like to. But this is actually a minimized, cleaned-up version of the circuit. And as you can see, there's a variable P. In practice, this is essentially just a constant. It just didn't fit on the slide. So I put it at the bottom. But think of P just as a normal constant in your program. And now what the fuzzer does is it generates a transformation of this circuit, which is the one that's shown on the right. And here it applies a number of metamorphic transformations. So, for instance, it seems like the fuzzer first multiplied the expression P by 1, then subtracted 0 from it, and then divided that expression by 1. And when we execute these two circuits, we can see that the outputs out1 and out2 actually are not the same. So this is bad, and this was a bug that we found. It seemed like there was a bug in the witness generation part. And here's another bug that we found in Gnark. So at the top you can again see the original circuit that the fuzzer generated.
And then we can also see the transformed circuit, where the fuzzer essentially changed the expression zero to just zero-or-zero, which should be equivalent. And here, we observed that for C1, there was no witness that was generated, whereas for C2, there was. So this, again, is a bug somewhere in this pipeline. And yeah, now that you've hopefully got an overview of the tool and saw a bit what these bugs look like in practice, I hope more people will start looking into this problem, because I think it's a very important problem. We need to really make sure that these components are bulletproof, because otherwise pretty bad things can happen. And it's better to do this before some, you know, attacker does it, right? And you don't need to be a cryptographer to actually find these bugs. So, you know, that gives us a lot more people that can actually look into this. Because I think so far we've only really scratched the surface, so there's more that needs to be done to test these components. Also, there's components like this popping up every now and then. So we really need to make sure they're safe. And we also should do continuous fuzzing of all these components. So for the Gnark team, we actually implemented a continuous fuzzing setup, where whenever they commit the latest version to master, we start a new fuzzing campaign. We run for 24 hours. We tell them if something is wrong. And then the next day, the same thing happens. We're also planning to do the same for Corsair. And if you're interested in us taking a look at your ZK infrastructure, please reach out. And if you want to know more about what's going on in the fuzzer, then please check out our paper. You can scan the QR code there if you want to take a look. Yeah. Fantastic. Thank you, Valentin, for this presentation. Let's get to the questions, shall we? Yeah. All right. So I've sorted the questions. Again, you have a QR code here. You can ask your questions, and I'll ask the ones that are the most upvoted. So let's go to the first one. When you identify a bug, can you just say, hey, there's a bug, or do you provide a path to fix it? Do you identify where the bug exists within the circuit? We don't directly identify where the bug is, but when we generate the test inputs, like the circuits, we try to minimize them. So we try to keep them as small as possible so the developers can really, hopefully quite easily, identify where things go wrong. So far, I think the developers were very happy with the bugs reported. And I think there were no issues finding the bug once they had the input. Makes sense. Thank you. Second question. Do you fuzz logical expressions during coding in circuits? Yeah, we do. I think, yeah, so for instance, we apply some, you know, some common transformations like applying De Morgan's rule and so on. So there's a bunch of these transformations that we use in the fuzzer. Wonderful. Third, where is the bug nest? Where do bugs usually reside in your historical fuzzing, from what you've seen? I think it's hard to... can you really derive enough data from... you found 17, right? Yeah, yeah. I think it's probably too early to say, but we did observe that the fuzzer found bugs basically in different components. So we found bugs in the compilers, we found bugs in the witness generator, we found bugs in the prover as well.
I don't think we found bugs in the verifier. It seems like, yeah, that's kind of towards the end of the pipeline. And I think many people are very concerned about getting the verifier right. So maybe that's also paying off here. But we'll keep trying. We'll see. Nice. Okay. When Cairo fuzzing? Yeah. I guess you answered it in your slide. If we're interested, people can talk to you. If somebody from Starkware wants to look more at Cairo, then yeah, please reach out. We're interested. It's definitely a good idea to do that, yeah. Wonderful. Well, I'll reach out. What do you think of concolic testing of ZK circuits? I had to ask ChatGPT what concolic testing is. Okay. I still don't understand it. Okay.
Thank you.", - "eventId": "devcon-7", - "slot_start": 1731570000000, - "slot_end": 1731571800000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1MowR6E3eFzSs1jXPUxgTBxReXgDFk6pgjqMA7hnC7t8", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "thomas-thiery" - ] + "david-pearce", + "igor-konnov", + "julian-sutherland", + "zoe-p" + ], + "eventId": "devcon-7", + "slot_start": 1731465900000, + "slot_end": 1731469500000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1v3H83g6kUyGEXtHlMSYEBQw6ksu6---QQw0rs5zcxM8", + "resources_slides": "https://drive.google.com/file/d/1O2JNnJWPh93Pn2jNl4yQp_CJGtb-DFHD/view" }, "vector": [ - 0, - 0, - 0, - 0, 6, 0, 0, @@ -354821,6 +353831,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -354945,6 +353956,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -355121,14 +354133,10 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -355518,6 +354526,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -355631,6 +354640,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -355658,7 +354668,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -355714,6 +354723,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -355760,6 +354770,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -356076,13 +355087,10 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, - 0, 2, 0, 0, @@ -356095,55 +355103,53 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "formal-verification-in-the-ethereum-protocol-current-status-and-future-directions", - "sourceId": "KQCGWV", - "title": "Formal Verification in the Ethereum Protocol: Current Status and Future Directions", - "description": "Vitalik believes \"ethereum's biggest technical risk probably is bugs in code, and anything that could significantly change the game on that would be amazing\". Formal verification is a key technology which many believe could significantly help. However, it has yet to see wide adoption for a variety of reasons. This panel will bring together formal verification experts working in blockchain to discuss the challenges faced in increasing the use of formal verification within the community.", - "track": "Security", - "type": "Panel", - "expertise": "Intermediate", + "id": "fossify-yourself-for-privacy-and-security", + "sourceId": "TW7QGF", + "title": "FOSSify yourself for privacy and security", + "description": "You will leave this workshop at least a bit more cypherpunk than when you came. The session will introduce FOSS stack of tools for all platforms. We will discuss free operating systems, GNU/Linux distros, GrapheneOS, secure communication, browsing, hardware options and secure environment for handling your crypto or Ethereum validators.\r\nThe workshop is interactive and open to anyone to participate. 
Join us to find free and open solutions to your problems or come to share your favorite foss tools!", + "track": "Cypherpunk & Privacy", + "type": "Workshop", + "expertise": "Beginner", "audience": "Engineering", "featured": false, - "doNotRecord": false, + "doNotRecord": true, "keywords": [ - "model checking", - "theorem proving" + "free software", + "degoogle", + "self hosting" ], "tags": [ + "Privacy", "Security", - "Formal Verification", - "Testing", - "proving", - "theorem", - "Formal Verification", - "Security", - "Testing" + "self", + "hosting", + "Privacy", + "Security" ], "language": "en", "speakers": [ - "david-pearce", - "igor-konnov", - "julian-sutherland", - "zoe-p" + "mario-havel" ], "eventId": "devcon-7", - "slot_start": 1731465900000, - "slot_end": 1731469500000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1v3H83g6kUyGEXtHlMSYEBQw6ksu6---QQw0rs5zcxM8" + "slot_start": 1731553200000, + "slot_end": 1731558600000, + "slot_roomId": "classroom-e", + "resources_presentation": "https://docs.google.com/presentation/d/1PShw8A7XomH3DtlwmgLZcgMrPY11XvLp_EuNeSwghoQ", + "resources_slides": "" }, "vector": [ - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -356191,7 +355197,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -356316,7 +355321,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -356450,6 +355454,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -356496,8 +355501,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -357003,6 +356006,8 @@ 0, 0, 0, + 0, + 0, 2, 0, 0, @@ -357086,7 +356091,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -357133,18 +356137,17 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -357232,7 +356235,7 @@ 0, 0, 2, - 0, + 2, 0, 0, 0, @@ -357454,8 +356457,6 @@ 0, 2, 0, - 0, - 0, 2, 0, 0, @@ -357474,38 +356475,48 @@ }, { "session": { - "id": "fossify-yourself-for-privacy-and-security", - "sourceId": "TW7QGF", - "title": "FOSSify yourself for privacy and security", - "description": "You will leave this workshop at least a bit more cypherpunk than when you came. The session will introduce FOSS stack of tools for all platforms. We will discuss free operating systems, GNU/Linux distros, GrapheneOS, secure communication, browsing, hardware options and secure environment for handling your crypto or Ethereum validators.\r\nThe workshop is interactive and open to anyone to participate. Join us to find free and open solutions to your problems or come to share your favorite foss tools!", - "track": "Cypherpunk & Privacy", - "type": "Workshop", - "expertise": "Beginner", - "audience": "Engineering", + "id": "fraud-proofs-war", + "sourceId": "UTTXWB", + "title": "Fraud proofs war", + "description": "Fraud proof systems were originally envisioned to be able to protect a rollup with just a single honest challenger assumption. As it turns out, the matter is much more complex because of exhaustion attacks, a form of sybil attack where the attacker tries to win by economically outlasting the defenders. 
The talk discusses the tradeoffs in the proposed solutions to this form of attack by analyzing Arbitrum, Cartesi and Optimism fraud proof systems.", "track": "Layer 2", "type": "Talk", "expertise": "Expert", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ "Optimistic rollups", "Challenge period", "Mechanism design", "fraud", "proof", "Challenge period", "Mechanism design", "Optimistic rollups" ], "keywords": [ "Fraud", "proofs" ], "duration": 1471, "language": "en", "sources_swarmHash": "6e327022853abdfce60bfc6ae70d8d83839fc2348138d39e128a8834bba9b846", "sources_youtubeId": "k0UooaY7VQ0", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6734395c9dbb7a90e17a7cc5", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6734395c9dbb7a90e17a7cc5.vtt", "transcript_text": " Hey everyone, I'm Luca, I do research at L2BEAT and I'm here to talk about fraud proofs. So there are many different types of fraud proofs. There are off-chain fraud proofs, on-chain fraud proofs, single round, multi-round. This talk is mainly about on-chain multi-round proof systems, which is what the top optimistic roll-ups use today, like Arbitrum, Optimism or BASE. And the thing is that these two proof systems, so the one developed by Arbitrum and the one developed by Optimism, are very different. There are very different trade-offs, and my goal here is to give you an intuition on why they're different, and the security trade-offs that they make. So first of all, what are optimistic rollups? It's very simple intuitively: you need to communicate the L2 state to L1. So there is an actor, could be yourself, that publishes the state roots of the L2 on L1, and there is a timer. If this timer expires, the state root is optimistically considered correct so that it can be used for withdrawals. This is the happy case. You don't need to prove anything about the correctness of the state. In the not so happy case, you can have this actor that publishes an invalid state root, and now what do you do? You need to prove that this state root is incorrect. And as I said, there are a lot of different designs, and this is what this talk is about. So the original vision about optimistic roll-ups was the following: that any single honest challenger can protect an optimistic roll-up with a fixed challenge period. So, you know, Optimism and Arbitrum have a seven days challenge period. If there is anyone monitoring the chain and they see that some state root is incorrect, they should be able to challenge it. So this was the original vision. The problem is that this is not possible at all, and I'm going to explain to you why. So the main problem on why this is not possible are Sybil attacks. So let's say that you're an attacker. What you can do in an optimistic roll-up is propose millions of invalid state roots at the same time. So you, as the single honest challenger, how do you defend yourself? How do you defend the chain? You know, imagine you have millions, again, and you need to prove all of them wrong to protect the funds that are in the bridge. There are two ways to do this.
The first one is what I call full concurrency, which means you have millions of invalid state roots and you need to challenge all of them at the same time. Which means that for each state root, there is a challenge period, which could be seven days. All these challenge periods overlap. So the advantage of this approach is that, since again all the challenge periods overlap, after seven days some of them will be confirmed. So your settlement delay is seven days. The problem is that challenging state roots has a cost, has a gas cost, because challenges are interactive, you need to interact with the L1 multiple times, and what an attacker can do in this case is trying to outspend you. So the attacker again publishes a lot of invalid state roots, you need to spend funds to challenge all of them, and it could be that at a certain point you don't have funds anymore, and one of the invalid state roots will be unchallenged, and it will be confirmed, stealing everything. So I have an example here with some data. Let's say that the challenge period is seven days. They overlap for all these invalid state roots. The cost of challenging on chain, like the gas cost, is one ETH. This is symmetrical for the defender and the attacker because challenges are interactive. There are 1,000 ETH in the bridge. The attacker has 800 ETH. The honest challenger has only 500. At a certain point, the attacker is going to outspend the defender, and the attacker is going to win. So right now, it's not true that any single honest challenger can protect an optimistic roll-up with a fixed period, because not anyone has the same amount of funds as the attacker to protect the chain. So let's say I'm a simple user that only has $1,000 on a chain, and the bounty of the bridge is millions of dollars. An attacker can spend millions to attack it, I cannot defend it, clearly. So this is not true; the correct statement is that challengers with more funds than the attackers can protect an optimistic roll-up with a fixed challenge period, which is a much stronger claim. The second option is partial concurrency. Which is, instead of challenging all the invalid state roots at the same time, I challenge them one at a time. So I create this kind of queue, and I play one challenge. When I finish one challenge, I go to the next one. And this is partial concurrency, because you can still have some kind of concurrency in the sense that if you have multiple defenders, they can play against different attackers in parallel, but like in the worst case, if you're alone, you need to play against all of them one after the other. So let's go through an example. Again, let's say the challenge period is seven days per challenge and there are multiple challenges. The cost is again one ETH. The funds in the bridge are 1,000 ETH. The attacker funds are 800. And the honest challenger now has very little funds. Having little funds here is not a problem, because you only need one ETH for the first challenge. Then when you win, because you're guaranteed to win if you're honest, you will get the bonds of the adversary. And with that bond, you can play the next one. So, like, you need very little funds. But the problem is that you have an attacker that can spend 800 ETH to play one challenge. You're going to have a delay of 800 weeks, because one challenge takes one week and you play them sequentially, so you will have 800 weeks of delay, which is more than 15 years. So this is what is called a delay attack.
On proof systems that use partial concurrency. So here again, the claim that any single honest challenger can protect an optimistic roll-up with a fixed challenge period is not true. The correct statement is that any single honest challenger, in the sense that you need little funds, so it's very decentralized, it can be you, can protect an optimistic roll-up with a non-fixed challenge period, because they use partial concurrency. So there is this trade-off. When you need to design a fraud proof system, you need to decide whether you want to use full concurrency or partial concurrency, which means you are affected either by resource exhaustion attacks, which means that the attacker can try to outspend you, or by delay attacks. There is this trade-off. So now let's try to optimize these two cases, because it's not just as bad as I told you, but there is something that you can do to improve the situation. In full concurrency, what you can do is to add bonds. When you add bonds, you add a cost to create challenges. So as I said, the cost of one challenge can be one ETH, but if creating a challenge costs 10 ETH, it means that the attacker needs to spend 10 ETH to induce me, as the honest defender, one ETH of cost, right? So right now there is asymmetry: the sybils have to stake a lot of bonds, I only need to stake one, and for each bond they induce me gas cost on playing the challenge, the bond is higher than the cost of one challenge. So let's go through an example. Again you have a challenge period of seven days. You have bonds that are ten ETH. The cost of one challenge is one ETH. The funds in the bridge are 1,000. The attacker funds are 800 as before. Honest challenger funds are 500 as before. But now the defenders win, because with 800 ETH the attacker can only generate 80 sybils, which means that I will only spend 80 ETH. I have more than that, so in this case I win, even though I have less funds than the attacker. The problem here is that this is very complex to do. You need something that is called execution history commitments, which means that you cannot allow... The problem is you need to play the bisection game when you need to go through a challenge, and you should be able to allow everyone to play within the same claim, because otherwise the problem is... So the interactive game works as follows. I propose a state root, the game is interactive, the next player is going to ask me for a midpoint, and the problem is I can lie. I can propose the correct state root at the beginning and then propose an invalid bisection. And then I can lose on purpose in this way. So the problem is I can only have this asymmetry if people can join together and honest claims cannot lose on purpose. And the way that you do this, the way you prevent invalid bisections, is by having a commitment over all the steps, the state after all the steps, so that when you bisect, you can provide a Merkle proof that the bisection is correct. But again, this is super complex. You are not supposed to understand this, because as I said, this is super, super complex. So improving on full concurrency is very complex. You need to add levels, because now when you have execution history commitments over all the steps, you need to merkleize the state for all the steps. These steps are like on the order of 2 to the 70 steps. This is unfeasible, so you cannot merkleize all of them. You need to have bigger chunks. And then when you bisect and you reach one chunk, you need to repeat the protocol recursively.
And then when you do this, you create another type of attack across levels. And then you might say, I don't want to use levels at all, so let me plug in ZK proofs. Again, you're not supposed to understand this. You're just supposed to understand that this gets very complicated when you want to improve on full concurrency. So in practice, Arbitrum, the new proof system that they will deploy soon, and Optimism on mainnet today use full concurrency. So they have fixed challenge periods of seven days. Arbitrum uses this very complicated set-up called execution history commitments. Optimism doesn't. And for this reason, Arbitrum can have what is called a resource ratio of 15%, which means if an attacker, let's say, has 1,000 ETH, me as a defender, to protect the chain, I only need 15% of the funds that the attacker is willing to spend, as in the example we made before with bonds. While Optimism doesn't have this, they cannot have this asymmetry, this advantage, and indeed if you are a defender and you want to protect the chain, you need 109% of the funds that the attacker has. So like more than 100%. Like significantly more, I would say. So again, imagine you have a bridge with 1 million ETH: an attacker can spend a little less than that, and you need more than that to protect the chain. So it's a lot of funds. The problem is you need very big bond sizes, because your bond size needs to be a multiple of the gas cost to play a challenge. And in Arbitrum, this bond size is 3600 ETH, which is insane. So you affect decentralization by a lot. And you have this trade-off between resource ratio and bond size, because, as I said, when you increase the bond size, you can have a better resource ratio, like a lower resource ratio, which is better, which improves safety, but affects decentralization. Let's try to optimize partial concurrency. So, instead of having all the challenges one after the other, what you can do is to create a tournament, like a bracket tournament. This is something that was developed by Cartesi, which is another team working on fraud proofs. So what you can do is you create this bracket tournament, the matches are in parallel, and if you have a lot of sybils, since they are in this tournament, they will eliminate each other, because they are paired together. So this is a strategy to make the sybils eliminate each other. And then, since the brackets are sequential, the honest challenger will only play a logarithmic number of challenges compared to the number of sybils. So if you go to an example, right now the delay is not 800 weeks, but it's a logarithm of that, which is two months and one week. And in practice, the current Arbitrum classic protocol, the one that is live on mainnet and that will be replaced, uses partial concurrency. Cartesi will use partial concurrency as well, and Cartesi uses the tournament. Arbitrum doesn't. Initial bond size is, let's say, 3 ETH. Cartesi is much better than Arbitrum because, again, they use these... There are a lot of caveats here, but to simplify, a tournament is a very nice optimization to use when you want to use partial concurrency. And the bond sizes that you need are super small as well.
So now you also have this trade-off between initial bond size and delay attacks, because if you increase the bond size in partial concurrency, the attacker can create fewer sybils, and when you have fewer sybils, you have a lower number of challenge periods. So like a lower number of matches, which means also the settlement is faster. This is the summary, more or less, which means that you can see on the vertical axis that if you go full concurrency, you're faster, your settlement gets faster, but you sacrifice safety and decentralization. If you go partial concurrency, it's very decentralized, very safe, because you need very low bonds, and your resource ratio is like, it's not even a ratio, it's constant. Then you can play with bond sizes for partial concurrency to be faster but less decentralized, or you can play with bonds on full concurrency to be more safe, to have a better resource ratio, sacrificing decentralization, or you can also play in principle with the challenge period: if you have a shorter challenge period, your settlement will be faster, but you sacrifice safety significantly. So I have some further points. I don't have time to discuss all of them, but one point is, is there any way to reduce this challenge period? Because right now it's seven days for all of them. Can we go below that? The answer is no. I don't think we can do this. Because, first of all, why seven days? The reason is to protect from a strong censorship attack on Ethereum. So there are two types of censorship that you can have. You can have weak censorship or strong censorship. Weak censorship means that you have a certain percentage of builders censoring transactions. You know, like as a context, censoring is a concern because if the honest challenger is censored, the invalid state is going to be confirmed, right? So if there is a percentage of builders that is censoring, this is not really a problem, because you still have a high probability of getting included within a short amount of time. The main problem is that if there is a majority of attesters, like 51% of attesters censoring, all the transactions from the honest defenders will be excluded from the chain. So now we will need to coordinate a hard fork to fork away the censoring validators. And we agreed a while ago that to do this, to coordinate a hard fork, we need around seven days, so if your protocol uses a challenge period that is less than seven days, you cannot protect from strong censorship attacks. Do optimistic roll-ups make sense if we have ZK? I think they do, in the sense that you cannot be cheaper than an optimistic roll-up. In the happy case, you don't have costs, apart from the bare execution of your state transition function. You don't have the proof generation cost. Your throughput is the same as the machine. And, well, in ZK, you can parallelize proof generation and so on, but there are also centralization concerns. So, like, if you want to exit from a roll-up, not everyone has the hardware or the, like, ZK ASICs to prove a lot of blocks by themselves if they are censored. So optimistic roll-ups have these nicer properties. But should more projects be ZK? I think so. I think the answer is yes, in the sense that, like, with most of the L2s today, we have L2s, we have scalability. The problem that L2s created is fragmentation. Most of the L2s want to fix fragmentation.
Can you have interoperability with a seven-day challenge period? I don't think so. So every project that aims at being interoperable with all the others, I think they should be ZK. For all the other use cases, if you're building a game, you don't need interoperability. I think the optimistic approach is great. So that's it. Thank you. Wow, committing fraud is really complicated. I thought you just signed a different name on the check. So we got some questions from the audience. And the first one up there, I saw this one going up, it's really interesting. In all optimistic L2s, how many times has there been a fraud actually reported and acted upon? We actually have a few examples of fraud proofs happening on-chain. But those fraud proofs are not the result of people being malicious. Most of the time it was just that there were some validators, some nodes that didn't upgrade their software. So they saw an invalid state root because they were running on a previous version. This has happened in Kroma, I think. It has happened in Kinto recently, which is an Orbit stack, like an Orbit fork. So another example that is a fun one is on Fuel V1: we at L2BEAT tried to steal $8 from Fuel V1. And, you know, Fuel V1 is this app-specific roll-up that no one uses, basically. There's no activity. So, like, we wanted to see whether there was anyone trying to defend the chain, even if it was not used. The bond size was 0.5 ETH, and we lost. So like all the TVL in Fuel V1, it's like our funds. Because there was someone watching, indeed. Yeah, that Fuel V1 story is going to be the stuff of legend. Next question. Can't the challenger just challenge the first wrong state root, hence invalidating all state roots that are proposed after by the proposer? Well, it depends on the protocol. In most of the protocols, like Optimism today, all the state roots are independent. So invalidating one state root doesn't invalidate all of the others, but also you need to allow multiple proposals to be sent for the same state. Because it's not just that state roots are sequential, it could be that for a single point in time, for a single block number, you need to have multiple proposals, because again you're going to have a lot of sybils, so you need to challenge all of them. You could have a fallback mechanism such that if multiple state roots are confirmed, you halt or something. But challenging one state root is not enough. That's the TLDR. Cool. Next question. You kind of alluded to this in the slide. How do you see solutions like optimistic ZK hybrid solutions? Yeah, so this is an interesting one. As I said, if you need to use these execution history commitments, which most of the optimistic proof systems use, you have these problems with levels and recursion, and you might prefer not to do that because, as I said, it's very complex. Cartesi decided to go hybrid in the sense that they have an optimistic protocol, but at a certain point, instead of proving just one single step in their RISC-V machine, because they use RISC-V underneath, they prove a big chunk of steps. So I think it makes sense to improve the gas cost. Well, you can have off-chain proving cost, so that's debatable, but another approach is single round. Instead of going the route of a multi-round proof system, you can do what Taiko does, which is you propose, you challenge, and when you challenge, you need to provide a ZK proof either with RISC Zero or SP1.
So I think in the future, if an optimistic roll-up wants to use a fraud-proof system, it's very likely that they will also need ZK somehow in their stock. Cool. Next question. Can part of the answers be facilitating an honest offender having snowballing community support? The other honest observers can validate the challenge and support? Yes. So to allow honest defenders to play together as one, you need ad hoc mechanisms. It's not super obvious. There are some proof systems like the current one in Arbitrum that doesn't allow honest defenders to play together because there are some issues. But the idea is, yes, the honest defender in an optimistic roll-up is going to win, is going to profit. So that's the main reason why some of these projects say, look, it's not a problem if we need a lot of funds to protect the chain because it's profitable. A lot of people will join and defend the chain together. So yes, like the plan is to have multiple months. All right, everyone give Luke.", "eventId": "devcon-7", - "slot_start": 1731553200000, - "slot_end": 1731558600000, - "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/1PShw8A7XomH3DtlwmgLZcgMrPY11XvLp_EuNeSwghoQ" + "slot_start": 1731468600000, + "slot_end": 1731470400000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1ft-eFG4MqCEgA32GW7jQmKsNVc9dmE6ItmC7m8A1nFs", + "resources_slides": "https://drive.google.com/file/d/1k16u0NpLME83FFa16mP1s3rZ7qZIXLM0/view", + "speakers": [ + "luca-donno" + ] }, "vector": [ 0, @@ -357513,12 +356524,9 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -357820,7 +356828,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -357871,6 +356878,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -358259,7 +357267,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -358267,6 +357274,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -358327,6 +357335,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -358375,7 +357384,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -358483,6 +357491,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -358822,8 +357831,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 2, @@ -358838,53 +357845,52 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "fraud-proofs-war", - "sourceId": "UTTXWB", - "title": "Fraud proofs war", - "description": "Fraud proof systems were originally envisioned to be able to protect a rollup with just a single honest challenger assumption. As it turns out, the matter is much more complex because of exhaustion attacks, a form of sybil attack where the attacker tries to win by economically outlasting the defenders. The talk discusses the tradeoffs in the proposed solutions to this form of attack by analyzing Arbitrum, Cartesi and Optimism fraud proof systems.", - "track": "Layer 2", + "id": "from-auctions-to-zk-an-educational-tour-of-mpc-tools", + "sourceId": "7TRTQW", + "title": "From Auctions to ZK: An Educational Tour of MPC Tools", + "description": "Ethereum made a significant contribution to the Cypherpunk agenda by removing central points of trust, allowing us to gain accountability, yet losing us any semblance of privacy that we had. 
There is hope at hand for privacy, but hope, in this case, is rather technical.\r\nThis talk aims to bring you up to scratch on privacy preserving tools while discussing S{N,T}ARKS, TEEs, FHE, how MPC elevates them in a decentralized setting, and highlighting their use from Auctions to ZK, from the 90s til now.", + "track": "Cypherpunk & Privacy", "type": "Talk", - "expertise": "Expert", - "audience": "Research", + "expertise": "Intermediate", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Optimistic rollups", - "Challenge period", - "Mechanism design", - "fraud", - "proof", - "Challenge period", - "Mechanism design", - "Optimistic rollups" + "Zero-Knowledge", + "MPC", + "Homomorphic Encryption", + "confidentiality", + "computation", + "Homomorphic Encryption", + "MPC", + "Zero-Knowledge" ], "keywords": [ - "Fraud", - "proofs" + "Confidential", + "computing" ], - "duration": 1471, + "duration": 1533, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "026bba89d2edb936b13ef6502e54fd362b2d4d890f0e4ca06732592db8ed78d2", + "sources_youtubeId": "fC44nTlYz4w", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734395c9dbb7a90e17a7cc5", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6734395c9dbb7a90e17a7cc5.vtt", - "transcript_text": " Hey everyone, I'm Luca, I do research at AltaBeat and I'm here to talk about fraud proofs. So there are many different types of fraud proofs. There are off-chain fraud proofs, on-chain fraud proofs, single round, multi-round. This talk is mainly about on-chain multi-round proof systems which is what the top optimistic roll-ups use today like Arbitrum, Optimism or BASE. And the thing is that these two proof systems so the one developed by Arbitrum, Optimism, or BASE. But the thing is that these two proof systems, so the one developed by Arbitrum and the one developed by Optimism, are very different. There are very different trade-offs, and my goal here is to give you an intuition on why they're different, and the security trade-offs that they make. So first of all, what optimistic rollups it's very simple intuitively you need to communicate to L1 the L2 state so there is an actor could be yourself that publishes the state routes of the L2 on L1 and there is a timer if this timer expires the state route is optimistically considered correct so that it can be used for withdrawals. This is the happy case. You don't need to prove anything about the correctness of the state. In the not so happy case, you can have this actor that publishes an invalid state root and now what do you do? You need to prove that this state route is incorrect. And as I said, there are a lot of different designs and this is what this talk is about. So the original vision about optimistic roll-ups was the following. That any single honest challenger can protect an optimistic roll-up with a fixed challenge period. So, you know, So optimism and arbitrum have a seven days challenge period. If there is anyone monitoring the chain and they see that some state route is incorrect, they should be able to challenge it. So this was the original vision. The problem is that this is not possible at all. And I'm going to explain you why. So the main problem on why this is not possible are Sybil attacks. So let's say that you're an attacker. What you can do in an optimistic roll-up is propose millions of invalid state routes at the same time. 
So you, as the single honest challenger, how do you defend yourself? How do you defend the chain? You know, imagine you have millions, again, and you need to prove all of them wrong to protect the fans that are in the bridge. There are two ways to do this. The first one is what I call full concurrency, which means you have millions of embodied state routes, you need to challenge all of them at the same time. Which means that for each state route, there is a challenge period which could be seven days. All these challenge periods overlaps. So the advantage of this approach is that since again all the challenge period overlap, after seven days some of them will be confirmed. So your settlement delay is seven days. The problem is that challenging state routes has a cost, has a gas cost because challenges are interactive, you need to interact with the one multiple times and what an attacker can do in this case is trying to outspend you. So the attacker again publishes a lot of invalid state routes, you need to spend funds to attack all of them, it could be that at a certain point, you don't have funds anymore, and one of the invalid state routes will be unchallenged, and it will be confirmed, stealing everything. So I have an example here with some data. Let's say that the challenge period is seven days. They overlap for all these invalid state routes. The cost of challenging on chain, like the gas cost is one ETH. This is symmetrical for the defender and the attacker because challenges are interactive. There are 1,000 if in the bridge. The attacker has 800 if. The honest challenger has only 500. At a certain point, the attacker is going to outspend the defender and the attacker is going to win. So right now, it's not true that any single honest challenger can protect an optimistic roll-up with a fixed period because not anyone has the same amount of funds as the attacker to protect the chain. So let's say I'm a simple user that only has $1,000 on a chain, and the bounty of the bridge is millions of dollars, an attacker can spend millions to attack it, I cannot defend it, clearly. So this is not true, the correct statement is that challengers with more funds than the attackers can protect an optimistic roll-up with a fixed challenge period, which is a much stronger claim. The second option is partial concurrency. Which is, instead of challenging all the Invali state routes at the same time, I challenge them one at a time. So I create this kind of queue, and I play in one challenge. When I finish one challenge, I go to the next one. And this is partial concurrency, because you can still have some kind of concurrency in the sense that if you have multiple defenders, they can play against different attackers in parallel, but like in the worst case, if you're alone, you need to play against all of them one after the other. So let's go through an example. Again, let's say the challenge period is seven days per challenge and there are multiple challenges. The cost is again one ETH. The funds in the bridge are 1,000 ETH. The attacker funds are 800. And the honest challenger now has very little funds. Having little funds here is not a problem because you can play like you only need one ETH for the first challenge. Then when you win, because you're guaranteed to win, if you're honest, you will get the bonds of the adversary. And with that bond, you can play the next one. So, like, you need very little funds. But the problem is that you have an attacker that can spend 800 ETH to play one challenge. 
You're going to have a delay of 800 weeks, because one challenge takes one week, and you have, you play them sequentially, so you will have 800 weeks of delay, which is more than 15 years. So this is what is called a delay attack. On proof systems that use partial concurrency. So here again, the claim that any single honest challenger can play an optimistic roll-up with a fixed challenge period is not true. The correct statement is that any single honest challenger, in the sense that you need little funds, so it's very decentralized, it can be you, can play an optimistic roll-up with non-fixed challenge period because they use partial concurrency. So there is this trade-off. When you need to design a for-proof system, you need to decide whether you want to use full concurrency or partial concurrency, which means you either are affected by resource exhaustion attacks, which means that the attacker can try to suspend you, or delay attacks. There is this trade-off. So now let's try to optimize these two cases, because it's not just as bad as I told you, but there is something that you can do to improve the situation. In full concurrency, what you can do is to add bonds. When you add bonds, you add a cost to create challenges. So as I said, the cost of one challenge can be one ETH, but if creating a challenge costs 10 ETH, it means that the attacker needs to spend 10 ETH to induce me as the honest defender one ETH of cost, right? So right now, there is asymmetry, like the C builds have to stake a lot of bonds, I only need to stake one, for each bond they induce me gas cost on playing the challenge, the bond is higher than the cost of one challenge. So let's go through an example, again you have a challenge period of seven days. You have the bond that are ten ETH. The cost of one challenge is one ETH. The funds in the bridge are 1,000. The attacker funds are 800 as before. Honest challenger funds are 500 as before. But now the defenders win because with 800 ETH, the attacker can only generate 80 CBELs, which means that I will only spend 80 ETH. I have more than that, so in this case I win, even though I have less funds than the attacker. The problem here is that this is very complex to do. You need something that is called execution history commitments, which means that you cannot allow... The problem is you need to play the base action when you need to go through a challenge, and you should be able to allow everyone to play within the same claim, because otherwise the problem is... So the interactive game works as follows. I propose a state route, the game is interactive, the next player is going to ask me for a midpoint, the problem is I can lie. I can propose the correct state truth at the beginning and then propose an invalid bisection. And then I can lose on purpose in this way. So the problem is I can only have this asymmetry if people can join together and state truths cannot lose on purpose. And the way that you do this, the way you prevent invalid bisections is by having a commitment over all the steps, the state after all the steps, so that when you bisect, you can provide a miracle proof that the bisection is correct. But again, this is super complex. You are not supposed to understand this, because as I said, this is super, super complex. So improving on full concurrency is very complex. You need to add levels because now when you have execution history commitments over all the steps, you need to merkleize the state for all the steps. 
These steps are like on the order of 2 to the 70 steps. This is unfeasible, so you cannot merkleize all of them. You need to have bigger chunks. And then when you reach, when you bisect and you reach one chunk, you need to repeat the protocol recursively. And then when you do this, you create another type of attack across levels. And then you might say, I don't want to use levels at all, so let me plug ZK proofs. Again, you're not supposed to understand this. You're just supposed to understand that this gets very complicated when you want to improve on full concurrency. So in practice, Arbitrum, the new proof system that they will deploy soon, and Optimism on mainnet today use full concurrency. So they have fixed challenge periods of seven days. Arbitrum uses this very complicated set-up called execution history commitments. Optimism doesn't. And for this reason, Arbitrum can have what is called the resource ratio of 15%, which means if an attacker, let's say, has 1,000 ETH, me as a defender, to protect the chain, I only need 15% of the funds that the attacker is willing to spend to protect the chain, as the example we made before with bonds. While optimism doesn't have this, they cannot have this asymmetry, this advantage, and indeed if you are a defender and you want to protect the chain, you need 109% of the funds that the attacker has. So like more than 100%. Like significantly more, I would say. So again, imagine you have a bridge with 1 million ETH, an attacker can spend a little less than that, and you need more than that to protect the chain. So it's a lot of funds. An attacker can spend a little less than that, and you need more than that to protect the chain. So it's a lot of fun. The problem is you need very big bond sizes because your bond size needs to be a multiple of the gas cost to play a challenge. And in Arbitrum, this bond size is 3600 ETH, which is insane. So you affect decentralization by a lot. And you have this trade-off between resource ratio and issue on bond size, because, as I said, right now, when you increase the bond size, you can have better resource ratio, like a lower resource ratio, which is better, which improves safety, but affects decentralization. Let's try to optimize partial concurrency. So, instead of having all the challenges one after the other, what you can do is to create a tournament, like a bracket tournament. This is something that was developed by Cartesi, which is another team working on for proofs. So what you can do is you create this bracket tournament, the matches are in parallel, and if you have a lot of C builds, since they are in this tournament, they will eliminate each other. Because they are in this tournament, they will eliminate each other because they are paired together. So this is a strategy to eliminate symbols together. And then since the brackets are then sequential, the honest challenger will only play a logarithmic number of challenges compared to the number of decibels. So if you go to an example, right now the delay is not 800 weeks, but it's a logarithm of that which is two months and one week. And in practice, the current arbitrum classic protocol, the one that is live on Meta that will be replaced, uses partial concurrency. Cartesi will use partial concurrency as well. Cartesi uses it in tournamentsrency as well. Cartesi uses the tournament. Arbitrum doesn't. Initial bond size, let's say, 3 ETH. Cartesi is much better than Arbitrum because, again, they use these... 
There are a lot of caveats here, but to simplify, a tournament is a very nice optimization to use when you want to use partial concurrency. And the bond size that you need are super small as well. So now you also have this trade-off between initial bond size and delay attacks because if you increase the bond size in partial concurrency, you can create less C bills and when you have less C bills, you have less, like a lower number of challenge periods. So like a lower number of matches, which means also the settlement is faster. This is the summary, more or less, which means that you can see on the vertical axis that if you go full concurrency, you're faster, your settlement gets faster, but you sacrifice safety and decentralization. If you go full concurrency, you're faster, your settlement gets faster, but you sacrifice safety and decentralization. If you go partial concurrency, it's very decentralized, very safe, because you need very low bonds, and your resource ratio is like, it's not even a ratio, it's constant. Then you can play with bond sizes for partial concurrency to be faster but less decentralized, or you can play with bonds on full concurrency to be more safe, to have a better resource ratio, sacrifice decentralization, or you can also play in principle with a challenge period to be like if you have a shorter challenge period, your settlement will be faster, but you sacrifice safety significantly. So I have some further points. I don't have time to discuss all of them, but one point is, is there any way to reduce this challenge period? Because right now it's seven days for all of them. Can we go below that? The answer is no. I don't think we can do this. Because, first of all, why seven days? The reason is to protect from a strong censorship, attack on Ethereum. So there are two types of censorship that you can have. You can have weak censorship or strong censorship. Weak censorship means that you have a certain percentage of builders censoring transactions. You know, like as a context, censoring is a concern because if the honest challenger is censored, the invalid state is going to be confirmed, right? So if there is a percentage of builders that is censoring, This is not really a problem because you still have a high percentage of probability, a high probability of getting included within a short amount of time. The main problem is that if there is a majority of attesters, like 51% of attester censoring, all the transactions from the honest defenders will be excluded from the chain. So now we will need to coordinate a hard fork to hard fork away the censoring validators. And we agreed a while ago that to do this, to coordinate a hard fork, we need around seven days, so if your protocol uses a challenge period that is less than seven days, you cannot protect from strong censorship attacks. Do optimistic roll-ups make sense if we have ZK? I think they do, in the sense that you cannot be cheaper than an optimistic roll-up. In the happy case, you don't have costs, apart from the bare execution of your state transition function. You don't have the proof generation cost. Your throughput is the same as the machine. And, well, in ZK, you can parallelize proof generation and so on, but there are also centralization concerns. So, like, you want to exit from a roll-up, not everyone has the hardware or the, like, ZK ASICs to prove a lot of blocks by themselves if they are censored. So optimistic roll-ups have these nicer properties. But should more project be ZK? I think so. 
I think the answer is yes, in the sense that if, like, most of the L2s today, we have L2s, we have scalability. The problem that L2s created is fragmentation. Most of the L2s want to fix fragmentation. Can you have interoperability with a seven-day challenge period? I don't think so. So every project that aims at being interoperable with a seven days challenge period? I don't think so. So every project that aims at being interoperable with all the others, I think they should be ZK. For all the other use cases, if you're building a game, you don't need interoperability. I think the optimistic approach is great. So that's it. Thank you. Wow, committing fraud is really complicated. I thought you just signed a different name on the check. So we got some questions from the audience. And the first one up there, I saw this one going up, it's really interesting. In all optimistic L2s, how many times has there been a fraud actually reported and acted upon? We actually have a few examples of fraud proofs happening on-chain. But those fraud proofs are not the result of people being malicious. Most of the time it was just that there were some validators, some nodes that didn't upgrade their software. So they saw an invalid state root because they were running on a previous version. This has happened in Chroma, I think. It has happened in Kinto recently, which is an Orbit stack, like an Orbit from Fork. So another example that is a fun one is on FuelV1, we L2Bit tried to steal $8 from FuelV1. And, you know, FuelV1 is this app-specific roll-up that no one uses, basically. There's no activity. So, like, we wanted to see whether there was anyone trying to defend the chain, even if it was not used. The bond size was 0.5 if we lost. So like all the TVL in Fuel V1, it's like our funds. And because there was someone watching, indeed. Yeah, that Fuel V1 story is going to be the stuff of legend. Next question. Can't the challenger just challenge the first wrong state root, hence invalidating all state roots that are proposed after by the proposer? Well, it depends on the protocol. Most of the protocols, like Optimism today, all the state routes are independent. So invalidating one state route doesn't invalidate all of the others, but also you need to allow multiple proposals to be sent for the same state. Because it's not just that state routes are sequential, it could be that for a single point in time, for a single block number, you need to have multiple proposals because again you're going to have a lot of symbols, so you need to challenge all of them. You could have a fallback mechanism such that if multiple state roots are confirmed, you halt or something. But challenging one state root is not enough. That's the TLDR. Cool. Next question. You kind of alluded to this in the slide. How do you see solutions like optimistic ZK hybrid solutions? Yeah, so this is an interesting one. As I said, if you need to use these execution history commitments, which most of the optimistic proof system use, you have these problems with levels and recursion, and you might prefer not to do that because, as I said, it's very complex. Cartesi decided to go hybrid in the sense that they have an optimistic protocol but at a certain point instead of proving just one single step in their RISC-V machine because they use RISC-V underneath they prove a big chunk of steps. So I think it makes sense to improve the gas cost. Well you can have off-chain proving cost so that's debatable, but another approach is single round. 
Instead of going to the root of multi-round proof system, you can do what Tyco does, which is you propose, you challenge, and when you challenge, you need to provide a Zika proof either with risk zero or SP1. So I think in the future, if an optimistic roll-up wants to use a fraud-proof system, it's very likely that they will also need ZK somehow in their stock. Cool. Next question. Can part of the answers be facilitating an honest offender having snowballing community support? The other honest observers can validate the challenge and support? Yes. So to allow honest defenders to play together as one, you need ad hoc mechanisms. It's not super obvious. There are some proof systems like the current one in Arbitrum that doesn't allow honest defenders to play together because there are some issues. But the idea is, yes, the honest defender in an optimistic roll-up is going to win, is going to profit. So that's the main reason why some of these projects say, look, it's not a problem if we need a lot of funds to protect the chain because it's profitable. A lot of people will join and defend the chain together. So yes, like the plan is to have multiple months. All right, everyone give Luke.", + "sources_streamethId": "67347dda9dbb7a90e1a56a9d", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731468600000, - "slot_end": 1731470400000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1ft-eFG4MqCEgA32GW7jQmKsNVc9dmE6ItmC7m8A1nFs", - "resources_slides": null, + "slot_start": 1731491400000, + "slot_end": 1731493200000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1VLWGFuzmpGa1l5aa_6_T3lRO-nTsPDh5IXDg9sFoZM8", + "resources_slides": "https://drive.google.com/file/d/16vdhB82UKPQJTowHLmV5SXn2si4cRwa0/view", "speakers": [ - "luca-donno" + "aisling-connolly" ] }, "vector": [ @@ -358893,8 +357899,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -359022,6 +358026,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -359248,7 +358253,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -359646,11 +358650,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -359707,7 +358711,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -359728,6 +358731,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -359863,7 +358868,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -360199,13 +359203,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, - 2, 0, 2, 0, @@ -360216,58 +359220,59 @@ 0, 0, 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "from-auctions-to-zk-an-educational-tour-of-mpc-tools", - "sourceId": "7TRTQW", - "title": "From Auctions to ZK: An Educational Tour of MPC Tools", - "description": "Ethereum made a significant contribution to the Cypherpunk agenda by removing central points of trust, allowing us to gain accountability, yet losing us any semblance of privacy that we had. There is hope at hand for privacy, but hope, in this case, is rather technical.\r\nThis talk aims to bring you up to scratch on privacy preserving tools while discussing S{N,T}ARKS, TEEs, FHE, how MPC elevates them in a decentralized setting, and highlighting their use from Auctions to ZK, from the 90s til now.", - "track": "Cypherpunk & Privacy", + "id": "from-bottlenecks-to-breakthroughs-optimizing-zkevm-provers", + "sourceId": "LT8BTE", + "title": "From Bottlenecks to Breakthroughs: Optimizing zkEVM Provers", + "description": "In this session, we introduce how we optimized zkEVM provers in production to significantly reduce prover costs, a major expense in running zkEVM. 
Topics include diagnosing zkEVM bottlenecks using CPU and memory profiling, leveraging DAGs for parallelization, and efficient memory management with a memory pool, fine-tuned garbage collection, and in-memory swapping for gigantic memory usage. These optimizations reduced zkEVM prover runtime by 75%, representing a substantial performance gain.", + "track": "Applied Cryptography", "type": "Talk", "expertise": "Intermediate", - "audience": "Product", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Zero-Knowledge", - "MPC", - "Homomorphic Encryption", - "confidentiality", - "computation", - "Homomorphic Encryption", - "MPC", - "Zero-Knowledge" + "Layer 2s", + "ZK-EVMs", + "Open Source Software", + "optimization", + "Layer 2s", + "Open Source Software", + "ZK-EVMs" ], "keywords": [ - "Confidential", - "computing" + "Performance", + "Optimization" ], - "duration": 1533, + "duration": 1395, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "301fa43a7bfaa31972ff77b24bb260e2d672d4919d0603965f4abcc4e9fb5a6f", + "sources_youtubeId": "aNF-BM6v-tI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67347dda9dbb7a90e1a56a9d", + "sources_streamethId": "6734723d9dbb7a90e11a9ad5", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731491400000, - "slot_end": 1731493200000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1VLWGFuzmpGa1l5aa_6_T3lRO-nTsPDh5IXDg9sFoZM8", - "resources_slides": null, + "slot_start": 1731488400000, + "slot_end": 1731490200000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1uTR60xRfzUI21BwpSkQ39uJtzxKc0DLJd2BqZBQisTI", + "resources_slides": "https://drive.google.com/file/d/1ABZFd7lDMOd0POVL1TnBpaiAe4Bm9zeD/view", "speakers": [ - "aisling-connolly" + "leo-jeong" ] }, "vector": [ + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -360400,7 +359405,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -360624,6 +359628,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -361031,7 +360036,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -361078,6 +360082,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -361109,7 +360115,6 @@ 0, 0, 2, - 2, 0, 0, 0, @@ -361322,55 +360327,45 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 2, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 2, 0, 0, 0, @@ -361587,10 +360582,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -361604,46 +360599,47 @@ }, { "session": { - "id": "from-bottlenecks-to-breakthroughs-optimizing-zkevm-provers", - "sourceId": "LT8BTE", - "title": "From Bottlenecks to Breakthroughs: Optimizing zkEVM Provers", - "description": "In this session, we introduce how we optimized zkEVM provers in production to significantly reduce prover costs, a major expense in running zkEVM. Topics include diagnosing zkEVM bottlenecks using CPU and memory profiling, leveraging DAGs for parallelization, and efficient memory management with a memory pool, fine-tuned garbage collection, and in-memory swapping for gigantic memory usage. 
These optimizations reduced zkEVM prover runtime by 75%, representing a substantial performance gain.", - "track": "Applied Cryptography", + "id": "from-concept-to-reality-the-triumph-of-blockchain-in-vaccine-distribution", + "sourceId": "ZBC9ZM", + "title": "From Concept to Reality: The Triumph of Blockchain in Vaccine Distribution", + "description": "Join us for an inspiring session that explores the transformative power of blockchain in vaccine supply chains. Learn how we achieved country-wide deployments in Bangladesh and Costa Rica, enhancing transparency, traceability, and efficiency. Discover the real-world challenges we overcame, the innovative solutions implemented, and the remarkable impact on public health logistics, setting new standards for supply chain management and ensuring the safe delivery of vaccines globally.", + "track": "Real World Ethereum", "type": "Talk", - "expertise": "Intermediate", - "audience": "Research", + "expertise": "Beginner", + "audience": "Business", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", - "ZK-EVMs", - "Open Source Software", - "optimization", - "Layer 2s", - "Open Source Software", - "ZK-EVMs" + "Sustainability", + "Ethereum for Good", + "Public good", + "real-world", + "deployment", + "Ethereum for Good", + "Public good", + "Sustainability" ], "keywords": [ - "Performance", - "Optimization" + "Real-World", + "Deployment" ], - "duration": 1395, + "duration": 974, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "a1313e35846a12d8159baaf1f5419c4b8846b08f315ac4c872079e5de0c97384", + "sources_youtubeId": "0dSz0CN6bI8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734723d9dbb7a90e11a9ad5", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731488400000, - "slot_end": 1731490200000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1uTR60xRfzUI21BwpSkQ39uJtzxKc0DLJd2BqZBQisTI", - "resources_slides": null, + "slot_start": 1731409200000, + "slot_end": 1731410400000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1yuhgDizD0e2BcBSAmT-nwGyHIS4gNNqFjMZbvO34IPc", + "resources_slides": "https://drive.google.com/file/d/1quEEZTN-yLwlDw1CfvkOJjm1DFpHiZgV/view", "speakers": [ - "leo-jeong" + "david-casey", + "arun-maharajan", + "mansi" ] }, "vector": [ @@ -361653,10 +360649,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -361733,6 +360725,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -362008,10 +361002,9 @@ 0, 0, 0, - 6, - 0, 0, 0, + 6, 0, 0, 0, @@ -362464,7 +361457,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -362496,7 +361488,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -362509,6 +361500,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -362525,6 +361517,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -362581,6 +361574,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -362960,7 +361954,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -362976,54 +361969,48 @@ 0, 0, 0, - 0, - 0, 0 ] }, { "session": { - "id": "from-concept-to-reality-the-triumph-of-blockchain-in-vaccine-distribution", - "sourceId": "ZBC9ZM", - "title": "From Concept to Reality: The Triumph of Blockchain in Vaccine Distribution", - "description": "Join us for an inspiring session that explores the transformative power of blockchain in vaccine supply chains. Learn how we achieved country-wide deployments in Bangladesh and Costa Rica, enhancing transparency, traceability, and efficiency. 
Discover the real-world challenges we overcame, the innovative solutions implemented, and the remarkable impact on public health logistics, setting new standards for supply chain management and ensuring the safe delivery of vaccines globally.", - "track": "Real World Ethereum", - "type": "Talk", - "expertise": "Beginner", - "audience": "Business", + "id": "from-mpc-wallets-to-smart-contract-accounts", + "sourceId": "XMTH8N", + "title": "From MPC Wallets to Smart Contract Accounts", + "description": "The proposal outlines a path for the mass adoption of smart contract accounts by using MPC wallet as a transitional solution. Users can start their web3 journey by using MPC wallets which can be done via social login. Later, users can turn the MPC wallets into smart contract wallets using EIP-7702, enhancing the user experience with feature-rich options while maintaining the security benefits of MPC wallets to protect the EOA private key.", + "track": "Usability", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Sustainability", - "Ethereum for Good", - "Public good", - "real-world", - "deployment", - "Ethereum for Good", - "Public good", - "Sustainability" + "MPC", + "Account Abstraction", + "eip-7702", + "Account Abstraction", + "MPC" ], "keywords": [ - "Real-World", - "Deployment" + "EIP-7702" ], - "duration": 974, + "duration": 537, "language": "en", - "sources_swarmHash": "a1313e35846a12d8159baaf1f5419c4b8846b08f315ac4c872079e5de0c97384", - "sources_youtubeId": "0dSz0CN6bI8", + "sources_swarmHash": "8a617b801597c6e5869ce8d95eece6aff69b5e740275a7e9e649ff2757671a30", + "sources_youtubeId": "Yr0AS9QifjU", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673581a69dbb7a90e14ff55d", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673581a69dbb7a90e14ff55d.vtt", + "transcript_text": " Hello everyone, my name is Phuc Thai, I'm from Sky Mavis. So today I'm going to talk about moving from MPC wallets to smart contract accounts. We already understand that there are a lot of issues with current EOAs: they are too complex for some non-technical and even technical users, and there are no accessible recovery mechanisms. So to solve this problem we have a few solutions in place already, and I want to highlight two main things: MPC wallets and account abstraction. Both of them can offer a lot of help with onboarding new users, offering social login, key recovery and some transaction policy. The difference between MPC and account abstraction is that most of the things MPC does happen off-chain, including key creation and transaction policy. Whereas with account abstraction, there is more transparency and everything can happen on-chain. And with account abstraction, users can also have more flexibility and a lot of features for convenience, such as batched transactions, gas sponsorship and automation. But account abstraction is relatively new compared with MPC, which is a 40-year-old technology already and is ready to be adopted right now. So one of the problems with MPC is that users cannot have the flexibility that account abstraction offers. But with the introduction of EIP-7702, we can actually combine it with MPC to offer a great experience. So this is a picture from Vitalik's blog. As we can see here, with MPC and 7702, we can achieve most of the goals; all the convenience goals of account abstraction can still be achieved. And some of them are critical. For example, here we can see that automation can be done if we can delegate the account to some key that is stored on the server, plus gas sponsorship, and everything. So the reason that 7702 can be a great combination with MPC wallets is that the current MPC infrastructure is already in place and can help with some of the features of 7702. One thing I want to highlight here is privilege de-escalation. In MPC, we have multiple key shares stored on the user device and on the server, and we already have a system in place for this. And with 7702, we can have the MPC account set code for that account to enable delegation, granting some access to a key share, either on the client device or on the server, that can do some of the operations that users do day to day. And another thing that I want to mention here is that one of the problems with current MPC is that when we select a threshold for MPC, we usually need to make a trade-off. For example, if we use a 2-of-3 threshold, some transactions can be signed without the user knowing. But if we use a 2-of-2 threshold, then every transaction needs to be signed by the user, but the key cannot be recovered if the user loses their key share and has no backup. So with EIP-7702, we can introduce a time-locked on-chain key recovery, where either key share can gain full access to the wallet: it initiates a transaction, and the other one does not challenge it within, let's say, a 30-day period. So I think that's it for now. Thank you. Thank you, Phuc. We have some time for questions, so raise your hand if you want to ask a question, and I'll get the box to you. Or if you're further in the back, a colleague of mine will do it. I know you want to know things. This is your chance. If not, then I'll ask a question, and trust me, you don't want to hear my questions. They're not going to be that good. Okay, well, I'll still do it. If you want people to come away from this with one thing and start implementing that in their work, what would that be? Can you elaborate that a little bit? So we all go back home after Devcon and you want people to do one thing. What's the thing that you want people to do? So I think the thing I want people to do is to push out 7702 as soon as possible. Yeah, cool, cool. Anyone have a question? Or shall we call it a day? Yes, amazing. All right, I think I can do it, maybe. Yeah, okay. And one, and two, and woo! Ah, almost. Okay, so my question is a little more on the gas abstraction part. I know MPC wallets can be connected to AA wallets, and my question is, is there another solution that is more sustainable compared to just sponsoring the gas for every transaction? Yeah, can you elaborate a little bit? Yeah, so most non-crypto-native users don't have a concept of gas that they have to pay for each transaction. A lot of protocols and applications sponsor this gas so that the user can continue using the application. And the way they do it is that they fund the paymaster, and that paymaster pays for the user's transaction fees. And my question is, are there any solutions that you think can make this sustainable for the applications? So I think that is a good question. But I think the sponsorship comes from the app developers wanting to be more convenient for the user.
So they want to onboard more users. So one of the things they can do is that they can ease out the onboarding process and pay for the gas for the user. So I think that can be a great help in this case. All right. That's all the time we have for questions. Thanks again, Phuc. Give it up for Phuc for the wonderful talk.", "eventId": "devcon-7", - "slot_start": 1731409200000, - "slot_end": 1731410400000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1yuhgDizD0e2BcBSAmT-nwGyHIS4gNNqFjMZbvO34IPc", - "resources_slides": null, + "slot_start": 1731559200000, + "slot_end": 1731559800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1ZE8L3c1yymoZrVimyFHEaxXRlckYyGWHcRVxv5R5bzQ", + "resources_slides": "https://drive.google.com/file/d/1Oyamd37Uabpp1cerk4t6hCToJ7ecIQsd/view", "speakers": [ - "david-casey", - "arun-maharajan", - "mansi" + "phuc-thai" ] }, "vector": [ @@ -363033,6 +362020,8 @@ 0, 0, 0, + 0, + 0, 6, 0, 0, @@ -363108,9 +362097,7 @@ 0, 0, 0, - 6, 0, - 6, 0, 0, 0, @@ -363826,6 +362813,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -363863,6 +362851,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -363887,8 +362876,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -363904,7 +362891,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -363961,7 +362947,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -364130,8 +363115,6 @@ 0, 0, 2, - 2, - 0, 0, 0, 0, @@ -364340,10 +363323,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -364357,56 +363340,51 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "from-mpc-wallets-to-smart-contract-accounts", - "sourceId": "XMTH8N", - "title": "From MPC Wallets to Smart Contract Accounts", - "description": "The proposal outlines a path for the mass adoption of smart contract accounts by using MPC wallet as a transitional solution. Users can start their web3 journey by using MPC wallets which can be done via social login. Later, users can turn the MPC wallets into smart contract wallets using EIP-7702, enhancing the user experience with feature-rich options while maintaining the security benefits of MPC wallets to protect the EOA private key.", - "track": "Usability", + "id": "from-nanoseconds-to-decades-the-timescales-of-ethereum", + "sourceId": "CGTBC7", + "title": "From Nanoseconds to Decades: The Timescales of Ethereum", + "description": "Ethereum is an intricate machine with numerous gears meshing into each other. Some are tiny and spin at lightning speed, others barely move. 
In this short talk, we will embark on a brief journey through the various processes within Ethereum, examining how long they take -- from executing a single OP code to accepting an EIP.", + "track": "Core Protocol", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Product", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, - "tags": [ - "MPC", - "Account Abstraction", - "eip-7702", - "Account Abstraction", - "MPC" - ], "keywords": [ - "EIP-7702" + "Fun", + "Data" + ], + "tags": [ + "Core Protocol", + "data", + "fun", + "Core", + "Protocol" ], - "duration": 537, "language": "en", - "sources_swarmHash": "8a617b801597c6e5869ce8d95eece6aff69b5e740275a7e9e649ff2757671a30", - "sources_youtubeId": "Yr0AS9QifjU", + "sources_swarmHash": "43f017afeffc251b70f96edf23f30b02cd8c99c3153c57fb863a1ed583064116", + "sources_youtubeId": "Oylkbu1lmHw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673581a69dbb7a90e14ff55d", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673581a69dbb7a90e14ff55d.vtt", - "transcript_text": " Hello everyone, my name is Phuc Thai, I'm from Sky Mavis. So today I'm going to talk about how already understand that there is a lot of issue with the current EOAC that it's too complex for some non-technical and even technical users. There's inaccessible recovering mechanisms. accessible recovery mechanisms. So to solve this problem we have a few solutions in place already and I want to highlight two main things is MPC wallet and archive collection. So both of them can offer a lot of help with onboarding new users, offer social login, key recovery and some transaction policy. So the difference between MPCs and account protection is most of the things MPC do is on the off-chain, including query-recreation and transaction policy. Whereas on account corrections, it has more transparency and everything can happen on-chain. And with account corrections, users can also have more flexibility in a lot of feature for convenience suggests batch interaction, gas sponsors and automation and something but account collection is relatively new compared with the MPC technologies. It is 40 years old technology already and it's ready to be adopted right now. So yeah, so one of the problems with MPC is that users cannot have the flexibility that account protection offers. But with the introduction of 7.7.0.2, we can actually combine it with MPC to offer a great have. So this is a picture from Vitalik Blocks. As we can see here, with MPC and 7702, we can achieve most of the goal, all the convenient goal that account ratings still can be achieved. And some of them are critical. So, such as like here, we can see that automation can be done if we can delegate the account to some key data store on the server, get attractions, sponsor, and everything. So the reason that 7702 can be a great combination with MPC wallet is that the current infrastructure of MPCs already in place that can help with some of the features of 7702. want to highlight here is the privilege de-escalation. In MPC, we had multiple key sats that store in the user device in the server that we already had a system in place for this. And with 7702, we can set the MPC account to set code for that account to enable delegation, so some access to the key set that in either in the client device or in the server that can do some of the operations that user can do day to day. 
some of the operations that users can do day to day. And so another thing that I want to mention here is that one of the problems with current MPC is that when we select a threshold for MPC, we usually need to involve a trade-off. For example, if we use like 2.0.3 threshold, some transaction can be signed without the user knowing. But if we use 2.0.2 threshold, then every transaction needs to be signed by user, but the key cannot recover if the user lost the key shot and they have not had any backup. So with the 7.7.0.2, we can introduce a time-locked on-chain key recovery that we can have either key shard can be gain full access to the wallet, it initiates a transaction, and then the other one do not challenge that in, let's say, 30-day periods. So I think that's it for now. Thank you. Thank you, Phuc. We have some time for questions, so raise your hand if you want to ask a question, and I'll get the box to you. Or if you're further in the back, a colleague of mine will do it. I know you want to know things. This is your chance. If not, then I'll ask a question, and trust me, you don't want to hear my questions. They're not going to be that good. Okay, well, I'll still do it. If you want people to come away from this with one thing and start implementing that in their work, what would that be? Can you elaborate that a little bit? So we all go back home after DEF CON and you want people to do one thing. What's the thing that you want people to do? So I think the thing I want people to do is to push out 7702 as soon as possible. Yeah, cool, cool. Anyone have a question? Or shall we call it a day? Yes, amazing. All right, I think I can do it, maybe. Yeah, okay. And one, and two, and woo! Ah, almost. and two and whoo ah almost okay so my question is a little more on the gas abstraction part i i know mpc wallets can be um connected to aa wallets and um yeah uh my question is, is there another solution that is more sustainable compared to just sponsoring the gas for every transaction? Yeah, can you elaborate a little bit? Yeah, so some wallets, because most of the non-crypto native users don't have a concept of gas that they have to pay for each transaction. A lot of protocols and applications sponsor this gas so that the user can continue with using the application. And the way they do it is that they fund the paymaster and that paymaster pays for the user's transaction fees. And yeah, my question is how, are there any solutions that you think can make this sustainable for the applications? So I think, so that is a good question. So that is a good question. But I think the sponsor, so this comes from that if the app developer, that they want to be more convenient for the user. So they want to onboard more users. So one of the things they can do is that they can ease out the onboarding process and pay for the gas for the user. So I think that can be a great help in this case. All right. That's all the time we have for questions. Thanks again, Phuc. 
Give it up for Phuc for the wonderful talk.", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "jannik-luhn" + ], "eventId": "devcon-7", - "slot_start": 1731559200000, - "slot_end": 1731559800000, + "slot_start": 1731469200000, + "slot_end": 1731469800000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1ZE8L3c1yymoZrVimyFHEaxXRlckYyGWHcRVxv5R5bzQ", - "resources_slides": null, - "speakers": [ - "phuc-thai" - ] + "resources_presentation": "https://docs.google.com/presentation/d/1Ry_A-NlHMHVJmRMfoIquVsBqvO4xh-ZsvcBax7Ji6fk", + "resources_slides": "https://drive.google.com/file/d/1Ke8oNo2T75sYW_ukY22IWDQ-T1Kv32HA/view" }, "vector": [ - 0, - 0, - 0, - 0, 0, 0, 0, @@ -364766,17 +363744,11 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -365180,6 +364152,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -365205,7 +364178,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -365243,7 +364215,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -365469,6 +364440,14 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 2, + 0, 0, 0, 0, @@ -365724,7 +364703,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -365733,50 +364711,59 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "from-nanoseconds-to-decades-the-timescales-of-ethereum", - "sourceId": "CGTBC7", - "title": "From Nanoseconds to Decades: The Timescales of Ethereum", - "description": "Ethereum is an intricate machine with numerous gears meshing into each other. Some are tiny and spin at lightning speed, others barely move. In this short talk, we will embark on a brief journey through the various processes within Ethereum, examining how long they take -- from executing a single OP code to accepting an EIP.", - "track": "Core Protocol", + "id": "from-packets-to-privacy-understanding-and-evolving-network-security", + "sourceId": "XYRFXT", + "title": "From Packets to Privacy: Understanding and Evolving Network Security", + "description": "This talk will provide a comprehensive journey through the fundamentals of network communication, explore the workings and risks of Virtual Private Networks (VPNs), and dive into the world of Mixnets. 
We’ll discuss how decentralized Mixnets can offer privacy by default, potentially eliminating the need for traditional VPNs.", + "track": "Cypherpunk & Privacy", "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Community", + "audience": "Product", "featured": false, "doNotRecord": false, - "keywords": [ - "Fun", - "Data" - ], "tags": [ - "Core Protocol", - "data", - "fun", - "Core", - "Protocol" + "Privacy", + "Anonymity", + "Censorship Resistance", + "vpn", + "Anonymity", + "Censorship Resistance", + "Privacy" ], - "language": "en", - "speakers": [ - "jannik-luhn" + "keywords": [ + "Mixnet", + "VPN" ], + "duration": 529, + "language": "en", + "sources_swarmHash": "60a1a240af7b45bf3220aa7ef64b4613aeae26d9f09b1dfdab8177ceb235c87b", + "sources_youtubeId": "7FyShvrYnHk", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6734a0f89dbb7a90e1476ca9", "eventId": "devcon-7", - "slot_start": 1731469200000, - "slot_end": 1731469800000, + "slot_start": 1731496200000, + "slot_end": 1731496800000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Ry_A-NlHMHVJmRMfoIquVsBqvO4xh-ZsvcBax7Ji6fk" + "resources_presentation": "https://docs.google.com/presentation/d/12nsOv8WsOMt_04w0HJeyZq7caYnELYCEfrMGbVYyAGM", + "resources_slides": "https://drive.google.com/file/d/1NFhSRmRF62Lhw3fNnMGGmcnYG12wLRyn/view", + "speakers": [ + "max-hampshire", + "med-amor" + ] }, "vector": [ 0, 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -366136,6 +365123,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -366541,12 +365529,10 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, + 2, 0, 0, 0, @@ -366636,6 +365622,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -366667,6 +365654,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -366830,14 +365818,10 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -367085,15 +366069,12 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, 0, 0, - 0, 2, 0, 0, @@ -367102,50 +366083,52 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "from-packets-to-privacy-understanding-and-evolving-network-security", - "sourceId": "XYRFXT", - "title": "From Packets to Privacy: Understanding and Evolving Network Security", - "description": "This talk will provide a comprehensive journey through the fundamentals of network communication, explore the workings and risks of Virtual Private Networks (VPNs), and dive into the world of Mixnets. We’ll discuss how decentralized Mixnets can offer privacy by default, potentially eliminating the need for traditional VPNs.", - "track": "Cypherpunk & Privacy", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "from-peerdas-to-fulldas-towards-massive-scalability-with-32mb-blocks-and-beyond", + "sourceId": "EVSLDH", + "title": "From PeerDAS to FullDAS: towards massive scalability with 32MB blocks and beyond", + "description": "PeerDAS is expected to be one of the most interesting improvements of the Pectra hard fork, enabling long-awaited sharding on Ethereum, unleashing L2 scaling.\r\n\r\nPeerDAS is however just the start with up to 1-2 MB of blob space per slot. 
We look into the techniques jointly developed by our Codex Research Team and EF researchers to improve this by orders of magnitude, targeting 32 MB (and beyond) of data availability space.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Expert", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Privacy", - "Anonymity", - "Censorship Resistance", - "vpn", - "Anonymity", - "Censorship Resistance", - "Privacy" + "Danksharding", + "DAS", + "Scalability", + "fulldas", + "Danksharding", + "DAS", + "Scalability" ], "keywords": [ - "Mixnet", - "VPN" + "PeerDAS", + "FullDAS" ], - "duration": 529, + "duration": 1441, "language": "en", - "sources_swarmHash": "60a1a240af7b45bf3220aa7ef64b4613aeae26d9f09b1dfdab8177ceb235c87b", - "sources_youtubeId": "7FyShvrYnHk", + "sources_swarmHash": "d1c8176ff1b4c933326ddae3ac900465ccbc6f4bde4090fa63cc7af602715e09", + "sources_youtubeId": "Y8VKmyJMAUk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734a0f89dbb7a90e1476ca9", + "sources_streamethId": "6736cc799dbb7a90e18c1d03", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736cc799dbb7a90e18c1d03.vtt", + "transcript_text": " Hi, hello. So, yes, I have a long title. So I will be speaking about data sampling. And it's a long title and I didn't want to make it longer, but actually I have to make a small remark: it's not even blocks, it's data space that we are speaking of. So let's see who we are first. I'm Csaba, I'm from the research team at Codex. We are working on a decentralized storage network providing high durability guarantees. And our research team is also working with the EF on data sampling, and also with the client teams. OK, so I think you are all familiar with this chart; you have seen it like ten times during this Devcon, I suppose. We had EIP-4844 and the transaction fees on L2 went down, right? On the right side we see those little, little ones. That's very good. That's very nice of 4844. But I hear that we have a problem. And yeah, to see that problem, I have to zoom in. Sorry. So if we zoom in, we see this problem. So there is a slight increase in L2 transaction fees. And actually, there's a problem. L2 transaction fees are driven by blob space, by data space, and that's running out. That's a problem, and if that happens, prices can rise fast. There are other data availability solutions (at Codex we are also building one), but here we speak of the Ethereum data availability solution, so we should scale the Ethereum data availability solution. Let's look a little bit behind these numbers. What you see here, maybe you have already seen this one as well, is the daily average size of blocks on the network. And you see that it was rising, and then after the merge it started to rise, and then with 4844 it reduced. So this looks fine, right? Now you see this less often. This is the problem. This is what our node operators are seeing. This is the blocks and the blobs cumulative plot together. So this is the data amount that they are seeing. And now we can start worrying, right? There is more data to handle. But we are scientists. We can look at this differently. So let's see it on a log scale. And then it's not that bad, actually. Just like Moore's law. It's actually a different exponent, but we are better than those CPU guys, so why worry? Okay, we just have to do that little technology evolution which keeps this running. And then we have a solution, right? We can draw that line. We just need more blob space. And that's a plan. And here's the plan. We need to build this and then forward this. And we just have to be a bit loose about the scale. So it's an unknown scale here and an unknown scale there. And then we solve the problem. What we want is more blob space, and what we want is less node traffic for our operators. We just have to work out these little networking details. So let's see those little networking details that we have to work out. But first, let me have a quick detour: networking. I was speaking of average block size till now. But that average was a daily average. And in the day many things happen, right? Every 12 seconds something is happening. So what you see here is the tail distribution of block size during a long period. You don't even see the small blocks, you just see the large ones. And this was before 4844. And what you see is that there were blocks beyond two megabytes. And our nodes have to handle those. What if two such blocks come one after another? You have to handle that. So our technology has to handle these things as well. And we were measuring whether you can handle that. And the good news is that you can handle that. So the network was running and live. And what you see here is, for different block sizes (these are the bins, small up to 32 kilobyte blocks, and then between one megabyte and two megabytes), how much time nodes need to get the blocks. And the bigger the block, the more time is needed. This is a distribution. So there are nodes which are receiving it only here, and there are many nodes which are receiving it before. Almost everyone is receiving it before four seconds. So things are fine. But clearly it doesn't scale very well, as you see. If we want to increase the block size, then it would go to the right. So we arrive at our point: how to scale layer one from the networking side. We can do a few things. We can change the slot time, the slot timers, we can change resource requirements. It's a good discussion. It is a bit controversial. We have small gains. We have a lot to lose. So let me not go into that one. Then we can change GossipSub, the networking protocol. We can do improvements on GossipSub for large messages. And this is in the making. It's important. But it's not my talk today. And it's relatively small gains compared to what we need. Still important. Then we can do sharding and data availability sampling. And this is my talk today. This is large gains, I hope. And then there's the fourth thing. We can do distributed block building. This is kind of intertwined with many things, but this is large gains. But basically, that would mean rethinking the whole data flow of the blob transaction, from submission of the transaction until the end. Okay, so let's focus on point three, the sharding and data sampling over the network. So data sampling, let's just see the key concepts. You need to prepare the data. We were speaking of blobs. There are blobs, there's a package of blobs. And then we need an encoding. We need an encoding because we want to sample from it, and for the sampling to be effective, we need an erasure coding. For the erasure coding, we need segmentation. So we need to segment it, chop it to pieces. We need to extend it with an erasure coding, then we have to commit to it so that nodes cannot send false pieces, basically. And once we've done this, we are ready with the data preparation. And here we have different structures: we have what we call the one-dimensional PeerDAS structure, where you have blobs which are extended and committed to, or we have the two-dimensional structure, where you have blobs which are extended in two dimensions. If you look carefully, you see that actually this is also two-dimensional. The difference is that it's not extended by code in this dimension, and that has consequences. Okay. And then you just have to sample. It sounds easy, right? The question is where you are sampling from and how you do this sampling. So I wasn't saying anything about that. So let's just see that. So we were typically speaking of DAS, but it's actually a D-A plus an S. So there is the D-A part, which is a property of the network. It's a global property. The data is available, or the data is not available. And to have the data available in the system, I have to disperse it in the system. So I have to send it out, different pieces to different nodes, which are custodying it, and only then I can start sampling. So this is the DA part. It's ensuring data availability, it is doing the sharding, nodes are only receiving pieces. And then there is the sampling part. Once the data is in custody at different nodes, you can start sampling, asking for pieces and getting pieces. A little graphical representation. So you have a builder. In the example I was doing it with the 2D encoding. So it does the 2D encoding. And then you have those beacon nodes in the system. And what you are doing is sending out pieces of this, actually rows and columns, to different nodes. Someone is getting only two rows and two columns. Someone is getting a bit more. Someone is getting the whole thing, because he's running hundreds of validators and he's securing so much money that it's better for him to get the whole thing. Plus, doing that, he can support the system. That's one side. That's just the data availability. Now you have to sample, and that's an individual node, and actually every individual node has to sample. That's his own view. So he's picking a few pieces, and then asking for those from custody. So you're getting this piece from here, that from there, that from there, and if he manages to sample, then things are good. Okay. So we have these two sides of things, and you can see that on the network these are a little bit different. And we need them to behave differently. So we need dispersal to be secure, robust, fast, and cheap. And we need sampling to be secure, robust, fast, and cheap. So we need the same properties for the two things. It's just different protocols. Okay. So I was saying that there is PeerDAS and there is FullDAS. And there is a difference in the data structure. Actually there are a number of differences. If you scan the QR code, we have a write-up on FullDAS which is starting from PeerDAS. And some of these techniques apply to PeerDAS, some of them to FullDAS. So what you see here is PeerDAS. In PeerDAS, you have one-dimensional extension, and that also means that when you are sampling, you cannot just get a small piece, because it would not tell you enough probabilistically. You have to get a full column. So your sample is a column. Which is more data than a cell that you can sample if you have this two-dimensional extension. But to make this work, you need changes in the networking, changes in the erasure coding, changes in many things. So, let me just give you a comparison of PeerDAS and FullDAS. Actually, let me go back and just go through this list. So we have different techniques here. The first category is about sampling: it's about how I'm selecting what to sample, how I'm selecting those small pieces that I will be looking for. Then the second category is about erasure coding and the use of the erasure coding. Because what I can do is, if the data is erasure coded, I can use that as part of the transmission. So when I'm sending the data, I can already use the erasure coding to recover data and then send it on to others. And that's a very important property. We are always using erasure coding when we are doing transmission, actually. Take your mobile phone, take whatever. Here, we can use erasure coding very efficiently to send data. And when we are sending a row, we can take a piece and we can send it on a column. And when we have received half of a row, we can extend it to the whole row, for example. And then there are techniques which are protocol changes. And I'm not going through all of them. Just showing you the comparison. So PeerDAS and FullDAS. One is cheap, robust, fast and secure. And the other is cheaper, more robust, faster and more secure. And I was inclined to stop here. But let's see a little bit more in detail. Okay. So FullDAS is cheaper. It's cheaper because it's sampling at the cell level. And the cell is 512 bytes, actually it's 590 bytes, because it's the data plus the proof, which is still much smaller than a whole column. So the sampling becomes cheaper. It's more robust, because it has an erasure coding which is allowing what I was calling local repair. But what local means is important to understand. Local means in the sense of the code, in the sense of, let me go back here, in the sense of this square. So I only have a bit of the data, I only have half of a column, and I can repair something. I can generate these. I have these and I can generate these. I have half of the pieces here and I can generate this one. I cannot do that up there. And that's a huge difference. That's a huge difference because that means that every single node can do repair. Not just big nodes which have all the data, but every single node. Actually, in that construct we don't need big nodes, which by the way we have in Ethereum, so it's not like we don't have them. But we are not relying on them doing the repair in the data structure. And it's faster. Why is it faster? Because we are working on new protocols, both for the PubSub part and for the sampling part. And yes, is it more secure? So this side is still in research. It should be more secure; it should be secure enough by the time we are changing to this. It shouldn't be less secure. That's still in research. This is still in specification and changing. So let me not do more comparisons, because this will change. And actually, this is still changing. OK. Sorry. Last slide. As I said, it is not just PeerDAS and FullDAS, but we can go beyond. So, I will show you one slide. To go beyond, we have to rethink the data flow. So how is this data flow? What is happening? What we usually think about is what is happening in the consensus layer: when a block is generated, how it is distributed. But actually, these blobs are coming as transactions from the world. So let's say one blob is coming in. And that goes to the execution layer. And the execution layer has the mempool, and the mempool is redistributing this, sharing this blob between nodes. Actually, when it's blobs, it's not shared like normal small transactions. When it's normal small transactions, these transactions are pushed to others. When it's blobs, it's just information that I have this blob, and then the others are pulling it. The overall effect: these blobs are spreading in the network. Let's say there's another blob that's also spreading in the network. Then comes the block proposal, and the data is being distributed in the network: the encoded version is getting spread in the consensus layer. I think you start to see the problem. It's not a problem, it's an inefficiency. We have an inefficiency in the system, in which we are distributing the data here, and then we are actually redistributing the data there. And it's the same data. So we can optimize it a lot, which is good. We need these optimizations for Moore's law to happen. So we found another place where we can save a lot and have that curve go up. So what you can do in the first instance is that we do the distribution up there, but whenever the node is realizing that he has a piece (he would need this and this, but he has the blobs, he has the data), he can just pop it up from the execution client. Because the data is also there. Now, of course, this plot is optimistic, because I was putting this white blob almost everywhere, and I was putting this red blob almost everywhere in the network. In reality, you don't have every blob everywhere in the execution layer. Like now you actually do, but when we are scaling, we will not be able to do that. Because if we can do that, then we don't have a problem: every node can handle all the data which is there. We want to scale beyond that. So in reality, you would have some blobs here, some blobs there, some blobs there. But then, when you do that distribution, data can pop up. And that's one of the optimizations. Of course, that's a simple one. We can do much more complex ones where we have interaction between the two, and we are avoiding some of these duplications. But for that, we have to rethink the networking stack: here, which is the P2P, and here, which is using the P2P. We are doing different kinds of gossiping distribution. This is using GossipSub. This is using other protocols. This can all be the same. But that's just that, I think. Okay. Thank you. Okay, we are now here to take some Q&A. Starting from the top: there are blob fees that pay for blob storage. Is this verified at all? What happens if I get the fee but don't actually store the blob data? Okay. Blob fees that pay for blob storage. I don't know. Yeah, I'm not sure what the question is about. No worries. Question asker, if you have the question still, feel free to clarify. But I'll mark this as answered. What are standing problems that hinder current adoption? Okay. So it's just in implementation. So PeerDAS is in implementation. There are still compatibility problems. PeerDAS will be here very soon. It will not be in Pectra, but it will be in the next fork. For FullDAS, we still have to figure out many things. So there was a question mark on the security, and we have to work on that to make sure that there are no issues with that. Well, why do we need 2D erasure coding? Can't we put all of the points on one polynomial and extend it? So putting them all on one polynomial and extending it would be one huge code, one-dimensional, and that would be very bad for repair, in the sense that then you can only repair if you have all the data. And that just means that you have difficulties repairing. The 2D has this advantage of having this local repair property, so that the code distance is small and you can repair pieces. So you can repair a single column, you can repair a single row, and that gives nodes the possibility to start repairing and contributing even before getting the full data. If FullDAS is way better than PeerDAS, why not choose to implement it in the first place? As I said, PeerDAS itself is changing. We were thinking of techniques for FullDAS, then we realized they actually apply to PeerDAS, and then they are part of PeerDAS. We're still changing that. FullDAS is still much more in research, still changing. It's a simple fact of life that there are too many pieces still moving in FullDAS to implement that. It seems the FullDAS approaches are much, much better than PeerDAS. Should we skip PeerDAS and implement FullDAS? Is there any other trade-off? So, FullDAS is using many things that PeerDAS does. So skipping is not a direct skip. It's not fully different. It's giving the 2D erasure code. It has advantages, but it also has complexities. Whether we skip or not might save something on the implementation. When we are introducing it, it might also skip something, but we are also losing the graduality. So overall, I'm not sure it's good to skip, but we can think of it once FullDAS is fully evaluated. What are open research questions? So the open research questions: the last slide was fully an open research question, and everything around that. On FullDAS, there are several questions around the sampling, how we can do the sampling safely, in the sense that Sybil nodes cannot attack the system, for example. So there are still things to clarify there, especially attacks, network partitioning, and other things, and how they affect security. There's some fighting and upvoting going on. This is fun. If 2D is better, would there be any benefits of 3D? Multidimensional encoding is fun, but no, actually not. We just need the local repairability property, and we have enough of that in 2D. 3D would not contribute to that too much. Can FullDAS be implemented progressively? I think so. But we didn't define the plan. So it's something to think of. But we didn't define it yet. Cool. And then I think this question was already answered. I think we already had that. Right. Yeah. Unless someone asked it again, or I just forgot to mark it as answered. Cool. So we have a minute and 34 seconds left. Oh, I was actually spending time. Yeah, no, but so I guess there's one piece to this. You can give him feedback on the QR code, so we'll give you some time to tell him how great he did. Collect a card. 
But other than that, I think our next session, well, I guess...", "eventId": "devcon-7", - "slot_start": 1731496200000, - "slot_end": 1731496800000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/12nsOv8WsOMt_04w0HJeyZq7caYnELYCEfrMGbVYyAGM", - "resources_slides": null, + "slot_start": 1731575400000, + "slot_end": 1731577200000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1lz7gYMVKQCLb5914Y9OWEh4uWk8dcQ8g132fAtGQIuQ", + "resources_slides": "https://drive.google.com/file/d/1zjFRZKqxjPOuJJrQZt0scUJHCnxRlg3w/view", "speakers": [ - "max-hampshire", - "med-amor" + "csaba-kiraly" ] }, "vector": [ @@ -367153,7 +366136,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -367514,10 +366496,9 @@ 0, 0, 0, - 6, - 6, 0, 0, + 6, 0, 0, 0, @@ -367926,7 +366907,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -367965,6 +366945,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -368016,7 +366997,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -368040,6 +367020,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -368048,7 +367029,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -368256,8 +367236,7 @@ 0, 0, 2, - 0, - 0, + 2, 0, 0, 0, @@ -368467,12 +367446,11 @@ 0, 2, 0, + 2, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -368485,53 +367463,45 @@ }, { "session": { - "id": "from-peerdas-to-fulldas-towards-massive-scalability-with-32mb-blocks-and-beyond", - "sourceId": "EVSLDH", - "title": "From PeerDAS to FullDAS: towards massive scalability with 32MB blocks and beyond", - "description": "PeerDAS is expected to be one of the most interesting improvements of the Pectra hard fork, enabling long-awaited sharding on Ethereum, unleashing L2 scaling.\r\n\r\nPeerDAS is however just the start with up to 1-2 MB of blob space per slot. We look into the techniques jointly developed by our Codex Research Team and EF researchers to improve this by orders of magnitude, targeting 32 MB (and beyond) of data availability space.", - "track": "Core Protocol", + "id": "from-web2-security-with-love", + "sourceId": "VYEKSS", + "title": "From Web2 Security With Love", + "description": "Web3 organizations often rely on Web2 for infrastructure, communications, and development, yet their Web2 security posture is often neglected. This leaves them vulnerable to a wide range of adversaries, from well-funded sophisticated attackers to opportunistic script kiddies. In this talk,Joe Dobson will share hard-earned lessons from the Web2 trenches that can help secure Web3.Don’t make it easy for the adversary. Learn from the past: strengthen your Web2 security to safeguard your Web3 future.", + "track": "Security", "type": "Talk", - "expertise": "Expert", - "audience": "Research", + "expertise": "Beginner", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Danksharding", - "DAS", - "Scalability", - "fulldas", - "Danksharding", - "DAS", - "Scalability" - ], "keywords": [ - "PeerDAS", - "FullDAS" + "Intelligence" + ], + "tags": [ + "Security", + "Hacks", + "intelligence", + "Hacks", + "Security" ], - "duration": 1441, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "b2b9cd3ea5ae963a562ed64dde0eca9d48ca6dbb93cd638d5a55c9e0f287485f", + "sources_youtubeId": "s-8nDKk_kkM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736cc799dbb7a90e18c1d03", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736cc799dbb7a90e18c1d03.vtt", - "transcript_text": " Hi, hello. So, yes, I have a long title. So I will be speaking about data sampling. 
And it's a long title and I didn't want to make it longer, but actually I have to make a small remark, it's not even blocks. It's data space that we are speaking of. So let's see who we are first. I'm Chaba, I'm from the research team from Codex. We are working on decentralized storage network, providing high durability guarantees. And our research team is also working with the EF on data sampling and also the client teams. OK, so I think you are all familiar with this chart. You have seen it like 10 times during this DEF CON, I think you are familiar with this chart. You have seen it like ten times during this DevCon, I suppose. We had the IP 4844 and the transaction fees on L2 went down, right? And the right side we see those little, little ones. That's very good. That's very nice of 4844. But I hear that we have a problem. And yeah, to see that problem, I have to zoom in. Sorry. So if we zoom in, we see this problem. So there is a slight increase in L2 transaction fees. And actually, there's a problem. L2 transaction fees are driven by blob space, by data space, and that's running out. problem, and if that happens, prices can rise fast. data availability solutions, in codex we are also building one. speak of the Ethereum data availability solution, so we should scale that Ethereum data availability solution. a little bit behind these numbers. What you see here, maybe you have already seen this one as well, this is the daily average size of blocks on the network. And you see that it was rising and then after the merge it started to rise and then with 4844 it reduced. So this looks fine, right? Now you see this less often. This is the problem. So this is what our node operators are seeing. This is the blobs, so this is the blocks and the blobs cumulative plot together. So this is the data amount that they are seeing. And now we can start worrying, right? There is more data to handle. But we are scientists. We can look at this differently. So let's see it on a log scale. And then it's not that bad, actually. Just like Moore's law. It's actually a different exponent, but we are better than those CPU guys, so why would we? Okay, we just have to do that little technology evolution which keeps this running. And then we have a solution, right? We can draw that line. We just need more Bob space. And that's a plan. And here's a plan. We need to build this and then forward this. And we just have to be a bit lousy about the scale. So it's an unknown scale here and an unknown scale there. And then we solve the problem. What we want is more Bob space and what we want is less node traffic for our operators. We just have to work out these little networking details. So let's see those little networking details that we have to work out. But first, let me have a quick detour, networking. But this is, I was speaking of average box size till now. But average was a daily average. And in the day many things happen, right? Every 12 seconds something is happening. So what you see here is the tail distribution of block size during a long period. You don't even see the small blocks, you just see the large ones. And this was before 4844. And what you see that there were blocks beyond two megabytes. And our nodes have to handle those. What if two blocks of such come one after another? And you have to handle that. So our technology has to handle these things as well. And we were measuring whether you can handle that. And the good news is that you can handle that. So the network was running and live. 
And what you see here is for different block sizes. These are the bins. Small up to 32 kilobyte blocks. And then between one megabyte and two megabytes. How much time nodes need to get the blocks. And the bigger the block, the more time is needed. This is a distribution. So the nodes which are receiving it only here. and there are many nodes which are receiving it before. Almost everyone is receiving it before four seconds. So things are fine. But clearly it doesn't scale very well, as you see. If we want to increase the block size, then it would go to the right. So we arrive to our point how to scale layer one from the networking side. things. change the slot time, the slot timers, we can change resource requirements. It's a good discussion. It is a bit controversial. We have small gains. We have a lot to lose. So let me not go into that one. Then we can change the gossip sub, the protocol, the networking protocol. We can do improvements on gossip sub for large messages. And this is in the making. It's important. But it's not my talk today. And it's relatively small gains compared to what we need. Still important. Then we can do sharding and data with sampling. And this is my talk today. This is large gains, I hope. And then there's the fourth thing. We can do distributed block building. This is kind of intertwined with many things, but this is large gains. But basically, that would mean re-syncing the whole data flow of the blob transaction from submission of the transaction until the end. Okay, so let's focus on point three, the sharding and data sampling over the network. So data sampling, let's just key concepts. So you need to prepare the data. We were speaking of blobs. There are blobs. There We were speaking of blobs. There are blobs. There's a package of blobs. And then we need an encoding. We need an encoding because we want to sample from it and for the sampling to be effective, we need a laser coding. For the laser coding, we need segmentation. So we need to segment it, chop it to pieces. We need to extend it with a laser coding, then we have to commit to it so that it cannot be, nodes cannot send false pieces basically. And once we've done this, we are ready with the data preparation. And here we have different structures, we have what we call the one-dimensional PIRDA structure where you have blobs which are extended and committed to, or we have the two-dimensional structure where you have blobs which are extended in two-dimension. If you look carefully, you see that actually this is also two-dimensional. The difference is that it's not extended by code in this dimension, and that has consequences. Okay. And then you just have to sample. It sounds easy, right? The question is where you are sampling from and how you are is. And then you just have to sample. It sounds easy, right? The question is where you are sampling from and how you do this sampling. So I wasn't saying anything about that. So let's just see that. So we were typically speaking of DAS, but itA-S, but it's actually a D-A plus an S. So there is the D-A part, which is a property of the network. It's a global property. The data is available. The data is not available. And to have the data available in the system, I have to disperse it in the system. So I have to send it out, different pieces to different nodes, which are custodying it, and only then I can start sampling. So this is the DA part. It's ensuring data ability, it is doing the sharding, nodes are only receiving pieces. 
And then there is the sampling part. Once the data is in custody at different nodes, you can start test sampling, asking for pieces and getting pieces. Little graphical representation. So you have a builder. It does, in the example I was doing it with the 2D encoding. So it does the 2D encoding. And then you have those beacon nodes in the system. And what you are doing, you are sending out these things, pieces of this, actually rows and columns, to different nodes. Someone is getting only two rows and two columns. Someone is getting a bit more. Someone is getting the whole thing because he's learning hundreds of validators and he's securing so much money, but it's better for him to get the whole thing. Plus, doing that, he can support the system. That's one side. That's just the data availability. Now you have to sample, and that's an individual node, and actually every individual node has to sample. That's his own view. So he's picking a few pieces, and then asking for those from custody. So you're getting this piece from here, that from there, that from there, and if it manages to sample, then things are good. Okay. So we have this kind of two sides of things, and you can see that on the network these are a little bit different. And we need them to behave differently. So we need them to be secure, robust, fast, and cheap. And we need sampling to be secure, robust, fast, and cheap. So we need the same properties for the two things. It's just different protocols. Okay. So I was saying that there is PIRDAS and there is FURDAS. And there is a difference in the data structure. Actually there are a number of differences. If you scan the QR code, we have a light up on FURDAS which is starting from PIRDAS. And some of these techniques apply to PIRDAS, some of them to FURDAS. So what you see here is PIRDAS. Some of these techniques apply to PIRDAS, some of them to FURDAS. What you see here is PIRDAS. In PIRDAS, you have one-dimensional extension, and that also means that when you are sampling, you cannot just get a small piece because it would not tell you probabilistically enough. You have to get a full column. So your sample is a column. Which is more data than a cell that you can sample if you have this two-dimensional extension. But to make this work, you need changes in the networking, changes in the other coding, changes in many things. So, let me just give you a comparison of PIRDAS and FURDAS. Actually let me go back and just go through this list. So we have different techniques here. The first category is about sampling, it's about how I'm selecting what to sample, how I'm selecting those small pieces that I will be looking for. Then the second category is about erasure coding and the using of the erasure coding. Because what I can do is if the data is erasure coded, I can use that as part of the transmission. So when I'm sending the data, I can already use the erasure coding to recover data and then send it on to others. And that's a very important property. We are always using erasure coding when we are doing transmission, actually. Take your mobile phone, take whatever. Here, we can use erasure coding very efficiently to send data. And when we are sending a row, we can use erasure coding very efficiently to send data. And when we are sending a row, we can take a piece and we can send it on a column. And when we have received half of a row, we can extend it to the whole row, for example. And then there are techniques which are protocol changes. And I'm not going through all of them. 
Just showing you the comparison. So peer-to-peer and foo-to-foos. One is cheap, robust, fast and secure. And the other is cheaper, more robust, faster and more secure. And I was inclined to stop here. But let's see a little bit more in detail. Okay. So FooDAS is cheaper. It's cheaper because it's sampling at the cell level. And the cell is 512 bytes, actually it's 590 bytes because it's the data plus the proof, which is still much smaller than a whole column. So whenever we are so the sampling becomes cheaper. It's more robust. Because it can do a little coding which is allowing what I was calling local repair. But what local means is important to understand. Local means in the sense of the code, in the sense of, let me go back here, in the sense of this square. So I only have a few data, I only have half of a column, and I can repair something. I can generate these. I have these and I can generate these. I have half of the pieces here and I can generate this one. I cannot do that up there. And that's a huge difference. That's a huge difference because that means that every single node can do repair. Not just big nodes who are having old data, but every single node. Actually in that construct we don't need big nodes, which by the way we have in Ethereum, so it's not like we don't have them. But we are not relying on them doing the repair in the data structure. And it's faster. Why is it faster? Because we are working on new protocols, both for the PubSub part and both for the sampling part. So and yes, is it more secure? So the light size is still in research. Should it be more secure actually when this is secure enough that we are changing to this? Actually it doesn't. It shouldn't be less secure. That's still in research. This is still in specification and changing. So let me not do more comparisons, because this will change. And actually, this is still in changing. OK. Sorry. Last slide. As I said, it is not just PIRDAS and FURDAS, but we can go beyond. So, I will show you one slide. slide. As I said, it is not just but we can go beyond. we have to rethink the data flow. So how is this data flow? What is happening? about is what is happening in the consensus layer. So, when a block is generated, how that is distributed. But actually, these blobs are coming as transactions from the world. So let's say one blob is coming in. And that goes to the execution layer. And the execution layer has the mempool, and the mempool is redistributing this, sharing this blob between nodes. Actually when it's blobs, it's not shared like when it's normal small transactions. When it's normal small transactions, these transactions are pushed to others. When it blobs, it's just information that I have this blob and then the others are pulling it. Overall effect, these blobs are spreading in the network. Let's see there's another blob that's spreading also in the network. block proposal. block proposal. block proposal. network. network. block proposal. the data is being distributed. the data is being distributed. the data is being distributed. the encoded version is getting spread in the consensus layer. I think you start to see the problem. It's not the problem, it's an inefficiency. We have an inefficiency in the system in which we are distributing the data here, and then we are actually redistributing the data there. And it's the same data. So we can optimize it a lot, which is good. We need these optimizations for Moore's law to happen. 
So we found another place where we can save a lot and have that curve go up. So what you can do in first instance is that we do the distribution up there, but whenever the node is realizing that he has a piece, he would need this and this, but he has the blobs, he has the data. He can just pop it up from the execution client. Because the data is also there. Now, of course, this plot is optimistic. Because I was putting this wide blob almost everywhere. And I was putting this red blob almost everywhere in the network. In reality, you don't have every blob everywhere in the execution layer. Because if we could do that, like now you actually have, but when we are scaling, we will not be able to do that. Because if we can do that, then we don't have a problem. Every node can handle all the data which is there. We want to scale beyond that. So in reality, you would have some blobs here, some blobs there, some blobs there. But then when you do that distribution, data can pop up. And that's one of the optimizations. Of course, that's a simple one. We can do much more complex ones where we have interaction between the two, and we are avoiding some of these duplications. But for that, we have to rethink the networking stack here, which is that P2P, here, which is using the P2P. We are doing different kinds of gossiping distribution. This is using GossipSub. This is using other protocols. So this is all. This can all be the same. But that's just that, I think. Okay. Thank you. . Okay. We are now here to take some Q&A. So starting from the top, there are blob fees that play blah, blah, blah, blah, blah, blah, Thank you. Thank you. We are now here to take some Q&A. Starting from the top. There are blob fees that play blah, blah, blah. There are blob fees that pay for blob storage. Is this verified at all? What happens if I get the fee but don't actually store the blob data? Okay. BROP fees that pay for BROP storage. I don't know. Yeah. I'm not sure what the question is about. No worries. Question answer, question asker if you have the question still. Yeah. Fair to clarify. Clarify. But I'll mark this as answered. What are standing problems that hinder current adoption? Okay. So it's just in implementation. So the peer-to-peer is in implementation. It's still compatibility problems. Peer-to-peer will be here very soon. It will not be in Paxra, but it will be in the next fork. For full-to-peer, we still have to figure out many things. So there was a question mark on the security, and we have to work on that to make sure that there are no issues with that. Well, why do we need 2D erasure coding? Can't we put all of the points on one polynomial and extend it? So putting them all on one point and extending it would be one huge code, one dimensional, and that would be very bad for repair in the sense that then you can only repair if you have all the data. And that just means that you have difficulties repairing. The 2D has this advantage of having this code local repair property so that the code distance is small, and you can repair pieces. So you can repair a single column, you can repair a single row, and that gives nodes the possibility to start repairing and contributing even before getting the full data. If full DAWs is way better than pure DA DOS, why not choose to implement it in the first place? As I said, peer DOS itself is changing. We were thinking of techniques for full DOS, then we realized they are actually applying to peer DOS, and then they are part of peer DOS. We're still changing that. 
FullDAS is still much more in research. It's a simple fact of life that there are too many pieces still moving in FullDAS to implement it now. It seems the FullDAS approach is much, much better than PeerDAS. Should we skip PeerDAS and implement FullDAS directly? Is there any other trade-off? So, FullDAS is using many things that PeerDAS does. So skipping is not a direct skip; it's not fully different. FullDAS is adding the 2D erasure code, which has advantages, but it also has complexities. Whether we skip or not, it might save something on the implementation, but we are also losing the graduality. So overall, I'm not sure it's good to skip, but we can think of it once FullDAS is fully evaluated. What are open research questions? So, the last slide was fully an open research question, and everything around that. On FullDAS, there are several questions around the sampling: how we can do the sampling safely, in the sense that Sybil nodes cannot attack the system, for example. So there are still things to clarify there, especially attacks, network partitioning, and other things, and how they affect security. There's some fighting and upvoting going on. This is fun. If 2D is better, would there be any benefits of 3D? Absolutely, multidimensional encoding is fun, but no, actually not. We just need the local repairability property, and we have enough of that in 2D; 3D would not contribute much more to that. Can FullDAS be implemented progressively? I think so, but we didn't define the plan yet. So it's something to think of, but we didn't define it yet. Cool. And then I think this question was already answered. I think we already had that. Right. Yeah, unless someone's asking again, or I just forgot to mark it as answered. Cool. So we have a minute and 34 seconds left. Oh, I was actually overspending my time. Yeah, no, but I guess there's one piece to this: you can give him feedback via the QR code, so we'll give you some time to tell him how great he did. Collect a card.
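The data-flow optimization discussed above, reusing a blob that already arrived through the execution-layer mempool instead of re-downloading its encoded form over consensus-layer gossip, can be sketched as a lookup-before-pull. This is hypothetical glue code under stated assumptions, not a real client API; the engine-API-style blob retrieval it gestures at is only an assumption here.

```python
# Minimal sketch: check the local EL blob pool before pulling a cell
# from CL peers. Every name below is hypothetical, not a client API.
from typing import Optional

class Node:
    def __init__(self):
        self.el_blob_pool = {}  # versioned_hash -> blob bytes (from tx gossip)

    def fetch_cell(self, versioned_hash: str, cell_index: int) -> Optional[bytes]:
        blob = self.el_blob_pool.get(versioned_hash)
        if blob is not None:
            # Blob already arrived via EL transaction gossip:
            # recompute the cell locally, no CL download needed.
            return self.compute_cell(blob, cell_index)
        # Fall back to pulling just this cell from CL peers.
        return self.request_cell_from_peers(versioned_hash, cell_index)

    def compute_cell(self, blob: bytes, cell_index: int, cell_size: int = 512) -> bytes:
        # Stand-in for erasure-encoding the blob and slicing out one cell.
        return blob[cell_index * cell_size:(cell_index + 1) * cell_size]

    def request_cell_from_peers(self, versioned_hash: str, cell_index: int) -> bytes:
        raise NotImplementedError("network path elided in this sketch")
```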
But other than that, I think our next session, well, I guess...", - "eventId": "devcon-7", - "slot_start": 1731575400000, - "slot_end": 1731577200000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1lz7gYMVKQCLb5914Y9OWEh4uWk8dcQ8g132fAtGQIuQ", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "csaba-kiraly" - ] + "joe-dobson" + ], + "eventId": "devcon-7", + "slot_start": 1731574800000, + "slot_end": 1731576600000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1Q9J9HaQFEJ3SPx50bpp3xIlPzaHzn4kJ8ESPA0lnVoI", + "resources_slides": "https://drive.google.com/file/d/13qm_aUZUBrab3BRLDDAzhCPB86v5zVeM/view" }, "vector": [ - 0, - 0, - 0, - 0, 6, 0, 0, @@ -368895,12 +367865,11 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -369281,6 +368250,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -369344,7 +368314,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -369419,7 +368388,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -369536,8 +368504,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -369635,8 +368605,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -369859,49 +368827,49 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "from-web2-security-with-love", - "sourceId": "VYEKSS", - "title": "From Web2 Security With Love", - "description": "Web3 organizations often rely on Web2 for infrastructure, communications, and development, yet their Web2 security posture is often neglected. This leaves them vulnerable to a wide range of adversaries, from well-funded sophisticated attackers to opportunistic script kiddies. In this talk,Joe Dobson will share hard-earned lessons from the Web2 trenches that can help secure Web3.Don’t make it easy for the adversary. Learn from the past: strengthen your Web2 security to safeguard your Web3 future.", - "track": "Security", - "type": "Talk", - "expertise": "Beginner", + "id": "future-of-onchain-credit-scoring-for-farmers", + "sourceId": "BBEDYL", + "title": "Future of Onchain Credit Scoring for Farmers", + "description": "This talk will illustrate how a farmer's farm records alongside verified government issued ID and mobile money statements (M-Pesa) form the basis for anonymized real time credit scoring onchain, as a foundational layer to build unique farmer DIDs. 
This talk features Antugrow, a startup in Kenya re-imagining credit scoring and record keeping for farmers.", + "track": "Real World Ethereum", + "type": "Lightning Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, - "doNotRecord": false, + "doNotRecord": true, "keywords": [ - "Intelligence" + "Agriculture" ], "tags": [ - "Security", - "Hacks", - "intelligence", - "Hacks", - "Security" + "Identity", + "agriculture", + "Identity" ], "language": "en", "speakers": [ - "joe-dobson" + "eddie-kago" ], "eventId": "devcon-7", - "slot_start": 1731574800000, - "slot_end": 1731576600000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1Q9J9HaQFEJ3SPx50bpp3xIlPzaHzn4kJ8ESPA0lnVoI" + "slot_start": 1731580200000, + "slot_end": 1731580800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/143aux2LnIoaZxJqy3DpwpFTohgfllg9LWtuYzwx2v78", + "resources_slides": "https://drive.google.com/file/d/1GxWSJZ93c26iDreHhRckS-2svFb6dWJs/view" }, "vector": [ - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -370646,15 +369614,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -370701,6 +369660,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -370900,10 +369860,8 @@ 0, 0, 0, - 2, 0, 0, - 2, 0, 0, 0, @@ -371010,6 +369968,9 @@ 0, 0, 0, + 2, + 0, + 0, 0, 0, 0, @@ -371213,6 +370174,8 @@ 0, 2, 0, + 0, + 0, 2, 0, 0, @@ -371231,41 +370194,51 @@ }, { "session": { - "id": "future-of-onchain-credit-scoring-for-farmers", - "sourceId": "BBEDYL", - "title": "Future of Onchain Credit Scoring for Farmers", - "description": "This talk will illustrate how a farmer's farm records alongside verified government issued ID and mobile money statements (M-Pesa) form the basis for anonymized real time credit scoring onchain, as a foundational layer to build unique farmer DIDs. This talk features Antugrow, a startup in Kenya re-imagining credit scoring and record keeping for farmers.", - "track": "Real World Ethereum", - "type": "Lightning Talk", + "id": "fuzzing-zero-knowledge-infrastructure", + "sourceId": "QYWS83", + "title": "Fuzzing Zero-Knowledge Infrastructure", + "description": "Zero-knowledge (ZK) infrastructure is highly complex and highly critical for the correct operation of L2 chains; that is, a single bug can result in massive financial and reputational damage. To find such potential million-dollar bugs before they are exploited, we have developed a novel fuzzing technique that can find logic flaws that impact liveness or safety of ZK infrastructure. 
Our fuzzer has already found 16 such issues in four ZK systems, namely Circom, Corset, Gnark, and Noir.", + "track": "Security", + "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, - "doNotRecord": true, - "keywords": [ - "Agriculture" - ], + "doNotRecord": false, "tags": [ - "Identity", - "agriculture", - "Identity" + "ZKP", + "Zero-Knowledge", + "Security", + "Fuzzing", + "Testing", + "metamorphic", + "Fuzzing", + "Security", + "Zero-Knowledge" ], - "language": "en", - "speakers": [ - "eddie-kago" + "keywords": [ + "Metamorphic", + "Testing" ], + "duration": 1352, + "language": "en", + "sources_swarmHash": "bcfdc7603511f26b65425f9d655cd88325107f27d65cce5a4f2e3ada344d1489", + "sources_youtubeId": "c0lmPEE6qDM", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6736c85374749a4b89b3ae2c", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731580200000, - "slot_end": 1731580800000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/143aux2LnIoaZxJqy3DpwpFTohgfllg9LWtuYzwx2v78" + "slot_start": 1731641400000, + "slot_end": 1731643200000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1C0qMB9Xtv-bWWVg8T0URvn0L0LP0y88aiS1n8-LmL1U", + "resources_slides": "https://drive.google.com/file/d/173Qm3wPWejv68uN44W31lJVp2sFSdTzy/view", + "speakers": [ + "valentin-wustholz" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -371629,14 +370602,13 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -372015,6 +370987,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -372025,6 +370999,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -372060,7 +371035,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -372088,6 +371062,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -372256,6 +371231,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -372572,14 +371548,12 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -372596,56 +371570,56 @@ }, { "session": { - "id": "fuzzing-zero-knowledge-infrastructure", - "sourceId": "QYWS83", - "title": "Fuzzing Zero-Knowledge Infrastructure", - "description": "Zero-knowledge (ZK) infrastructure is highly complex and highly critical for the correct operation of L2 chains; that is, a single bug can result in massive financial and reputational damage. To find such potential million-dollar bugs before they are exploited, we have developed a novel fuzzing technique that can find logic flaws that impact liveness or safety of ZK infrastructure. Our fuzzer has already found 16 such issues in four ZK systems, namely Circom, Corset, Gnark, and Noir.", - "track": "Security", + "id": "gas-limit-and-block-execution", + "sourceId": "LPLSDD", + "title": "Gas Limit and Block Execution", + "description": "The talk will focus on scaling L1 through the gas limit, with special attention to block execution, covering challenges and planned solutions. Topics include an overview of control over the gas limit, the current state of execution performance, and hardware comparisons. 
Key challenges will also be discussed, such as slot organization, state growth, and worst-case scenarios, including gas pricing issues.",
"track": "Core Protocol",
"type": "Talk",
"expertise": "Intermediate",
"audience": "Stakers/Validators",
"featured": false,
"doNotRecord": false,
"tags": [
"Core Protocol",
"Layer 1",
"Protocol Design",
"execution",
"layer",
"Core Protocol",
"Layer 1",
"Protocol Design"
],
"keywords": [
"gas limit",
"block execution",
"Execution Layer"
],
"duration": 1385,
"language": "en",
"sources_swarmHash": "77fa8da66a9f37c09763c3b438b62b7147685a96d8a29d1e7a7f4a7149776cb5",
"sources_youtubeId": "L10eJJoTJB4",
"sources_ipfsHash": "",
"sources_livepeerId": "",
"sources_streamethId": "6736c52f9dbb7a90e1893847",
"transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736c52f9dbb7a90e1893847.vtt",
"transcript_text": " Marek. Test, test. Okay. Hi, I'm Marek, Ethereum core developer from the Nethermind team. Today I'm going to focus on the gas limit and block execution. There are some voices saying that we've either given up on L1 execution, or that we need to move Ethereum to data centers and eliminate solo stakers to make Ethereum successful. I disagree with all of these statements. I don't want to focus here on suggesting a specific gas limit value. Instead, I want to show that there is significant untapped potential on the execution layer, and this potential will have to be unleashed at some point in time. At the same time, I will point out areas where we need to be careful when considering a gas limit increase, and I will mention challenges and potential solutions. All right, let's begin. So in 2021, Elon Musk was interested in cryptocurrency and claimed to know how to scale blockchains. He mentioned that he discussed with Dogecoin devs increasing block sizes by 10x and decreasing slot time by 10x, which would supposedly scale Dogecoin by 100x. Sounds easy, right? Just change one or two variables and we are done. So can we do the same for Ethereum? Increase the gas limit 10x and reduce slot time 10x? Yes, we can, but of course it's not that simple. The gas limit is a powerful variable. Increasing it leads to more transactions, more applications, generally greater crypto adoption. However, it has to be adjusted carefully, because it might impact decentralization and security. So there are a few areas we need to consider when increasing the gas limit. Disk usage: this stems from two things, history growth and state growth. History grows much faster than state; however, there is a straightforward path to remove it with EIP-4444. With the current gas limit, state doesn't seem to be a significant concern; however, we need to observe it closely with potential increases. Networking: larger blocks are more challenging to propagate and allow for more transactions, so potentially it will increase load on the mempool.
Other things: increasing the gas limit also affects syncing time, RPC, proof sizes for future light clients, and the size of archive nodes. However, today I want to focus on block execution, a little bit in isolation from other topics. Block execution is the main contributor to other hardware requirements, such as CPU or disk speed. Okay, but first of all, who controls the gas limit? Validators, of course, can vote for the desired gas limit. However, the block gas limit cannot increase or decrease by more than 1/1024 of its parent block gas limit. So whenever you propose a block, you can move Ethereum in your desired direction. In other words, if you think that 35 million is the correct gas limit for Ethereum, you can set this value in the config, and in every block you will move slightly in this direction. However, other validators can counteract it, bringing it back to 30 million. And this fact was used in an EIP proposed by Giulio from the Erigon team. He suggested modifying clients in such a way that there is a very small increase in every block. Thanks to that, we have better control over what's going on in the network with increases. So the control over the gas limit can be divided into two categories: local block building and external block building. Approximately nine percent of validators are local builders. In this case, execution client teams can set the default gas limit in the config, which can be overridden by the node operator. Most of the blocks are externally produced, around 91%, and in this case, consensus client teams can set the default gas limit, which can be overridden by validators. However, this is not enforced. So builders should follow this, but they do not have to. So we can say that builders have the final control over this value. So, I have seen a Twitter conversation about increasing the gas limit, saying that we should increase hardware requirements or we should make the EVM much faster and parallel and scale L1 in this way. Of course, we should try to optimize things as much as possible; there are many reasons to do so: the future potential of L1, L2s, RPCs. However, let's analyze the block execution performance on my mini computer, an Intel NUC 11. As you can see on the charts, this device can execute blocks with an average speed around 330 megagas per second, and blocks are being validated in just 50 milliseconds on average. So if we consider higher hardware requirements, let's take a look at a much more expensive machine, four times more expensive, used by one of my colleagues. And as you can see, blocks are being executed almost three times faster, with an average speed of 950 megagas per second, and execution time around 17 milliseconds. So at first glance, the discussion about hardware requirements seems relevant. Many computers are three times slower than the high-end consumer machine, and there are many machines in between that could be considered. However, let's pause for a moment. Ethereum slot time is 12 seconds, and we are talking about average execution in just 50 milliseconds. So what's the point of using much higher hardware when we are not even utilizing slower machines? So this suggests huge potential for mainnet; however, we know that if everything were that easy, we would already have a much higher gas limit.
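A small sketch of the gas-limit voting mechanics described above, assuming the usual strictly-less-than-parent/1024 validity bound on the child gas limit; the helper name and the drift loop are illustrative, not client code.

```python
# Sketch of the gas-limit adjustment rule: each block may move the
# limit by less than 1/1024 of its parent's limit, so a local builder
# drifts toward its configured target one bounded step at a time.

def next_gas_limit(parent_limit: int, target: int) -> int:
    """Largest legal move from parent_limit toward target,
    assuming the validity rule |child - parent| < parent // 1024."""
    max_delta = parent_limit // 1024 - 1  # strictly-less-than bound
    if target > parent_limit:
        return min(target, parent_limit + max_delta)
    return max(target, parent_limit - max_delta)

# Drifting from 30M toward 35M takes many blocks of ~29k gas each:
limit, blocks = 30_000_000, 0
while limit < 35_000_000:
    limit = next_gas_limit(limit, 35_000_000)
    blocks += 1
print(f"reached {limit} after {blocks} blocks")
```

This is also why other validators can counteract a drift: each of their own proposals gets to apply the same bounded step in the opposite direction.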
Earlier, I mentioned that I'm talking about block execution a little bit in isolation from other topics. And those other things are more concerning, like state growth, history growth, networking aspects. However, even in block execution, there are challenges. First of all, if we have a 12-second slot time, it doesn't mean that we have 12 seconds for execution. This is an obvious thing. And even if we ignore the networking aspect entirely, nodes need to be able to catch up with the tip of the chain. What's more, RPC providers must be able to process new blocks while serving heavy eth_call and other requests. And lastly, there is the consensus mechanism. So let's take a look at the diagram on the slide. Ethereum consensus requires strict timing for block execution. The attestation deadline is four seconds; however, ELs have much less time to process new blocks. First of all, there is lightweight validation on the CL side, and blocks need to be propagated across the network. Moreover, blocks often aren't revealed at the beginning of the slot due to timing games. And the solution to this is to reorganize Ethereum slots in such a way as to allow more time for execution. We have two solutions under consideration: EPBS, enshrined proposer-builder separation, and APS, attester-proposer separation. While the main goal of those changes is related to block building and the MEV space, there is an additional nice benefit on the execution side: we will have much more time for execution. I will use EPBS as the primary example; however, both solutions reorganize the slot in a more optimal way. So, in EPBS, the attestation deadline is set at the third second. However, attesters will have even up to 11 seconds to execute blocks without affecting consensus rules. So this is very useful in the context of worst cases. Okay, another thing that we should consider is, of course, state growth. So with the current gas limit, state doesn't grow rapidly. In the last three months it has increased four gigabytes in a Nethermind database. If you want to read more about state growth, there is excellent research from the Reth team. However, the worst case of most operations is essentially a function of block size: the bigger the block, the more operations you can fit in. However, in the case of state, we need to be mindful of the state database as well. In other words, the bigger the tree gets, the slower state access may become. So clients spend a lot of time trying to optimize things related to state. Here is an example from Nethermind, where operations are dominated by SLOAD and SSTORE. Another example from Besu shows a similar situation, and Reth seems to be occupied by state root calculation. Another thing related to state is that we know we will eventually need to transition to another tree, either Verkle or something post-quantum secure if there is a sudden breakthrough in research. We know that this transition process will be complex, and it will be easier with a smaller tree. So we need to be careful not to make this task too difficult by increasing the gas limit too aggressively. However, everything needs to be put in the context of data, and as Giulio from the Erigon team has shown, even if we double the gas limit and ship Verkle in four years, the transition will take an additional 13 days, which doesn't sound like a very bad outcome. The long-term solution beyond Verkle is of course state expiry, and recently Vitalik wrote a blog post about it. Block execution is strictly related to gas pricing. Gas pricing represents the time it takes to execute blocks and the other resources required to do so.
So we need to take into account contribution to state growth and history growth. And recently Vitalik proposed that we should increase gas prices for hashing functions, as blocks that are heavy with hashing are harder to prove than average blocks. So we benchmarked different operations, and you can see it in the table on the slide. We filled the block entirely with one of the operations, and as you can see, the differences in gas pricing can be substantial. For example, if we fill the block with the RIPEMD precompile, it executes at a speed of 1 gigagas per second on a very slow machine. If we do the same with simple ETH transfers, the speed is around 700 megagas per second. However, other operations can be even slower: ECRECOVER, 80 megagas per second; the point evaluation precompile, 65 megagas per second. This is important because the slowest operation should be treated as the bottleneck for increasing throughput, rather than relying solely on average execution. So let's talk more about worst cases. Client teams are getting better and better at executing average transactions. However, Ethereum security must account not just for the average case, but for worst cases as well. And by worst cases, I mean situations where an optimistic parallel EVM encounters as many conflicts as possible, where caches are being missed, where an attacker has found some slow operations in the EVM and crafted the block in a way that disrupts the network. So let's take one more look at the execution times of my Intel NUC 11. As you can see, the performance on different blocks can vary significantly. I marked a few spikes with red rectangles. The average execution is 50 milliseconds, but the maximum is 425 milliseconds. The known worst cases in all execution clients that are observed on real networks are mining contracts. Those contracts work in this way: they do a lot of storage writes and set values to zero. And by setting values to zero they get a refund, and thanks to that they can fill the block with excessive state access. So our team member Ben proposed an EIP to prevent such situations: EIP-7778, Prevent block gas smuggling. So, slow blocks can affect consensus. If attesters aren't able to meet the attestation deadline, they will miss attestations and it will impact chain health. What else? Slow blocks can affect block production, so a timeout may occur and it might impact liveness of the chain. And of course, the slower the operation, the more probability of DoS vulnerabilities. And I'm not saying that we have an immediate concern here. We need to analyse our worst cases, we need to understand them, we need to optimise them, we need to reprice operations that could be a bottleneck for increasing throughput. The long-term solution for the gas pricing issue could be Vitalik's proposal for multi-dimensional gas pricing. All these challenges are important, but there is the other side of the gas limit. The gas limit hasn't been increased for a long time. The last increase was done in April 2021, from 12.5 to 15 million. The increase from 15 to 30 was related to the introduction of the base fee mechanism, so while the gas limit is higher, the gas target is still the same. In the last three years, hardware improved, so it could be an argument to increase the gas limit. However, an even stronger argument is that Ethereum client software improved massively.
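A back-of-the-envelope check on the numbers above. Megagas per second is simply gas executed divided by execution time, and the bound that matters for raising the gas limit is the slowest payload, not the average one. The 15M-gas figure below is an assumption (a roughly target-sized block), not a number from the talk.

```python
# Throughput arithmetic behind the quoted figures; a sketch, not tooling.

def mgas_per_second(gas_used: int, seconds: float) -> float:
    return gas_used / seconds / 1e6

# Assumed ~15M-gas (target-sized) block executed in the 50 ms average:
print(mgas_per_second(15_000_000, 0.050))  # ~300, near the ~330 Mgas/s quoted

# Per-payload throughputs quoted in the talk (Mgas/s):
measured = {"RIPEMD precompile": 1000, "ETH transfers": 700,
            "ECRECOVER": 80, "point evaluation precompile": 65}
bottleneck = min(measured, key=measured.get)
worst_seconds = 30_000_000 / (measured[bottleneck] * 1e6)
print(f"a full 30M-gas block of {bottleneck}: {worst_seconds:.2f}s")
# ~0.46s -- this, not the 50 ms average, is what must fit the slot timing.
```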
So, to summarize: taking disk space aside, which will be solved with EIP-4444, I don't think that the discussion about hardware requirements is relevant. Ethereum can be run on mini computers, and it will stay this way even if we increase the gas limit a little bit. I see challenges. We cannot forget about state growth, about history growth. We need to reorganize the slot. We need to migrate to another tree. However, all of them seem solvable and are planned for in the Ethereum roadmap. We need to ask ourselves what data we need, what other problems we need to solve, and what the correct gas limit should be. I don't think that we've given up on L1 execution, and execution is not a bottleneck for scaling L1. Of course, I agree strongly with Vitalik's vision that Ethereum mainnet should be a strong base layer, and it definitely shouldn't take a hyperscaling approach or make any shortcuts. All the improvements in the clients, and in the protocol as well, will benefit L2s, allowing them to take the hyperscaling approach and use the full potential of the EVM, while Ethereum L1 should remain a strong, secure, and decentralized base layer. And that's all I want to say. Thank you, everyone. Okay, thank you, Marek, for a great summary of all the factors that can affect a gas limit increase. So we have extra time. Let's go through the first question. Please scan the QR code here, go to the bottom right, you'll see the live Q&A button there, click there, you can add questions, you can upvote. We'll go by the most upvoted. First question: what do you think about Reth saying they are fastest? Is this relevant? That's a tough question. So, actually, I discussed it with Georgios yesterday, and he got the same question at his presentation, and he said that actually Nethermind is the fastest now, and they are kicking our asses, so we need to work harder. And yeah, that is my comment about this. I hope it makes sense. I run Nethermind on my validator. Will parallel EVM ever become relevant for L1 nodes? I think so, but I think it will take time. So if we have multidimensional gas pricing that controls state growth, we could allow for more execution with a parallel EVM. What's more, a parallel EVM might be important if we cap transactions, so we would allow, for example, only 30-million-gas transactions, and then worst cases are better, basically. So, yeah, I think maybe at some point of time. Okay, the next question is: what's your opinion on pipelined state roots, state roots being calculated with one block delay? I think it makes sense, but I'm not sure if for L1; for L2, yes. But for L1, I'm not sure about the consequences for light clients. Next question: what tooling does Nethermind use to measure and benchmark megagas per second on a node, if the node is not in sync with the chain? So, if the node is not syncing with the chain, I'm not sure about this, but generally Nethermind uses tooling that monitors new payloads, so we measure the time it takes to execute a new payload in Nethermind. And once we have the time and we know how much gas was used in this block, we can calculate the speed. Maybe you can use that. Not sure. Next question: why gas per second, if each operation has different gas use? Do you think opcodes per second makes more sense? No, I think there is a problem with gas pricing. So ideally, gas pricing should represent the correct resource usage, but as I showed in this presentation, we have operations that don't follow that. So, yes.
I think we should still use megagas per second, but we should maybe improve our gas pricing in EVM. Cool. We have about three more minutes, so if you guys have more questions, please feel free to submit. But the last question we have here is, what do you think about increasing time spent on signature aggregation to allow faster finality instead of gas limit increase? Oh. I don't know. Sorry for not answering this question. Signature aggregation. I have no opinion. Cool. We have two more minutes left.", "eventId": "devcon-7", - "slot_start": 1731641400000, - "slot_end": 1731643200000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1C0qMB9Xtv-bWWVg8T0URvn0L0LP0y88aiS1n8-LmL1U", - "resources_slides": null, + "slot_start": 1731566400000, + "slot_end": 1731568200000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/17JZL3bUgGRPxJs5ybdBTY70V_NqPo7xH7Sc7QI5zw5A", + "resources_slides": "https://drive.google.com/file/d/1xDBhEUsI5mphjG5uLtmoZfhZp3s_KwAq/view", "speakers": [ - "valentin-wustholz" + "marekm25" ] }, "vector": [ - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -373392,9 +372366,6 @@ 0, 0, 0, - 6, - 6, - 0, 0, 0, 0, @@ -373404,14 +372375,14 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, + 6, 0, 0, 0, + 2, 0, 0, 0, @@ -373438,6 +372409,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -373467,7 +372439,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -373587,6 +372558,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -373636,7 +372608,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -373953,19 +372924,17 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -373977,48 +372946,45 @@ }, { "session": { - "id": "gas-limit-and-block-execution", - "sourceId": "LPLSDD", - "title": "Gas Limit and Block Execution", - "description": "The talk will focus on scaling L1 through the gas limit, with special attention to block execution, covering challenges and planned solutions. Topics include an overview of control over the gas limit, the current state of execution performance, and hardware comparisons. Key challenges will also be discussed, such as slot organization, state growth, and worst-case scenarios, including gas pricing issues.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Stakers/Validators", + "id": "gas-metering-comparing-appchain-rollups-vs-general-purpose-rollups", + "sourceId": "KXFHXJ", + "title": "Gas Metering: Comparing Appchain Rollups vs. General Purpose Rollups", + "description": "General purpose rollups, with all applications running in the same virtual machine, face specific constraints in their gas metering systems that appchain rollups do not.\r\n\r\nIn this lightning talk, we'll explore the differences and the design freedom in gas metering when your application isn't in an adversarial setting, avoiding potential attacks from newly deployed code. 
Discover the benefits and challenges of customized gas metering in appchain rollups.", + "track": "Layer 2", + "type": "Lightning Talk", + "expertise": "Expert", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Core Protocol", - "Layer 1", - "Protocol Design", - "execution", - "layer", - "Core Protocol", - "Layer 1", - "Protocol Design" + "Gas", + "Appchains", + "Mechanism design", + "metering", + "Appchains", + "Gas", + "Mechanism design" ], "keywords": [ - "gas limit", - "block execution", - "Execution Layer" + "Metering" ], - "duration": 1385, + "duration": 355, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "b2ee4a847959a118c5bafdece36856cc369872f4faa3be0a46189efad3c358a8", + "sources_youtubeId": "IsMxuWWsH_g", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736c52f9dbb7a90e1893847", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736c52f9dbb7a90e1893847.vtt", - "transcript_text": " Marek. Test, test. Okay. Hi, I'm Marek, Ethereum Curve developer from the Nevermind team. Today I'm going to focus on gas limit and block execution. There are some voices saying that we've either given up on L1 execution or we need to move Ethereum to data centers and eliminate solo stakers to make Ethereum successful. I disagree with all of these statements. I don't want to focus here on suggesting a specific gas limit value. Instead, I want to show that there is a significant untapped potential on execution layer, and this potential will have to be unleashed on some point of time. At the same time, I will point out areas where we need to be careful considering increasing gas limit and i will mention challenges and potential solutions all right let's begin so in 2021 elon musk was interested in cryptocurrency and claimed to know how to scale blockchains he mentioned that he discussed with dogecoin devs to increase block sizes by 10x and decrease slot time by 10x, which would supposedly scale Dogecoin by 100x. Sounds easy, right? Just change one or two variable and we are done. So can we do the same for Ethereum? Increase gas limit 10x and reduce slot time 10x? Yes, we can, but of course it's not that simple. Gas limit is a powerful variable. Increasing it leads to more transactions, more applications, generally greater crypto adoption. However, it has to be adjusted carefully because it might impact decentralization and security. adjusted carefully because it might impact decentralization and security. So there are a few areas when we need to consider when increasing gas limit. This usage, this stems from two things, history grow and state grow. History grows much faster than state. However there is a straightforward path to remove it with EIP for force. With the current gas limit, state doesn't seem to be significant concern. However, we need to observe it closely with potential increases. Networking, so larger blocks are more challenging to propagate and allow for more transactions. So potentially it will increase load on mempool. Other things, so increasing gas limit effects, syncing time, RPC, proof sizes for the future light clients, size of archive notes. However, today I want to focus on block execution a little bit with isolation from other topics. Block execution is the main contributor to other hardware requirements such as CPU or disk speed. Okay, but first of all who controls gas limit? So validators of course can vote for the desired gas limit. 
However, the block gas limit cannot increase or decrease by more than 1/1024 of its parent block gas limit. So whenever you propose a block, you can move Ethereum in your desired direction. In other words, if you think that 35 million is the correct gas limit for Ethereum, you can set this value in the config, and in every block you will move slightly in this direction. However, other validators can counteract it, bringing it back to 30 million. And this fact was used in an EIP proposed by Giulio from the Erigon team. He suggested modifying clients in such a way that there is a very small increase in every block. Thanks to that, we have better control over what's going on in the network with increases. So the control over the gas limit can be divided into two categories: local block building and external block building. Approximately nine percent of validators are local builders. In this case, execution client teams can set the default gas limit in the config, which can be overridden by the node operator. Most of the blocks are externally produced, around 91%, and in this case, consensus client teams can set the default gas limit, which can be overridden by validators. However, this is not enforced. So builders should follow this, but they do not have to. So we can say that builders have the final control over this value. So, I have seen a Twitter conversation about increasing the gas limit, saying that we should increase hardware requirements or we should make the EVM much faster and parallel and scale L1 in this way. Of course, we should try to optimize things as much as possible; there are many reasons to do so: the future potential of L1, L2s, RPCs. However, let's analyze the block execution performance on my mini computer, an Intel NUC 11. As you can see on the charts, this device can execute blocks with an average speed around 330 megagas per second, and blocks are being validated in just 50 milliseconds on average. So if we consider higher hardware requirements, let's take a look at a much more expensive machine, four times more expensive, used by one of my colleagues. And as you can see, blocks are being executed almost three times faster, with an average speed of 950 megagas per second, and execution time around 17 milliseconds. So at first glance, the discussion about hardware requirements seems relevant. Many computers are three times slower than the high-end consumer machine, and there are many machines in between that could be considered. However, let's pause for a moment. Ethereum slot time is 12 seconds, and we are talking about average execution in just 50 milliseconds. So what's the point of using much higher hardware when we are not even utilizing slower machines? So this suggests huge potential for mainnet; however, we know that if everything were that easy, we would already have a much higher gas limit. Earlier, I mentioned that I'm talking about block execution a little bit in isolation from other topics. And those other things are more concerning, like state growth, history growth, networking aspects. However, even in block execution, there are challenges. First of all, if we have a 12-second slot time, it doesn't mean that we have 12 seconds for execution. This is an obvious thing. And even if we ignore the networking aspect entirely, nodes need to be able to catch up with the tip of the chain. What's more, RPC providers must be able to process new blocks while serving heavy eth_call and other requests.
And lastly, there is the consensus mechanism. So let's take a look at the diagram on the slide. Ethereum consensus requires strict timing for block execution. The attestation deadline is four seconds; however, ELs have much less time to process new blocks. First of all, there is lightweight validation on the CL side, and blocks need to be propagated across the network. Moreover, blocks often aren't revealed at the beginning of the slot due to timing games. And the solution to this is to reorganize Ethereum slots in such a way as to allow more time for execution. We have two solutions under consideration: EPBS, enshrined proposer-builder separation, and APS, attester-proposer separation. While the main goal of those changes is related to block building and the MEV space, there is an additional nice benefit on the execution side: we will have much more time for execution. I will use EPBS as the primary example; however, both solutions reorganize the slot in a more optimal way. So, in EPBS, the attestation deadline is set at the third second. However, attesters will have even up to 11 seconds to execute blocks without affecting consensus rules. So this is very useful in the context of worst cases. Okay, another thing that we should consider is, of course, state growth. So with the current gas limit, state doesn't grow rapidly. In the last three months it has increased four gigabytes in a Nethermind database. If you want to read more about state growth, there is excellent research from the Reth team. However, the worst case of most operations is essentially a function of block size: the bigger the block, the more operations you can fit in. However, in the case of state, we need to be mindful of the state database as well. In other words, the bigger the tree gets, the slower state access may become. So clients spend a lot of time trying to optimize things related to state. Here is an example from Nethermind, where operations are dominated by SLOAD and SSTORE. Another example from Besu shows a similar situation, and Reth seems to be occupied by state root calculation. Another thing related to state is that we know we will eventually need to transition to another tree, either Verkle or something post-quantum secure if there is a sudden breakthrough in research. We know that this transition process will be complex, and it will be easier with a smaller tree. So we need to be careful not to make this task too difficult by increasing the gas limit too aggressively. However, everything needs to be put in the context of data, and as Giulio from the Erigon team has shown, even if we double the gas limit and ship Verkle in four years, the transition will take an additional 13 days, which doesn't sound like a very bad outcome. The long-term solution beyond Verkle is of course state expiry, and recently Vitalik wrote a blog post about it. Block execution is strictly related to gas pricing. Gas pricing represents the time it takes to execute blocks and the other resources required to do so. So we need to take into account contribution to state growth and history growth. And recently Vitalik proposed that we should increase gas prices for hashing functions, as blocks that are heavy with hashing are harder to prove than average blocks. So we benchmarked different operations, and you can see it in the table on the slide. We filled the block entirely with one of the operations, and as you can see, the differences in gas pricing can be substantial.
For example, if we fill the block with the RIPEMD precompile, it executes at a speed of 1 gigagas per second on a very slow machine. If we do the same with simple ETH transfers, the speed is around 700 megagas per second. However, other operations can be even slower: ECRECOVER, 80 megagas per second; the point evaluation precompile, 65 megagas per second. This is important because the slowest operation should be treated as the bottleneck for increasing throughput, rather than relying solely on average execution. So let's talk more about worst cases. Client teams are getting better and better at executing average transactions. However, Ethereum security must account not just for the average case, but for worst cases as well. And by worst cases, I mean situations where an optimistic parallel EVM encounters as many conflicts as possible, where caches are being missed, where an attacker has found some slow operations in the EVM and crafted the block in a way that disrupts the network. So let's take one more look at the execution times of my Intel NUC 11. As you can see, the performance on different blocks can vary significantly. I marked a few spikes with red rectangles. The average execution is 50 milliseconds, but the maximum is 425 milliseconds. The known worst cases in all execution clients that are observed on real networks are mining contracts. Those contracts work in this way: they do a lot of storage writes and set values to zero. And by setting values to zero they get a refund, and thanks to that they can fill the block with excessive state access. So our team member Ben proposed an EIP to prevent such situations: EIP-7778, Prevent block gas smuggling. So, slow blocks can affect consensus. If attesters aren't able to meet the attestation deadline, they will miss attestations and it will impact chain health. What else? Slow blocks can affect block production, so a timeout may occur and it might impact liveness of the chain. And of course, the slower the operation, the more probability of DoS vulnerabilities. And I'm not saying that we have an immediate concern here. We need to analyse our worst cases, we need to understand them, we need to optimise them, we need to reprice operations that could be a bottleneck for increasing throughput. The long-term solution for the gas pricing issue could be Vitalik's proposal for multi-dimensional gas pricing. All these challenges are important, but there is the other side of the gas limit. The gas limit hasn't been increased for a long time. The last increase was done in April 2021, from 12.5 to 15 million. The increase from 15 to 30 was related to the introduction of the base fee mechanism, so while the gas limit is higher, the gas target is still the same. In the last three years, hardware improved, so it could be an argument to increase the gas limit. However, an even stronger argument is that Ethereum client software improved massively. So, to summarize: taking disk space aside, which will be solved with EIP-4444, I don't think that the discussion about hardware requirements is relevant. Ethereum can be run on mini computers, and it will stay this way even if we increase the gas limit a little bit. I see challenges. We cannot forget about state growth, about history growth. We need to reorganize the slot. We need to migrate to another tree. However, all of them seem solvable and are planned for in the Ethereum roadmap. We need to ask ourselves what data we need,
what other problems we need to solve, and what the correct gas limit should be. I don't think that we've given up on L1 execution, and execution is not a bottleneck for scaling L1. Of course, I agree strongly with Vitalik's vision that Ethereum mainnet should be a strong base layer, and it definitely shouldn't take a hyperscaling approach or make any shortcuts. All the improvements in the clients, and in the protocol as well, will benefit L2s, allowing them to take the hyperscaling approach and use the full potential of the EVM, while Ethereum L1 should remain a strong, secure, and decentralized base layer. And that's all I want to say. Thank you, everyone. Okay, thank you, Marek, for a great summary of all the factors that can affect a gas limit increase. So we have extra time. Let's go through the first question. Please scan the QR code here, go to the bottom right, you'll see the live Q&A button there, click there, you can add questions, you can upvote. We'll go by the most upvoted. First question: what do you think about Reth saying they are fastest? Is this relevant? That's a tough question. So, actually, I discussed it with Georgios yesterday, and he got the same question at his presentation, and he said that actually Nethermind is the fastest now, and they are kicking our asses, so we need to work harder. And yeah, that is my comment about this. I hope it makes sense. I run Nethermind on my validator. Will parallel EVM ever become relevant for L1 nodes? I think so, but I think it will take time. So if we have multidimensional gas pricing that controls state growth, we could allow for more execution with a parallel EVM. What's more, a parallel EVM might be important if we cap transactions, so we would allow, for example, only 30-million-gas transactions, and then worst cases are better, basically. So, yeah, I think maybe at some point of time. Okay, the next question is: what's your opinion on pipelined state roots, state roots being calculated with one block delay? I think it makes sense, but I'm not sure if for L1; for L2, yes. But for L1, I'm not sure about the consequences for light clients. Next question: what tooling does Nethermind use to measure and benchmark megagas per second on a node, if the node is not in sync with the chain? So, if the node is not syncing with the chain, I'm not sure about this, but generally Nethermind uses tooling that monitors new payloads, so we measure the time it takes to execute a new payload in Nethermind. And once we have the time and we know how much gas was used in this block, we can calculate the speed. Maybe you can use that. Not sure. Next question: why gas per second, if each operation has different gas use? Do you think opcodes per second makes more sense? No, I think there is a problem with gas pricing. So ideally, gas pricing should represent the correct resource usage, but as I showed in this presentation, we have operations that don't follow that. So, yes, I think we should still use megagas per second, but we should maybe improve our gas pricing in the EVM. Cool. We have about three more minutes, so if you guys have more questions, please feel free to submit. But the last question we have here is: what do you think about increasing time spent on signature aggregation to allow faster finality, instead of a gas limit increase? Oh. I don't know. Sorry for not answering this question. Signature aggregation... I have no opinion. Cool.
We have two more minutes left.", + "sources_streamethId": "6736d34b1b0f83434d91b15e", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731566400000, - "slot_end": 1731568200000, + "slot_start": 1731583200000, + "slot_end": 1731583800000, "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/17JZL3bUgGRPxJs5ybdBTY70V_NqPo7xH7Sc7QI5zw5A", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1RCYOul1XxqYV0BU6bMqResTDK6sazsIhKVB2ctdgBKU", + "resources_slides": "https://drive.google.com/file/d/1U5GFX636xkiBxm-tcZCDY0wiBphJbfZ8/view", "speakers": [ - "marekm25" + "felipe-argento" ] }, "vector": [ @@ -374026,10 +372992,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -374777,29 +373743,10 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 6, 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -374819,14 +373766,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -374968,13 +373907,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -375031,6 +373963,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -375163,6 +374096,38 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -375329,23 +374294,14 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, 2, 0, 0, @@ -375353,50 +374309,58 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, 0 ] }, { "session": { - "id": "gas-metering-comparing-appchain-rollups-vs-general-purpose-rollups", - "sourceId": "KXFHXJ", - "title": "Gas Metering: Comparing Appchain Rollups vs. General Purpose Rollups", - "description": "General purpose rollups, with all applications running in the same virtual machine, face specific constraints in their gas metering systems that appchain rollups do not.\r\n\r\nIn this lightning talk, we'll explore the differences and the design freedom in gas metering when your application isn't in an adversarial setting, avoiding potential attacks from newly deployed code. Discover the benefits and challenges of customized gas metering in appchain rollups.", - "track": "Layer 2", + "id": "giga-staking-for-school-connectivity", + "sourceId": "ZU3AEJ", + "title": "Giga: Staking for school connectivity", + "description": "Giga is a joint venture between UNICEF and the ITU with the mission of connecting all the world's schools to the internet. Over the past years, a novel approach to fund the ongoing operating expenses of school connectivity has been running as a pilot in Rwanda and Giga is currently scaling up operations.\r\n\r\nAs part of this pilot, one staking node has been generating returns that are being spent on connectivity in a school in Rwanda. 
All of this has been done in compliance with local regulations.",
"track": "Real World Ethereum",
"type": "Lightning Talk",
"expertise": "Beginner",
"audience": "Community",
"featured": false,
"doNotRecord": false,
"tags": [
"Staking",
"Sustainability",
"Ethereum for Good",
"Social",
"impact",
"Ethereum for Good",
"Staking",
"Sustainability"
],
"keywords": [
"connectivity",
"schools",
"social impact"
],
"duration": 472,
"language": "en",
"sources_swarmHash": "4ba71b291e596a21671ea12501fbde925f46699752ba06188c0e734f1f72af2e",
"sources_youtubeId": "nqe0zeu_z7w",
"sources_ipfsHash": "",
"sources_livepeerId": "",
"sources_streamethId": "6735c98d9dbb7a90e17afef8",
"transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735c98d9dbb7a90e17afef8.vtt",
"transcript_text": " Hello everybody. Can you guys hear me well? Good. Thanks to the organizers for putting me right after Chris. It saves me a lot of introduction for what Giga is and does. A quick summary: we're a project about connecting every school in the world to the internet, and every young person to information, opportunity, and choice. A little bit of a summary of what we've achieved so far. We've connected about 14,500 schools, benefiting almost 8 million students, and we're currently active in 34 countries. So it's a big operation. One of the opportunities that we found, one of the challenges that we have, is, once a school is connected, funding the ongoing operational expenses of the school. So we're kind of looking for a sustainable financing model. And one of the projects that we've looked into is around staking. So I thought it'd be interesting to talk to a crowd that's very familiar with staking and talk a little bit about the challenges that we faced in implementing this in the real world. So what we want to do is create staking nodes and use the staking rewards to pay for the operational expenses of internet connectivity. So it's kind of like an endowment. A little bit about the size of the opportunity or challenge that we have. We did a case study a few years ago where we looked at what it would cost to connect the 86,000 schools in the 17 countries that we were active in at the time. And we discovered that it would cost approximately $305 million in operational expenses. So it's a very significant problem that we're looking to solve for. But the benefits also speak for themselves: with sustainable funding, about 25.5 million students and teachers will be able to benefit from connectivity. What we've done so far is we started with setting up a staking node and loading it up with 32 ETH. So you can look at the validator node there. And what we are using this funding for is to pay for connectivity for one particular school, the Murama Primary School in Rwanda. So about 1,000 students in Rwanda are benefiting from this connectivity at the moment. But we ran into some problems. So currently we have to jump through all kinds of regulatory hoops in making this system work.
So we have to off-ramp the ETH into fiat, then the ISP needs to send a paper invoice to a company that pays this invoice, and then the school gets connected. So you kind of lose track of the payment process in that way. And it's something that we feel hinders the scalability of this a lot. So there's no problem in sort of paying for one staking node ourselves and connecting one school, but doing this for all the remaining schools becomes a problem for impact investors if you can't prove the impact. So this is where we are. We have completed this pilot and we want to scale. But we're currently sort of stuck in the middle, where we're trying to set ourselves up for success. One of the things that we need to do for that is create an end-to-end crypto flow in cooperation with the central bank of Rwanda. And we have a pathway for doing that. And the second is to set up a pooled impact staking product. But I'm here today with a super specific call to action. And Vivian, I might ask you where that group of the African community was gathered. But we're essentially looking today for a crypto company based in Rwanda who could join us in this regulatory sandbox of the central bank and help us make this more scalable going forward. I'll leave a few minutes for questions. Thank you. Do we have anyone from Rwanda here? No one here? Nobody. It's okay. We have the whole hub. Okay. We have one person here, and one, I think, over there. Let's start with this gentleman with the white shirt. Yes. I've got a crypto company in Rwanda. I'll talk to you after. Oh, good. I told you it would work. So I'm from Myanmar, and we are having all kinds of crises. Can you speak up a little bit? Yeah. So I'm from Myanmar, and in Myanmar there are a lot of civil wars going on, and the schools need a lot of help with this internet connectivity issue. So the first challenge is not only the internet connectivity, but also that we only get around six or seven hours of electricity a day. So in that kind of scenario, will the Giga infrastructure work? That's the first question. And second, will there be a future plan to come to Myanmar to support this kind of internet infrastructure development? Yeah. So this particular project was really about raising the money to pay for operational expenses. There's of course this whole infrastructure question and electricity provisioning question that also needs to be solved for, which is part of a bigger problem, I suppose. We do intend to see if this solution will scale on a global level. So far, we've always hit this regulatory glass ceiling with this solution. So we are quite excited to be working with the central bank in Rwanda in trying to break that glass ceiling for this particular use case, and then hopefully using that as a pilot study to show central banks that there is an opportunity in this space in other countries as well. I think for the Rwanda one, they already have, you know, the support of the central bank, but in Myanmar the central bank and all kinds of financial transactions are always checked by this dictator military, so I think this will not happen in the next few years, I believe. Yeah, Myanmar sounds like a very complicated situation, I agree with that. Thanks for raising the question. Any other question? We have time for one last question. One over there.
So my question would be in terms of locating, for example, the institutions, you had mentioned that you have an ML model that helps you identify what is a school and what is not a school. Is that model available for use to other use cases? So are you working agriculture identity for farmers? I mean, the answer is yes. It will probably have to be a different model. But we do have a data team that looks into that. And I don't know if you mentioned it. Did you mention that we're also doing hospitals and health centers? Or am I spoiling something? Yeah, so we're also branching out into other public infrastructure, like hospitals and health centers. Okay, Luc, for you afterwards. Thank you. And thank you, Gerben. Thank you, Giga team. That was a lovely half an hour. Okay, that's another round of applause.", "eventId": "devcon-7", - "slot_start": 1731583200000, - "slot_end": 1731583800000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1RCYOul1XxqYV0BU6bMqResTDK6sazsIhKVB2ctdgBKU", - "resources_slides": null, + "slot_start": 1731577200000, + "slot_end": 1731577800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1rmmBw3SZZEyNNDi7PgdUEMlN6Wfogmt3EIpC8WZe-5I", + "resources_slides": "https://drive.google.com/file/d/1AhKmqFIfP4_kfIJKdenhbX1DVgOp8Ipk/view", "speakers": [ - "felipe-argento" + "gerben-kijne" ] }, "vector": [ @@ -375406,7 +374370,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -375772,9 +374735,8 @@ 0, 0, 0, - 6, - 0, 0, + 6, 0, 0, 0, @@ -376158,7 +375120,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -376277,8 +375238,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -376329,6 +375292,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -376378,7 +375342,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -376417,6 +375380,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -376482,7 +375446,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -376499,6 +375462,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -376512,7 +375476,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -376712,12 +375675,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 2, 0, @@ -376727,58 +375690,48 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "giga-staking-for-school-connectivity", - "sourceId": "ZU3AEJ", - "title": "Giga: Staking for school connectivity", - "description": "Giga is a joint venture between UNICEF and the ITU with the mission of connecting all the world's schools to the internet. Over the past years, a novel approach to fund the ongoing operating expenses of school connectivity has been running as a pilot in Rwanda and Giga is currently scaling up operations.\r\n\r\nAs part of this pilot, one staking node has been generating returns that are being spent on connectivity in a school in Rwanda. All of this has been done in compliance with local regulations.", + "id": "giga-undepin-to-connect-every-school-in-the-world", + "sourceId": "JXH3T3", + "title": "Giga: (UN)DePIN to connect every school in the world", + "description": "Giga (a startup built by UNICEF and ITU) has built a long-lasting friendship with the Ethereum community, starting w/ the 2019 Devcon launch of UNICEF's Crypto Fund, to the first Eth staking with the Government of Rwanda, putting schools onchain, and now working on a global Connectivity Credits Marketplace.\r\n \r\nBlockchain, and particularly Ethereum, is fundamental to scaling connectivity for the 1.8 billion people who aren't online. 
http://giga.global", "track": "Real World Ethereum", - "type": "Lightning Talk", + "type": "Talk", "expertise": "Beginner", "audience": "Community", "featured": false, "doNotRecord": false, + "keywords": [ + "Connectivity", + "real world digital assets", + "" + ], "tags": [ - "Staking", - "Sustainability", - "Ethereum for Good", - "Social", - "impact", + "DePIN", "Ethereum for Good", - "Staking", - "Sustainability" - ], - "keywords": [ - "connectivity", - "schools", - "social impact" + "Politics" ], - "duration": 472, "language": "en", - "sources_swarmHash": "4ba71b291e596a21671ea12501fbde925f46699752ba06188c0e734f1f72af2e", - "sources_youtubeId": "nqe0zeu_z7w", + "sources_swarmHash": "801527a603a3ccb989baca376f305b79064af4e39bdd8edde90678215994fdcb", + "sources_youtubeId": "45Ma4-p_rZc", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735c98d9dbb7a90e17afef8", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735c98d9dbb7a90e17afef8.vtt", - "transcript_text": " Hello everybody. Can you guys hear me well? Good. Thanks to the organizers for putting me right after Chris. It saves me a lot of introduction for what Giga is and does. A quick summary. So we're a project about connecting every school in the world to the internet and every young person to information, opportunity, and choice. A little bit of a summary of what we've achieved so far. We've connected about 14,500 schools benefiting almost 8 million students. And we're currently active in 34 countries. So it's a big operation. One of the opportunities that we found, one of the challenges that we have, is in once a school is connected, to fund the operational expenses of the school. So we're kind of looking for a sustainable financing model. And one of the projects that we've looked into is around staking. So I thought it'd be interesting to talk to a crowd that's very familiar with staking and talk a little bit about the challenges that we faced in implementing this in the real world. So what we want to do is we want to create staking nodes and use the staking rewards to pay for the operational expenses of Internet connectivity. So it's kind of like an endowment. A little bit about the size of the opportunity or challenge that we have. We did a case study a few years ago where we looked at what it would cost to connect the 86,000 schools in the 17 countries that we were active in at the time. And we discovered that it would cost approximately $305 million in operational expenses. So it's a very significant problem that we're looking to solve for. But the benefits also speak for themselves. So with sustainable funding, about 25.5 million students and teachers will be able to benefit from connectivity. What we've done so far is we started with setting up a staking node and loading it up with 32 ETH. So you can look at the validator node there. And what we are using this funding for is to pay for connectivity for one particular school, the Murama Primary School in Rwanda. So about 1,000 students in Rwanda are benefiting from this connectivity at the moment. But we ran into some problems. So currently we have to jump through all kinds of regulatory hoops in making this system work. So we have to off-ramp the ETH into fiat, then the ISP needs to send a paper invoice to a company that pays this invoice, and then the school needs to be connected. So you kind of lose track of the payment process in that way. 
And it's something that we feel hinders the scalability of this a lot. So there's no problem in sort of paying for one staking node ourselves and connecting one school, but doing this for all the remaining schools becomes a problem for impact investors if you can't prove the impact. So this is where we are. We have completed this pilot and we want to scale. But we're currently sort of stuck in the middle where we're trying to set ourselves up for success. One of the things that we need to do for that is create an end-to-end crypto flow in cooperation with the central bank of Rwanda. And we have a pathway for doing that. And the second is to set up a pooled impact staking product. But I'm here today with a super specific call to action. And Vivian, I might ask you where that group of the African community was gathered. But we're essentially looking today for a crypto company based in Rwanda who could join us in this regulatory sandbox of the central bank and help us make this more scalable going forward. I'll leave a few minutes for questions. Thank you. Do we have anyone from Rwanda here? We're not here. Nobody. It's okay. We have the whole hub. Okay. We have one person here, one, I think, over there. Let's start with this gentleman with the white shirt. Yes. I've got a crypto company in Rwanda. I'll talk to you after. Oh, good. I told you it would work. So I'm from Myanmar, and we are having all kinds of crises. Can you speak up a little bit? Yeah, so I'm from Myanmar, and in Myanmar there are a lot of civil wars going on, and the schools need a lot of help with this internet connectivity issue. So the first challenge is not only the internet connectivity, but also that we only get around six, seven hours of electricity a day. So in that kind of scenario, will the Giga infrastructure work? That's the first question. And second is, will there be a future plan to come to Myanmar to support this kind of internet infrastructure development? Yeah. So this particular project was really about raising the money to pay for operational expenses. There's of course this whole infrastructure question and electricity provision question that also needs to be solved for, which is part of a bigger problem, I suppose. We do intend to see if this solution will scale on a global level. So far, we've always hit this regulatory glass ceiling in this solution. So we are quite excited to be working with the central bank in Rwanda in trying to break that glass ceiling for this particular use case, and then hopefully using that as a pilot study to show central banks that there is an opportunity in this space in other countries as well. I think for the Rwanda one they already have, you know, the support of the central bank, but in Myanmar the central bank and all kinds of financial transactions are always checked by this dictator military, so I think this will not happen in, like, the next few years, I believe. Yeah, Myanmar sounds like a very complicated situation, I agree with that. Thanks for raising the question. Any other question? We have time for one last question. One over there. So I'm from Kenya, and I really appreciate the work that you are doing with Chris. So my question would be in terms of locating, for example, the institutions: you had mentioned that you have an ML model that helps you identify what is a school and what is not a school. Is that model available for use in other use cases?
So are you working agriculture identity for farmers? I mean, the answer is yes. It will probably have to be a different model. But we do have a data team that looks into that. And I don't know if you mentioned it. Did you mention that we're also doing hospitals and health centers? Or am I spoiling something? Yeah, so we're also branching out into other public infrastructure, like hospitals and health centers. Okay, Luc, for you afterwards. Thank you. And thank you, Gerben. Thank you, Giga team. That was a lovely half an hour. Okay, that's another round of applause.", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "christopher-fabian" + ], "eventId": "devcon-7", - "slot_start": 1731577200000, - "slot_end": 1731577800000, + "slot_start": 1731576000000, + "slot_end": 1731577200000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1rmmBw3SZZEyNNDi7PgdUEMlN6Wfogmt3EIpC8WZe-5I", - "resources_slides": null, - "speakers": [ - "gerben-kijne" - ] + "resources_presentation": "https://docs.google.com/presentation/d/1Kux95LlPqrqyaIMbQZgE8OhOIzJM8A61evcBSSNF7dY", + "resources_slides": "https://drive.google.com/file/d/1T_-tkKWxpusU0dt5GDCYxuwowurHRLFU/view" }, "vector": [ 0, @@ -377551,10 +376504,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -377661,7 +376611,6 @@ 2, 0, 0, - 2, 0, 0, 0, @@ -377712,7 +376661,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -377801,7 +376749,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -377871,6 +376818,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -377883,7 +376832,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -378117,44 +377065,45 @@ }, { "session": { - "id": "giga-undepin-to-connect-every-school-in-the-world", - "sourceId": "JXH3T3", - "title": "Giga: (UN)DePIN to connect every school in the world", - "description": "Giga (a startup built by UNICEF and ITU) has built a long-lasting friendship with the Ethereum community, starting w/ the 2019 Devcon launch of UNICEF's Crypto Fund, to the first Eth staking with the Government of Rwanda, putting schools onchain, and now working on a global Connectivity Credits Marketplace.\r\n \r\nBlockchain, and particularly Ethereum, is fundamental to scaling connectivity for the 1.8 billion people who aren't online. http://giga.global", - "track": "Real World Ethereum", - "type": "Talk", - "expertise": "Beginner", - "audience": "Community", + "id": "going-from-alzheimers-to-pandemics-bringing-floss-to-bio-testing", + "sourceId": "PZACPB", + "title": "Going from Alzheimer's to Pandemics: Bringing FLOSS to Bio Testing", + "description": "Varro has developed a unique semiconductor-based biosensor platform that detects pathogens in human breath and indoor air with significant improvements in speed, sensitivity, and cost over existing technologies. The platform will be offered via open source to expand its reach and accessibility. 
We will discuss the core technology and how it can be used to prevent the spread of disease and new pandemics around the world.", "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [], "keywords": [], "duration": 906, "language": "en", "sources_swarmHash": "d6b12801d76f8d3ecc49462a59a788d3cbbee618466e555a21e5473f02f9f91c", "sources_youtubeId": "fHaE0iv9Szs", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735ad659dbb7a90e1abcc2f", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ad659dbb7a90e1abcc2f.vtt", "transcript_text": " I'm sure after those last two talks, we're all thinking about locking ourselves in a closet to keep ourselves safe. Hopefully, by the time I'm done, you'll agree that there's some hope and that won't be necessary. So, my name is Tom Cirrito. I'm the CEO and co-founder of Varo. Varo is a biotechnology company focused on preventing the transmission of infectious diseases. We founded Varo in early 2020 with the recognition of two facts. First, the rate and spread of infectious diseases, of epidemics and pandemics, has been steadily increasing for over 20 years. Despite that, during the same time frame, there haven't been any significant innovations in preventing the spread of diseases. So why am I at DEVCON? Varo shares Vitalik's vision for DIAC and for decentralized pandemic preparedness. We believe in empowering individuals around the world by giving them access to technologies that allow them to protect themselves and their communities. VARO has built a platform to detect the presence of pathogens in aerosols in real time with exquisite sensitivity, using easy-to-use and affordable devices. Actually let me stay here for a second. So aerosols are the tiny droplets, about a micrometer in size, that we release when we breathe. These tiny droplets hang in the air, they can stay in the air for days. So when an infected person is in an indoor environment, like a room like this, for example, they release these aerosols, and they contain the bacteria or the viruses that they're sick with. And those just hang out in the air and wait for someone else to breathe them in. This is how the majority of infectious diseases, certainly respiratory diseases, are transmitted. Until now, there really hasn't been a way to detect the presence of pathogens in aerosols, and that's what the VARO platform is designed to do. So the basis of our biosensor platform is the micro-immuno-electrode. We call it the MIE. The MIE is essentially a modified semiconductor. What we do is we take an electrode and we attach an antibody-like molecule, which is what we call a nanobody, and that's what binds the virus or the pathogen that we're looking for to that electrode. This is a technology that was originally developed by neuroscientists at Washington University in St. Louis named John Cirrito and Carla Yudy. And the name is not a coincidence. John Cirrito is my brother. He's a professor of neurology at Washington University. And he originally developed the MIE technology to study brain chemistry in Alzheimer's disease.
He would implant the immunoelectrodes into the brains of mice in order to study the proteins that cause Alzheimer's. In 2020, John realized that the same electrode technology that he puts down into the brain of a mouse, he can stick up into the air to detect COVID. He pulled in another collaborator from Washington University, an engineer and aerosol scientist named Rajan Chakrabarty, and these are our scientific founders. And we've worked very closely with this team over the last four years to develop this platform to where it is today. So I'm going to show a brief video of our scientific founders talking about how we're leveraging the MIE platform to develop two devices that are designed to prevent the spread of infectious diseases. Those two devices are the breath-based diagnostic and the air biodetector. So can we play the video? This is going to be a game changer. You are not just looking at the current strains, what is prevalent right now. We are also getting ready for what could be in the future. Whether it's something we know, or it's an emerging pathogen, we can go to it very quickly within a span of weeks, pull out a new nanobody, put it on an electrode, and have it ready to go. It gives us another tool to work with in order to really get on top or ahead of something like COVID, so we won't be kind of slow like we were in the past. We're using this in two devices: the breathalyzer and the air quality monitor. If you wake up in the morning and you have a respiratory illness, you go to the pharmacy to get a test. The technician behind the counter doesn't have to be trained. They'll take this, they'll pull off the cap that activates the device, breathe into it. A single breath is enough to get a reading here. You then hand this back to the technician, who turns the knob. They then take this, plug it into a permanent device, and then within 60 seconds from when you blew into it, you get a result of whether you're positive or negative. The air quality monitor will determine when pathogens are present. It will also tell you when the pathogens are gone, so you know when you can re-enter a room, so you keep business open, you keep people safe and healthy, and you can still go about your daily lives. If you can identify that initial route of where the infected virus is being spread from, you can control the further spread of that virus. By deploying this device, we also want to ensure that the inequity which is prevalent in the world is in many ways addressed. Great. So I want to take a minute and talk about these two devices that we're developing. First is our breath-based diagnostic, what Carla called the breathalyzer. The breath-based diagnostic is capable of detecting multiple pathogens from a single breath in 60 seconds, with sensitivity that is comparable to PCR. It is a true point-of-care device. So you can imagine going to a pharmacy: you get a diagnosis of whatever the known pathogen is, you can get your medication before you leave, you get a faster diagnosis, and you can do all of this at a lower cost than current methods like swabs or PCR. We recently completed a clinical trial with this diagnostic device. And I don't need to go through all the details of this. I don't have time to do that today, although I'll be here afterwards if anybody wants to talk about this further. But the clinical trial was a great success.
It showed that we have exquisite sensitivity and specificity, and patients reported that they preferred this method of diagnosis, which is, of course, non-invasive, compared to PCR or the swabs that we've all become familiar with. Our second device is our air biodetector. So there's never been a device like this before. This is a device that will sit in a room like this, passively sampling the air in that room. So if somebody were to enter that room who were sick and infected, producing pathogenic aerosols. The air biodetector could sense those pathogens before you could infect most, if not all, of the people in that room. So this truly gives us the ability to interfere in the transmission of disease and prevent it in those environments where people are most susceptible, which is in indoor environments. During COVID, we know that the overwhelming majority of cases of COVID were transmitted in indoor environments through aerosols. So what does this mean for DIAC and what does VARO mean for DIAC? So both of our devices rely on the MIE technology, but the MIE is a true platform technology. And we believe that there are many applications for this, some of which we probably haven't even thought of yet. We want to empower people around the world to innovate with this technology and develop new things, new ways to do it. People that have different backgrounds, different ideas, different goals than what we have at Varo. By empowering people, by giving them access to this technology, we're giving them the ability to protect themselves, protect their families, and protect their communities. So how are we doing this? Varo has adopted an open source business model. We're making all of our plans, designs, and data publicly available. We've signed a pledge that we will not enforce our patents against third-party innovators. In fact, we're going to encourage people to adopt this technology. We're hiring an open source community manager to put all this information and make it available, package it up so that it's easy for people to access, and even promote people to come and enter into the open source community and learn about our technology and hopefully motivate them to take up this technology and use it themselves. We've embraced the principles of free and open source hardware and this has become sort of the core of what we're doing in Varo. This is the core of our business model. See, infectious diseases don't care about patents, right? They don't care about borders. They don't care about intellectual property rights. And we all know this, right? We all lived through the same hellish pandemic for the last four years. What we want to do at Varo is open up all of this technology. You know, we recognize that as a small company, it's going to take a lot of time for us to be able to access markets like Southeast Asia or Africa. Viruses, bacteria don't have those restrictions. So by empowering governments and companies and individuals around the world to practice our technology and to innovate around that technology, we're giving them the ability to protect themselves, protect their people, and protect their economies. It also allows us to ensure that important technologies like this can get into markets in low- and middle-income countries that may not normally have access to these types of technologies and hopefully address the gruesome inequity that we all saw during the pandemic, which Vitalik actually spoke about during his opening remarks. 
So typically in the pharmaceutical industry, patents are your competitive advantage. And I've been in this industry for my entire career. I've developed drugs and gotten them approved by the FDA. I've started about a dozen biotech companies. Patents have always been sort of the bedrock of the biotech community. But patents are a negative right. Patents basically give you the right to tell someone else they can't do something. What we're trying to do at Varo is turn that on its head. Instead of standing behind our patents and preventing innovation, we're going to leverage our patents and promote innovation. We're going to get all of this out into the world. Our competitive advantage, instead of our patents, will be our R&D. So we're based in St. Louis, Missouri. We're in this great area called the Cortex Innovation Community. And we're building a state-of-the-art manufacturing facility to produce MIEs. And our challenge, our competitive advantage, will be to produce the lowest cost, highest quality MIEs in the world. And then supply those to our innovators. Give them the ability to innovate and give them that secret sauce, the MIEs, that they can incorporate into their own devices. Or they can build capacity for their own countries. We're doing something else. I haven't been keeping up with my slides. There we go. So we're doing something else with our R&D platform that's important when we think about pandemic preparedness as well. We're creating a nanobody library. So you may remember I said at the beginning of my talk that nanobodies are the proteins that we link to our electrodes to make MIEs, and those are what bind to viruses and bacteria. We're creating a library with trillions of different nanobodies, so we can move very quickly against whatever the next emerging pathogen is, whether it's SARS-CoV-3 or monkeypox 2 or whatever comes along that threatens to become the next pandemic. Our platform, our R&D capability, will allow us to produce new MIEs against those emerging pathogens within a matter of weeks and deploy devices so that hopefully the next time this comes around, we can get out in front of it. We can either bend the curve or maybe even prevent the spread by getting those devices for those emerging pathogens out into the world. So at Varo, we're very passionate about preventing the spread of infectious diseases. We're grateful to Vitalik for his support, and for supporting a new business model for innovation in the biotech industry. So I will be here. I'll be out in the back. I have a few demos that I can show of the breath-based diagnostic device. I have some MIEs that I can show.
So feel free.", "eventId": "devcon-7", - "slot_start": 1731576000000, - "slot_end": 1731577200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Kux95LlPqrqyaIMbQZgE8OhOIzJM8A61evcBSSNF7dY" + "slot_start": 1731569700000, + "slot_end": 1731570600000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1JN8fHkrJUSmMwQcFE6vbbWGfFN8h8mUo1A7pu-plAU8", + "resources_slides": "https://drive.google.com/file/d/1LJ6LGQQPVs_iLSbMOLoZFwoHE0rfQly9/view", + "speakers": [ + "tom-cirrito-phd" + ] }, "vector": [ 0, + 6, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -378921,7 +377870,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -379025,7 +377973,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -379236,33 +378183,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -379464,6 +378384,32 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, 2, 0, 0, @@ -379471,8 +378417,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -379484,40 +378428,56 @@ }, { "session": { - "id": "going-from-alzheimers-to-pandemics-bringing-floss-to-bio-testing", - "sourceId": "PZACPB", - "title": "Going from Alzheimer's to Pandemics: Bringing FLOSS to Bio Testing", - "description": "Varro has developed a unique semiconductor-based biosensor platform that detects pathogens in human breath and indoor air with significant improvements in speed, sensitivity, and cost over existing technologies. The platform will be offered via open source to expand its reach and accessibility. We will discuss the core technology and how it can be used to prevent the spread of disease and new pandemics around the world.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "id": "governance-innovation-analysis-on-voter-behavior-in-blockchain-governance", + "sourceId": "ZKNSAL", + "title": "Governance Innovation: Analysis on Voter Behavior in Blockchain Governance", + "description": "As the first comprehensive examination of voter behavior in Web3, the following research explores two significant blockchain ecosystems, Curve Finance and Polkadot, using a novel quantitative methodology to decompose and highlight governance patterns.\r\n\r\nThe presented analysis shows, among other findings, a significant influence of market conditions on voter tendencies, diverse patterns relating to voting power accumulation, and a potential effect of financial incentives on voter participation.", + "track": "Coordination", "type": "Lightning Talk", - "expertise": "", - "audience": "Engineering", + "expertise": "Expert", + "audience": "Product", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 906, + "tags": [ + "Permissionless", + "Coordination", + "Governance", + "Decentralization", + "Game Theory", + "Tokenomics", + "voting", + "analytics", + "Coordination", + "Decentralization", + "Game Theory", + "Governance", + "Permissionless", + "Tokenomics" + ], + "keywords": [ + "Vote Escrow", + "Funding Allocation", + "Voter Analytics" + ], + "duration": 535, "language": "en", - "sources_swarmHash": "d6b12801d76f8d3ecc49462a59a788d3cbbee618466e555a21e5473f02f9f91c", - "sources_youtubeId": "fHaE0iv9Szs", + "sources_swarmHash": "8a6871d32bd8b80aedb0c02d15838b45bb8315fb3f851587e4f2362a09ca2690", + "sources_youtubeId": "wLw9Xvigdqs", "sources_ipfsHash": "", "sources_livepeerId": "", - 
"sources_streamethId": "6735ad659dbb7a90e1abcc2f", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ad659dbb7a90e1abcc2f.vtt", - "transcript_text": " Tanya Cushman Reviewer:\" Peter van de Ven I'm sure after those last two talks, we're all thinking about locking ourselves in a closet to keep ourselves safe. Hopefully, by the time I'm done, you'll agree that there's some hope and that won't be necessary. So, my name is Tom Cerrito. I'm a CEO and co-founder of Varo. Varo is a biotechnology company focused on preventing the transmission of infectious diseases. We founded Varo in early 2020 with the recognition of two facts. First, the rate and spread of infectious diseases, of epidemics and pandemics, has been steadily increasing for over 20 years. Despite that, during the same time frame, there haven't been any significant innovations in preventing the spread of diseases. So why am I at DEVCON? Varo shares Vitalik's vision for DIAC and for decentralized pandemic preparedness. We believe in empowering individuals around the world by giving them access to technologies that allow them to protect themselves and their communities. VARO has built a platform to detect the presence of pathogens in aerosols in real time with exquisite sensitivity, using easy-to- use and affordable devices. Actually let me stay here for a second. So aerosols are the tiny droplets, they're about a micrometer in size that we release when we breathe. These tiny droplets hang in the air, they can stay in the air for days. So when an infected person is in an indoor environment, like a room like this, for example, they release these aerosols, and they contain the bacteria or the viruses that they're sick with. And those just hang out in the air and wait for someone else to breathe them in. This is how the majority of infectious diseases, certainly respiratory diseases, are transmitted. Until now, there really hasn't been a way to detect the presence of pathogens and aerosols, and that's what the VARO platform is designed to do. So the basis of our biosensor platform is the micro-immuno-electrode. We call it the MIE. The MIE is essentially a modified semiconductor. What we do is we take an electrode and we attach an antibody-like molecule, which is what we call a nanobody, and that's what binds to the virus or the pathogen that we're looking for to that electrode. This is a technology that was originally developed by neuroscientists at Washington University in St. Louis named John Cerrito and Carla Yudy. And the name is not a coincidence. John Cerrito is my brother. He's a professor of neurology at Washington University. And he originally developed the MIE technology to study brain chemistry in Alzheimer's disease. He would implant the immunoelectrodes into the brains of mice in order to study the proteins that cause Alzheimer's. In 2020, John realized that the same electrode technology that he puts down into the brain of a mouse, he can stick up into the air to detect COVID. He pulled in another collaborator from Washington University, an engineer and aerosol scientist named Rajan Chakrabarty, and these are our scientific founders. And we've worked very closely with this team over the last four years to develop this platform to where it is today. So I'm going to show a brief video of our scientific founders talking about how we're leveraging the MIE platform to develop two devices that are designed to prevent the spread of infectious diseases. 
Those two devices are the breath-based diagnostic and the air biodetector. So can we play the video? This is going to be a game changer. You are not just looking at the current strains, what is prevalent right now. We are also getting ready for what could be in the future. Whether it's something we know, or it's an emerging pathogen, we can go to it very quickly within a span of weeks, pull out a new nanobody, put it on an electrode, and have it ready to go. It gives us another tool to work with in order to really get on top or ahead of something like COVID, so we won't be kind of slow like we were in the past. We're using this in two devices: the breathalyzer and the air quality monitor. If you wake up in the morning and you have a respiratory illness, you go to the pharmacy to get a test. The technician behind the counter doesn't have to be trained. They'll take this, they'll pull off the cap that activates the device, breathe into it. A single breath is enough to get a reading here. You then hand this back to the technician, who turns the knob. They then take this, plug it into a permanent device, and then within 60 seconds from when you blew into it, you get a result of whether you're positive or negative. The air quality monitor will determine when pathogens are present. It will also tell you when the pathogens are gone, so you know when you can re-enter a room, so you keep business open, you keep people safe and healthy, and you can still go about your daily lives. If you can identify that initial route of where the infected virus is being spread from, you can control the further spread of that virus. By deploying this device, we also want to ensure that the inequity which is prevalent in the world is in many ways addressed. Great. So I want to take a minute and talk about these two devices that we're developing. First is our breath-based diagnostic, what Carla called the breathalyzer. The breath-based diagnostic is capable of detecting multiple pathogens from a single breath in 60 seconds, with sensitivity that is comparable to PCR. It is a true point-of-care device. So you can imagine going to a pharmacy: you get a diagnosis of whatever the known pathogen is, you can get your medication before you leave, you get a faster diagnosis, and you can do all of this at a lower cost than current methods like swabs or PCR. We recently completed a clinical trial with this diagnostic device. And I don't need to go through all the details of this. I don't have time to do that today, although I'll be here afterwards if anybody wants to talk about this further. But the clinical trial was a great success. It showed that we have exquisite sensitivity and specificity, and patients reported that they preferred this method of diagnosis, which is, of course, non-invasive, compared to PCR or the swabs that we've all become familiar with. Our second device is our air biodetector. So there's never been a device like this before. This is a device that will sit in a room like this, passively sampling the air in that room. So if somebody were to enter that room who was sick and infected, producing pathogenic aerosols, the air biodetector could sense those pathogens before they could infect most, if not all, of the people in that room. So this truly gives us the ability to interfere in the transmission of disease and prevent it in those environments where people are most susceptible, which is in indoor environments.
During COVID, we know that the overwhelming majority of cases of COVID were transmitted in indoor environments through aerosols. So what does this mean for DIAC and what does VARO mean for DIAC? So both of our devices rely on the MIE technology, but the MIE is a true platform technology. And we believe that there are many applications for this, some of which we probably haven't even thought of yet. We want to empower people around the world to innovate with this technology and develop new things, new ways to do it. People that have different backgrounds, different ideas, different goals than what we have at Varo. By empowering people, by giving them access to this technology, we're giving them the ability to protect themselves, protect their families, and protect their communities. So how are we doing this? Varo has adopted an open source business model. We're making all of our plans, designs, and data publicly available. We've signed a pledge that we will not enforce our patents against third-party innovators. In fact, we're going to encourage people to adopt this technology. We're hiring an open source community manager to put all this information and make it available, package it up so that it's easy for people to access, and even promote people to come and enter into the open source community and learn about our technology and hopefully motivate them to take up this technology and use it themselves. We've embraced the principles of free and open source hardware and this has become sort of the core of what we're doing in Varo. This is the core of our business model. See, infectious diseases don't care about patents, right? They don't care about borders. They don't care about intellectual property rights. And we all know this, right? We all lived through the same hellish pandemic for the last four years. What we want to do at Varo is open up all of this technology. You know, we recognize that as a small company, it's going to take a lot of time for us to be able to access markets like Southeast Asia or Africa. Viruses, bacteria don't have those restrictions. So by empowering governments and companies and individuals around the world to practice our technology and to innovate around that technology, we're giving them the ability to protect themselves, protect their people, and protect their economies. It also allows us to ensure that important technologies like this can get into markets in low- and middle-income countries that may not normally have access to these types of technologies and hopefully address the gruesome inequity that we all saw during the pandemic, which Vitalik actually spoke about during his opening remarks. So typically in the pharmaceutical industry, patents are your competitive advantage. And I've been in this industry for my entire career. I've developed drugs and gotten them approved by the FDA. I've started about a dozen biotech companies. Patents have always been sort of the bedrock of the biotech community. But patents are a negative right. Patents basically give you the right to tell someone else they can't do something. What we're trying to do at Varo is turn that on its head. Instead of standing behind our patents and preventing innovation, we're going to leverage our patents and promote innovation. We're going to get all of this out into the world. Our competitive advantage instead of our patents will be our R&D. So we're based in St. Louis, Missouri. We're in this great area called the Cortex Innovation Community. 
And we're building a state-of-the-art manufacturing facility to produce MIEs. And our challenge, our competitive advantages, will be to produce the lowest cost, highest quality MIEs in the world. And then supply those to our innovators. Give them the ability to innovate and give them that secret sauce, the MIEs, that they can incorporate into their own devices. Or they can build capacity for their own countries. We're doing something else. I haven't been keeping up with my slides. build capacity for their own countries. We're doing something else. I haven't been keeping up with my slides. There we go. So we're doing something else with our R&D platform that's important when we think about pandemic preparedness as well. We're creating a nanobody library. So you may remember I said at the beginning of my talk that nanobodies are the proteins that we link to our electrodes to make MIEs, and those are what bind to viruses and bacteria. We're creating a library with trillions of different nanobodies. very quickly against whatever the next emerging pathogen is, whether it's SARS-CoV-3 or monkeypox 2 or whatever comes along that threatens to become the next pandemic. Our platform, our R&D capability, will allow us to produce new MIEs against those emerging pathogens within a matter of weeks and deploy devices so that hopefully the next time this comes around, we can get out in front of it. We can either bend the curve or maybe even prevent the spread by getting those devices for those emerging pathogens out into the world. So at Varo, we're very passionate about preventing the spread of infectious diseases. We're grateful for Vitalik for his support and for his support in supporting a new business model for innovation in the biotech industry. So I will be here. I'll be out in the back. I have a few demos that I can show of the the breath-based diagnostic device. I have some MIEs that I can show. 
So feel free.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731569700000, - "slot_end": 1731570600000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1JN8fHkrJUSmMwQcFE6vbbWGfFN8h8mUo1A7pu-plAU8", - "resources_slides": null, + "slot_start": 1731489000000, + "slot_end": 1731489600000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1hyhPIjZoL4CayjCbBks0Dvhf-v1OSKN0dKkek0vNdSE", + "resources_slides": "https://drive.google.com/file/d/1ysoWwrm94ML3YCcOHq2Fhl2vnvZ-PMPO/view", "speakers": [ - "tom-cirrito-phd" + "tanisha-katara" ] }, "vector": [ 0, - 6, 0, 0, 0, @@ -379528,6 +378488,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -380272,6 +379233,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -380307,6 +379269,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -380335,6 +379298,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -380357,11 +379321,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -380414,6 +379380,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -380525,6 +379492,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -380618,6 +379586,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -380820,25 +379789,12 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -380852,52 +379808,41 @@ }, { "session": { - "id": "governance-innovation-analysis-on-voter-behavior-in-blockchain-governance", - "sourceId": "ZKNSAL", - "title": "Governance Innovation: Analysis on Voter Behavior in Blockchain Governance", - "description": "As the first comprehensive examination of voter behavior in Web3, the following research explores two significant blockchain ecosystems, Curve Finance and Polkadot, using a novel quantitative methodology to decompose and highlight governance patterns.\r\n\r\nThe presented analysis shows, among other findings, a significant influence of market conditions on voter tendencies, diverse patterns relating to voting power accumulation, and a potential effect of financial incentives on voter participation.", - "track": "Coordination", - "type": "Lightning Talk", - "expertise": "Expert", - "audience": "Product", + "id": "grandine-on-windows", + "sourceId": "SUTU99", + "title": "Grandine on Windows", + "description": "In this talk, the speaker will discuss the problems encountered in porting Grandine, an Ethereum consensus client, to Windows systems and the solutions from the perspectives of language, engineering, and cross-platform. The speaker found that these problems are common to the current Ethereum infrastructure based on the Rust language. 
Finally, the speaker will summarize and look forward to the development of Ethereum clients based on the Rust language, especially from the point of cross-platform.", + "track": "[CLS] EPF Day", + "type": "Talk", + "expertise": "Beginner", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Permissionless", - "Coordination", - "Governance", - "Decentralization", - "Game Theory", - "Tokenomics", - "voting", - "analytics", - "Coordination", - "Decentralization", - "Game Theory", - "Governance", - "Permissionless", - "Tokenomics" + "Best Practices", + "Core Protocol", + "Languages" ], "keywords": [ - "Vote Escrow", - "Funding Allocation", - "Voter Analytics" + "Rust", + "Client", + "Engineering" ], - "duration": 535, + "duration": 759, "language": "en", - "sources_swarmHash": "8a6871d32bd8b80aedb0c02d15838b45bb8315fb3f851587e4f2362a09ca2690", - "sources_youtubeId": "wLw9Xvigdqs", + "sources_swarmHash": "3c373e0cffaa2ece62499d889302699c6414cc3b3651b30fc5031e2b0dd91bff", + "sources_youtubeId": "8GcL9zQdrrQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6734602e9dbb7a90e1626bf0", "eventId": "devcon-7", - "slot_start": 1731489000000, - "slot_end": 1731489600000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1hyhPIjZoL4CayjCbBks0Dvhf-v1OSKN0dKkek0vNdSE", - "resources_slides": null, + "slot_start": 1731481200000, + "slot_end": 1731482100000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1W4lSdrWzgMoJHrCdD1XG6PWUZq7B7QBp09iTEJj9lH0", + "resources_slides": "https://drive.google.com/file/d/1W2PWD0BKpvQqWTqMjMzb5gBqDM3R7hAM/view", "speakers": [ - "tanisha-katara" + "jin-mingjian" ] }, "vector": [ @@ -380912,11 +379857,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -381660,8 +380605,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -381670,6 +380613,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -381681,6 +380625,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -381696,7 +380641,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -381725,7 +380669,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -381754,7 +380697,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -381807,7 +380749,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -381920,7 +380861,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -382014,7 +380954,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -382220,10 +381159,11 @@ 0, 2, 0, + 2, + 0, 0, 0, 0, - 2, 0, 0, 0, @@ -382237,42 +381177,39 @@ }, { "session": { - "id": "grandine-on-windows", - "sourceId": "SUTU99", - "title": "Grandine on Windows", - "description": "In this talk, the speaker will discuss the problems encountered in porting Grandine, an Ethereum consensus client, to Windows systems and the solutions from the perspectives of language, engineering, and cross-platform. The speaker found that these problems are common to the current Ethereum infrastructure based on the Rust language. Finally, the speaker will summarize and look forward to the development of Ethereum clients based on the Rust language, especially from the point of cross-platform.", - "track": "[CLS] EPF Day", + "id": "grapheneos-a-brief-introduction-to-private-and-secure-android", + "sourceId": "QK3ZTL", + "title": "GrapheneOS: a brief introduction to private and secure Android", + "description": "Smartphones have become an essential part of our lives. The operating systems on smartphones act like a boundary layer between personal data and a plethora of untrusted code, but how easy is it to penetrate this boundary? 
We present GrapheneOS - the privacy and security-focused operating system developed as a non-profit open-source project. We will focus on some state-of-the-art GrapheneOS features such as low-level USB-C control, hardened memory allocator, and Sandboxed Google Play.", + "track": "Cypherpunk & Privacy", "type": "Talk", "expertise": "Beginner", "audience": "Engineering", "featured": false, - "doNotRecord": false, - "tags": [ - "Best Practices", - "Core Protocol", - "Languages" - ], + "doNotRecord": true, "keywords": [ - "Rust", - "Client", - "Engineering" + "Android" + ], + "tags": [ + "Privacy", + "Security", + "Mobile", + "android", + "Mobile", + "Privacy", + "Security" ], - "duration": 759, "language": "en", - "sources_swarmHash": "3c373e0cffaa2ece62499d889302699c6414cc3b3651b30fc5031e2b0dd91bff", - "sources_youtubeId": "8GcL9zQdrrQ", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6734602e9dbb7a90e1626bf0", - "eventId": "devcon-7", - "slot_start": 1731481200000, - "slot_end": 1731482100000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1W4lSdrWzgMoJHrCdD1XG6PWUZq7B7QBp09iTEJj9lH0", - "resources_slides": null, "speakers": [ - "jin-mingjian" - ] + "hulk", + "maade" + ], + "eventId": "devcon-7", + "slot_start": 1731486000000, + "slot_end": 1731487800000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/105h0erRlmvaHWuoC8pgHHPTJmdK7CiGkTOcyb1Vs4Nw", + "resources_slides": "https://drive.google.com/file/d/1TmNUJgwWoxHx2V1WXJIN5AsiCx7gNCHI/view" }, "vector": [ 0, @@ -382280,6 +381217,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -382290,7 +381228,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -382653,6 +381590,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -383022,7 +381960,7 @@ 0, 0, 0, - 0, + 6, 0, 0, 0, @@ -383057,7 +381995,22 @@ 0, 0, 0, - 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -383369,28 +382322,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -383611,38 +382543,42 @@ }, { "session": { - "id": "grapheneos-a-brief-introduction-to-private-and-secure-android", - "sourceId": "QK3ZTL", - "title": "GrapheneOS: a brief introduction to private and secure Android", - "description": "Smartphones have become an essential part of our lives. The operating systems on smartphones act like a boundary layer between personal data and a plethora of untrusted code, but how easy is it to penetrate this boundary? We present GrapheneOS - the privacy and security-focused operating system developed as a non-profit open-source project. We will focus on some state-of-the-art GrapheneOS features such as low-level USB-C control, hardened memory allocator, and Sandboxed Google Play.", - "track": "Cypherpunk & Privacy", + "id": "growing-the-biomes-gdp-using-digital-matter-and-smart-items", + "sourceId": "AZCYRS", + "title": "Growing The Biomes GDP Using Digital Matter & Smart Items", + "description": "Biomes is growing the virtual world with the largest GDP. As a fully onchain 3D voxel world, every single action in Biomes -- mining, building, crafting, even moving -- is a transaction on the Redstone L2. \r\n\r\nWe will share stories how we're working to grow the GDP of Biomes, what is working and what isn't. 
We will also share examples and ideas for onchain smart items in the Biomes world enabled by smart contracts.", "track": "[CLS] MUD Community-Led Session, by 0xPARC", "type": "Talk", "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "Gaming", "Autonomous World", "Autonomous World", "Gaming" ], "keywords": [], "duration": 1275, "language": "en", "sources_swarmHash": "05306df31b1eabf2bcbcbda8022bb6d7f8dc597d11459ea4ba143bb63e4dca6a", "sources_youtubeId": "Y5VCidGZgJo", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735ac844ccb22799e395271", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ac844ccb22799e395271.vtt", "transcript_text": " Hello, I'm Dhrumil. I work on biomes. Biomes, you probably played it in the gaming corner over there, or maybe you saw the demo earlier in the morning. You can also just go to biomes.aw and actually play it. So today we're going to talk about growing the biomes GDP, which is the primary thing we're trying to prove out with biomes, and two things we want to use to try and achieve those goals. Before we start, we're going to take just like a minute to think about why we're doing this. Most people who view Ethereum, they view it as an open financial system. If you view it this way, you care about a particular set of things. You care about stablecoins, coins, RWAs, treasuries, wallets, and payments. But this is kind of the weird room. That's why it's in a classroom. And it's a bit of a different scene. It's not interested in moving our existing world on chain. Rather, it's interested in trying to uncover a totally new and weird future that's very different from the world that exists today. And if I was going to sum up the core question that's being asked in this room, okay, I think people probably will have a problem with me speaking on behalf of the room, but I will do it. If Bitcoin was able to birth digital coins that people treat like real valuable coins, can we basically 100x this and birth a digital planet that people treat like a real planet? And I think the short answer is that I think we can. And I think it actually might end up being much more simple and obvious than we might have thought before. There might be a line of sight to this. I think what we should just do is we should take ideas that already work, and we should just reshape them, right? We should reshape them from being built for finance and just make them work with virtual worlds, right? So we've got to take the idea of scarce digital coins and convert that to scarce digital matter. We've got to take tamper-proof IOUs and turn that into tamper-proof physics, and we've got to take smart contracts and turn that into smart items. And that's basically what this talk will be about. And we're going to actually make this concrete with examples of us trying to do this within biomes. And I hope that makes this much more legible. And the claim with those three pieces, the claim is that the digital matter and smart items can birth a crazy new asset class, the weirdest asset class the world has seen. And that, if it gets mindshared, will unlock autonomous worlds.
It'll make them super obvious, super visceral, and you'll never have to answer the question of why you put this on chain. So to make things concrete, let's look at biomes. On the surface, biomes is just Minecraft with everything on chain. You can, you've probably tried it out. The items, players, physics, all the actions, you can see it move and mine and build. It's all on chain. You know, right now there's like a client optimization thing going on to make this work. Sometimes there'll be bugs. But, you know, I think you've got to press your T dot wherever you find them to make Cori go mainnet faster. And then this will not happen. And what's been happening with biomes ever since we have sort of been in our alpha phase the last month is it's coming alive, the world is coming alive, there's a lot more activity, we're sort of seeing it go. The transactions are up and to the right, you know, the players up and to the right, GDP up and to the right, everything bullish. There's people like Ben who are spending five hours building something, Deki spending eight hours plus days mining stuff. You know, so people are doing stuff. Redstone is like sometimes in the top five chains on rollup.wtf, which is really funny considering this like barely ever hits 15 concurrent players, right? And Ditto is a community member who made this grand claim that biomes feels like Ethereum but more playful. And so what we're going to do today is we're going to go down the rabbit hole. Why are people so excited? On the surface, it just looks like on-chain Minecraft. Why are we even doing this, right? The first concept is digital matter, the thing we brought up in the beginning. Think of digital matter as a playful vision of blockchain-created scarcity. So while in Bitcoin, you have scarce coins, in biomes, you have scarce crops and wood and ore and land, and in Bitcoin, you have tamper-proof IOUs, in biomes, you have tamper-proof physics. And what this means is, if there is a tree, I can't just snap my fingers and mint more trees arbitrarily. The only way for there to be more trees is if I farm it according to the physics, right? So there's always like, wood is actually valuable. And if you do like the weird thought experiment, you might be like, if Bitcoin was on AWS, it could only be worth so much. But Bitcoin with scarcity and tamper-proof rules could be worth a trillion dollars. And if you go a bit crazy, why can't the same happen with digital wood, right? You have, like, Ben, who maybe thinks one sakura wood will be worth $31,000. So like, this can happen, you just need to believe, right? And just like having lots of scarce Bitcoin grants you power in Bitcoin's world, controlling lots of the scarce resources and land lets you build businesses and be effective in the biomes world. The second thing we're going to look at is smart items. So we've got digital matter. Now we're going to look at smart items. Smart items present a playful vision of smart contracts. You can think of them as items with functionality in the physics that you can program with a smart contract. They make it really easy for players to build real economies for anything in the world. To visualize it, think of a smart contract as a chip. And you can insert a chip into an item, like a chest, to program it. So if I have my Uniswap chip and I put it into a chest, it turns into a Uniswap chest. And at this point, if people mine ores around the world, they can put ores in the chest and swap it for some tokens according to the AMM prices.
Or they can give tokens to the chest and get ores according to AMM-set prices. And you can take this concept and basically apply it to all the different kinds of objects in the world. You can imagine I have a door and I turn it into a token gate, right? I need the token to actually open it. So what you just did here is you gave the Homer Simpson treatment to Ethereum, where if you told Homer Simpson what a token gate is, that's what he would think it is, right? And so now you're taking all of these smart contract patterns and you're just making them much more relatable for a more normal audience. So you can turn a door into a token gate, a force field, a cart, a bed. And with all of these items, you can build a lot of cool stuff. And these are all real examples of things that are already being built within Biomes. The Bazaar is the Biomes version of Uniswap. It's this shop with a lot of liquidity where you can trade all kinds of items for coins. The funny thing is, as soon as we made the Bazaar, this other group put up a relatively crappy shop next to it. And it's like the SushiSwap of Biomes, right? And it's trying to vampire attack the Bazaar and take all the liquidity. You also have the Sakura Temple, where there's this big, big temple where you donate sakura wood to get this membership NFT. They use all that wood to build the largest sakura tree in the world, and all future sakura meme events that happen will probably gate access based on whether you donated to the sakura tree and got the NFT. The Castle Hotel Sakura Inn, they give you safe spaces to sleep and recharge your player without worrying about getting griefed. And in return, you just sort of have to pay rent. The Pyramid Arena and Parkour Challenge are going to host a bunch of games, and these games will use smart items to have betting and prize pools and everything. So in a way, you can basically think of these as deploying dApps inside of a virtual world, right? And third, if you have digital matter and you have smart items, you have a new asset class. And I think it will be the craziest asset class that crypto has ever seen, maybe the world has ever seen. And I think when that asset class gets mindshare is when we no longer have a classroom and we have a building. So, sort of imagine a chest, right? And I can program a chest with a smart contract. Imagine I program it such that when a player gets gold ore from the world, which is scarce, and they put it in, it mints a coin. Right? So now you have a coin that is backed by the actual gold in the virtual world. And if I put the coin in, it burns the coin and lets me take out a gold ore. And every time this happens, the chest takes a small fee, and it uses that fee to incentivize players to charge the force field and defenses of the chest to make sure it cannot be broken into. If this coin rises in price, let's say it's even worth like 10 million or something, it becomes extremely, extremely obvious why all the gold ores need to be provably scarce. Because if I could just mint more gold ores out of thin air, I could just put them in the chest and mint more coins for myself. So it's really obvious why you need the gold ores to be scarce and why you don't want the devs to just randomly mint more. It's also really obvious why the physics needs to be on chain.
Because if I change the physics such that it becomes super easy to break the force field, I can now come in and steal all the gold ores from within the chest, and now that coin that was backed by the gold ores is no longer backed by anything, and it's just going to plummet. So if you have this asset class take off, it's super, super obvious why the actual physics and resources of the world are significant. To do one more example, we can kind of imagine how in the crazy 2021 bull cycle there was this idea of NFT land, right? So basically in The Sandbox and Decentraland, you could own and trade land NFTs. And if you held one of these NFTs, you had total, forever ownership of that land. And this was really good for a particular type of user, in that you can have hyper-rational financial markets and speculation on those NFTs. Because if I buy a land NFT, I forever own that land. So I can speculate on it and trade these NFTs eight months down the line. It'd be totally okay. You can't do that in Biomes, right? If you want to control land in Biomes, you need to build defenses. You need a military, you need a force field. The military can now decide that, okay, we, the military, are protecting this land. We're going to tokenize this land and sell it to you. And if you hold this token, you're allowed to come and build within our premises knowing that everything will be secure. Let's say I buy this token, I come back in four months, and that military is just dead, all the defenses are broken. Now this token doesn't mean anything. Even if I hold this token, I don't actually have access to any land, right? So now you have this entire asset class that is only worth anything as long as players actually maintain their defenses. And what you just did right here is you made a fundamental switch. You made a switch from the system caring about who owns what asset to the system only caring about physics, and assets can live on top of that, but the assets are not the first-class citizen. To make it concrete, you went from metaverse land NFTs, where you own something until you sell it, to a tokenized force field, where you own something until someone breaks the force field. And when you have a new asset class, you birth an economy with new properties. This requires new types of participation and appeals to a new set of users, which is really cool, which is why, even though this has voxels and stuff, it's not The Sandbox, right? It's a very different system. And this brings us back to the original claim that we started off with. Digital matter and smart items can birth a crazy new asset class, tokenized digital commodities, tokenized force fields, that will unlock autonomous worlds because they'll make the autonomous worlds really obvious. To visualize it one more time, let's just expand that statement. Just like Bitcoin has scarce digital coins and tamper-proof IOUs, autonomous worlds will have scarce digital matter and tamper-proof physics. Inside, the players will use smart items, which are tangible, physical forms of smart contracts, to grow extremely large economies. And what we have done here is we have kept it simple. We haven't, you know, gone too far out and too wild. We have taken ideas that already work, and we have just kind of reapplied them. We have changed these ideas such that instead of being built for finance, they're now built for these virtual worlds. Scarce digital coins become scarce matter. Tamper-proof IOUs become tamper-proof physics. Smart contracts become smart items.
I think if you take anything away from this talk, it would just be this slide. Okay, now zooming out, only because I have a few more minutes so I can just say some bullshit. We're seeing how Ethereum can eventually birth worlds we don't inhabit physically, but that we still treat as tremendously real and valuable, right? So imagine Biomes has like a $10 billion GDP, right? Or maybe the autonomous world you build has a $10 billion GDP. And autonomous agents start living inside. Then you'll have a really funny thing. If they live inside, they're obviously going to value the scarce digital matter in the world they live inside much more than the coins from our world. They don't actually live in our world. If I have an autonomous agent in Biomes and I give it a bunch of neptunium ore, it can take that and build weapons with it and use those weapons. So it's clear why it has value. If I give it coins, if I give it the US dollar, it can buy a burger, right? But what's it going to do with a burger? So it doesn't actually give a shit about our coins, it will only care about all the scarce matter. And then you have a really weird future, right? AGIs will only accept sakura for payment. They don't believe in coins. In 2027, Biomes is our only communication channel to ASI agents. And when we want counsel, we travel into Biomes and journey to their lands and homes and bring the agents glowstone. And in return, they give you some cryptographically encrypted Riemann hypothesis solution in a book. So this is how we solve the problems of the future. We just appeal to the agents in Biomes, and they give us all their solutions. And Skyler's really smart. You need to trust in Skyler. He built this, he runs this event thing, which means everything he says is definitely true. And what he said is that Biomes is unstoppable Minecraft today, but that doesn't mean you can take it, you know, for fun. You have to take it very seriously, because tomorrow, he said, it'll be an unstoppable lifelike simulation that's going to have unalterable digital physics, run by AI who believe they're sentient as we believe we are, which is crazy. So take it seriously. Stock up on all the sakura you can. Go to biomes.aw. You'll see all this unfold. It'll probably go through many cycles over the years, but hopefully something really crazy will come out of it at some point. And thank you. All right, question time. As before, scan the QR code, log in with Zupass, and start asking your questions. You can also upvote other questions if you want them to come to the top of the screen. Please do that so we don't have to scroll, because it was awkward last time. So we'll start with the first question. Can smart items interact with the world, like place and remove blocks, and define regions? Yeah, so smart items are items with functionality in the world. For example, a chest is an item that you can store stuff in and take stuff out of. A door is an item that you open or close to enter something. So smart items are just regular items with functionality in the world. And all you do is, by inserting a smart contract into the item, you can hook into that functionality and further augment it with your own economics.
So I turn a door that opens and closes into a door that is now token gating before deciding if it wants to open or close. Thank you. What are the considerations when designing or deploying physics and new primitives? It seems that playing God is high stakes. Yeah, I feel like something we've been noticing is, as people start investing their time into this stuff and they start building stuff in the world, especially if they use smart items, so now they're actually building an economy in the world, you can't just arbitrarily push physics updates anymore. If we start randomly one day minting a lot more sakura trees, it kind of screws up what a lot of people are building. So it does make it more high stakes. You need to really get some sort of rough consensus within your community so you don't piss off people who are trying to build stuff inside. It is higher stakes, yeah. Okay, I'll take the digital physics one. How can we design digital physics to attract agents to live in our games? Coins may be useful for them to pay for compute and storage. The second part is probably a statement, not a question. Yeah, yeah. Ultimately, I'm going to say, at the end of the day, right now Biomes is very much Minecraft. We're not creating some super AGI thing. But okay, how do we design this world? How do we evolve Biomes to attract agents? I think basically if you get the GDP of the world to be high enough, like, if you reach a world where there's a neptunium ore people value at $100 because they can build a weapon with it and be powerful in the world, agents might just want to live in these worlds for a purely rational reason: all the GDP is there and all the activity is there. Okay, I think there's one down there, which was, can you play for free and earn for free? Can you play for free? At some point, I'm sure you'll be able to play for semi-free when you have the passkey onboarding stuff, so you won't need to worry too much about gas. What you can do in the world is, yeah, okay, imagine this. Imagine different parts of the world have different types of resources that they're abundant in. If I build a train system to move resources from one part of the world to another part, to create all these arbitrage opportunities, I just need to play the game and build a train, right? And then I protect the train system with a force field to make sure it can't be broken down by others. And I make this train system start charging tickets for people to actually take it. I can start selling these tickets. People can buy them. And now I'm making money from the game. But it's not play-to-earn in the sense of I'm earning these points. It's kind of, if you build a business inside and the world sort of has a GDP, then the normal actions you take inside will be economically significant. I'm just going to log in on my phone to see the questions myself. But for now, I saw someone's question, which roughly goes like, is it possible for me to put down a door as a smart item that griefs someone by using a ton of gas, like in a while loop or something like that? Yeah, I think one of the challenges with the smart item stuff is going to be, what if you insert a chip into one of these smart items that is just a bad actor, and then if players interact with it, it's just doing some crazy stuff? This is possible, but I think this is also possible on the EVM. It's kind of the Wild West.
And we're going to probably let it up to clients and communities to maybe white label which type of small item chips they audit and verify and know to be trustworthy, and they'll only use those items. So I don't think you need to build security against this at the protocol level, and you can sort of deal with it higher up. Another quick one is, are biomes contracts going to be audited? Well, right now, biomes is like, it's in a heavy dev update cycle thing, so there's like the physics is changing and getting updated. At some point, you probably want it to be audited, especially if the GDP is going to be high. So I could see this happening down the line. Another one is, how do you bridge biomes to solve real-world scientific problems? Yeah, so I think... I'm not totally sure you want to do that. I think if you want to solve real-world problems with simulation technology, I think you can just run simulation technology on a normal computer and simulate whatever you want there and use that to solve your problems in the world. I don't think this will solve that for you. I think this is more about creating this digital simulation that for some reason people take super seriously and they just want all the wood inside. Okay, so maybe any IRL questions? I'm running out of the list. If someone wants to raise their hand, we can bring them a mic. I'll give 10 seconds for people to make their mind. Okay, so thank you. Thank you.", "eventId": "devcon-7", - "slot_start": 1731486000000, - "slot_end": 1731487800000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/105h0erRlmvaHWuoC8pgHHPTJmdK7CiGkTOcyb1Vs4Nw" + "slot_start": 1731567000000, + "slot_end": 1731568500000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/13_hK3uoJSZ6tt20JSY81twd2hzFWCOGFjUGmSMP9_R4", + "resources_slides": "https://drive.google.com/file/d/1MTJhaRSY0Mh5Cg4zHZbQB-XmGvS5c_gc/view", + "speakers": [ + "dhrumil-shah", + "dhvani-patel" + ] }, "vector": [ 0, @@ -383650,10 +382586,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -383661,6 +382593,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -383725,6 +382658,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -384023,8 +382958,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -384396,7 +383329,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -384419,7 +383351,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -384508,11 +383439,12 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -384759,7 +383691,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -384981,41 +383912,39 @@ }, { "session": { - "id": "growing-the-biomes-gdp-using-digital-matter-and-smart-items", - "sourceId": "AZCYRS", - "title": "Growing The Biomes GDP Using Digital Matter & Smart Items", - "description": "Biomes is growing the virtual world with the largest GDP. As a fully onchain 3D voxel world, every single action in Biomes -- mining, building, crafting, even moving -- is a transaction on the Redstone L2. \r\n\r\nWe will share stories how we're working to grow the GDP of Biomes, what is working and what isn't. We will also share examples and ideas for onchain smart items in the Biomes world enabled by smart contracts.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", + "id": "hacking-thai-beats-cities-and-dances", + "sourceId": "NM8B9E", + "title": "Hacking Thai Beats, Cities & Dances", + "description": "Can we inspire Thai builders to be more creative through hacking our own culture? 
Stories of an algorithmic Thai music festival in Thailand's oldest museum, an open-source hackathon to improve the city of Bangkok, an interactive art performance that blends algorithms with traditional Thai dance; and how you can build better builder communities by inter-disciplinary thinking.", + "track": "Real World Ethereum", "type": "Talk", "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Gaming", - "Autonomous World", - "Autonomous World", - "Gaming" + "Art", + "FOSS", + "Live Coding" ], "keywords": [], - "duration": 1275, + "duration": 522, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "c127a713d0d245b536157b346a5bb540daeb2d2951f255a26af1b86af9a7f766", + "sources_youtubeId": "WrWIehDVA8E", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735ac844ccb22799e395271", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ac844ccb22799e395271.vtt", - "transcript_text": " Hello, I'm Dhrumil. I work on biomes. Biomes you probably played in the gaming corner over there or maybe you saw the demo earlier in the morning. You can also just go in biomes.aw and actually play it. So today we're going to talk about growing the biomes GDP, which is the primary thing we're trying to prove out with biomes, and two things we want to use to try and achieve those goals. Before we start, we're going to take just like a minute to think about why we're doing this. Most people who view Ethereum, they view it as an open financial system. If you view it this way, you care about a particular set of things. You care about stable coins, coins, RWAs, treasuries, wallets, and payments. But this is kind of the weird room. That's why it's in a classroom. And it's a bit of a different scene. It's not interested in moving our existing world on chain. Rather, it's interested in trying to uncover a totally new and weird future that's very different from the world that exists today. And if I was going to sum up the core question that's being asked in this room, okay, I think people probably will have a problem with me speaking on behalf of the room, but I will do it. If Bitcoin was able to birth digital coins that people treat like real valuable coins, can we basically 100x this and birth a digital planet that people treat like a real planet? And I think the short answer is that I think we can. And I think it actually might end up being much more simple and obvious than we might have thought before. There might be a line of sight to this. I think what we should just do is we should take ideas that already work, and we should just reshape them, right? We should reshape them from being built for finance and just make them work with virtual worlds, right? So we've got to take the idea of scarce digital coins and convert that to scarce digital matter. We've got to take tamper-proof IOUs and turn that into tamper-proof physics, and we've got to take smart contracts and turn that into smart items. And that's basically what this talk will be about. And we're going to actually make this concrete with examples of us trying to do this within biomes. And I hope that makes this much more legible. And the claim with those three pieces, the claim is that the digital matter and smart items can birth a crazy new asset class, the weirdest asset class the world has seen. And that, if it gets mindshared, will unlock autonomous worlds. 
It'll make them super obvious, super visceral, and you'll never have to answer the question of why you put this on chain. So to make things, let's look at biomes. On the surface, biomes is just Minecraft with everything on chain. You can, you've probably tried it out. The items, players, physics, all the actions, you can see it move and mine and build. It's all on chain. You know, right now there's like client optimization thing going on to make this work. Sometimes there'll be bugs. But, you know, I think you've got to press your T dot wherever you find them to make Cori go mainnet faster. And then this will not happen. And what's been happening with biomes ever since we have sort of been in our alpha phase the last month is it's coming alive, the world is coming alive, there's a lot more activity, we're sort of seeing it go. The transactions are up and to the right, you know, the players up and to the right, GDP up and to the right, everything bullish. There's people like Ben who are spending five hours building something, Deki spending eight hours plus days mining stuff. You know, so people are doing stuff. Redstone is like sometimes in the top five change in rope.wtf, which is really funny considering this like barely ever hits 15 concurrent players, right? And Ditto is a community member who made this grand claim that BAMS feels like Ethereum but more playful. And so what we're going to do today is we're going to go down the rabbit hole. Why are people so excited? On the surface, it just looks like on-chain Minecraft. Why are we even doing this, right? The first concept is digital matter, the thing we brought up in the beginning. Think of digital matter as a playful vision of blockchain-created scarcity. So while in Bitcoin, you have scarce coins, in biomes, you have scarce crops and wood and ore and land, and in Bitcoin, you have tamper-proof IOUs, in biomes, you have tamper-proof physics. And what this means is, if there is a tree, I can't just snap my fingers and mint more trees arbitrarily. The only way for there to be more trees is if I farm it according to the physics, right? So there's always like, wood is actually valuable. And if you do like the weird thought experiment, you might be like, if Bitcoin was an AWS, it could only be worth so much. But Bitcoin with scarcity and tamper-proof rules could be worth a trillion dollars. And if you go a bit crazy, why can't the same happen with digital wood, right? You have like Ben, who's maybe thinks one sakura wood will be worth $31,000. So like, this can happen, this can happen, just you just need to believe, right? And just like having lots of scarce Bitcoin grants you power in Bitcoin's world, controlling lots of the scarce resources and land lets you build businesses and be effective in the biomes world. The second thing we're going to look at is smart items. So we've got digital matter. Now we're going to look at smart items. Smart items present a playful vision of smart contracts. You can think of them as items with functionality in the physics that you can program with a smart contract. They make it really easy for players to build real economies for anything in the world. To visualize it, think of a smart contract as a chip. And you can insert a chip into an item, like a chest, to program it. So if I have my Uniswap chip and I put it into a chest, it turns into a Uniswap chest. And at this point, if people mine ores around the world, they can put ores in the chest and swap it for some tokens according to the AMM prices. 
Or they can give tokens to the chest and get ores according to AMM set prices. And you can sort of take this concept and basically apply it to all the kinds of different objects in the world. You can imagine I have a door and I turn it into a token gate, right? I need the token to actually open it. So what you just did here is you like gave the Holmeson treatment to Ethereum where if you told Homer Simpson what is a token gate, that's what he would think it is, right? And so now you're taking all of these smart contract track patterns and you're just making them much more relatable for a more normal audience. So you can turn a door into a token gate, a force field, a cart, a bed. And with all of these items, you can build a lot of cool stuff. And these are all real examples of things that are already being built within biomes. The Bazaar is the biomes version of Uniswap. It's like this shop with a lot of liquidity where you can trade all kinds of items for coins. The funny thing is as soon as we made the Bazaar, there's this existing other group that put up a really crappy shop next to it, relatively. And it's like the sushi swap of biomes, right? And it's trying to vampire attack the bazaar and take all the liquidity. You also have like Sakura Temple where there's this like big, big temple where you donate Sakura wood to get this membership NFT. They use all that wood to build the largest Sakura tree in the world and all future Sakura meme events that happen will probably gate access based on whether you donated the Sakura tree and got the NFT. The Castle Hotel Sakura Inn, they give you safe spaces to sleep and recharge your player and load without worrying about getting grieved. And in return, you just sort of have to pay rent. The Pyramid Arena and Parkour Challenge are going to host a bunch of games, and these games will use smart items to have betting and prize pools and everything. So in a way, you can basically think of these as like deploying dApps inside of a virtual world, right? And third, if you have Digital Matter and you have smart items, you have a new asset class. And I think it will be the craziest asset class that crypto has ever seen, maybe the world has ever seen. And I think when that asset class gets mindshare is when we no longer have a classroom and we have a building. So to sort of imagine a chest, right? And I can program a chest with a smart contract. Imagine I program it such that when a player gets gold ore from the world, which is scarce, and they put it in, it mints a coin. Right? So now you have a coin that is backed by the actual gold in the virtual world. And if I put the coin in, it burns the coin and lets me take out a gold ore. And every time this happens, the chest takes a small fee, and it uses that fee to incentivize players to charge the force field and defense of the chest to make sure it cannot be broken in. If this coin rises in price, let's say it's even worth like 10 million or something, it becomes extremely, extremely obvious why all the gold ores need to be provably scarce. Because if I just mint more gold ores with a thin air, I can just put them in the chest and mint more coins for myself. So it's really obvious why you don't want, you know, you need the gold ores to be scarce and you don't want the devs to just randomly mint more. It's also really obvious why the physics needs to be on chain. 
Because if I change the physics such that it becomes super easy to break the force field, I can now come in and steal all the gold ores from within the chest, and now that coin that was backed by the gold ores is no longer backed by anything, and it's just gonna plummet. So if you have this asset class take off, it's super, super obvious why the actual physics and research of the world are significant. To do one more example, we can kind of imagine how in the 2021 like crazy bull cycle, there was this idea of NFT land, right? So basically in the sandbox and Decentraland, you could own and trade land NFTs. And if you held one of these NFTs, you had total forever land ownership of that land. And this was really good for a particular type of user that you can have hyper-rational financial markets and speculation on those NFTs. Because if I buy a land NFT, I forever own that land. So I can speculate on it and trade these NFTs eight months down the line. It'd be totally okay. You do that in biomes, right? If you want to control land in biomes, you need to build defenses. You need a military, you need force field. The military can now decide that, okay, us military are protecting this land. We're going to tokenize this land and sell it to you. And if you hold this token, you're allowed to come and build within our premises knowing that everything will be secure. Let's say I buy this token, I come back in four months and that military is just dead, all the defenses are broken. Now this token doesn't mean anything. Even if I hold this token, I don't actually have access to any land, right? So now you have this entire asset class that is only worth anything as long as players actually maintain their defenses. And what you just did right here is you made a fundamental switch. You made a switch from the system caring about who owns what asset into the system only caring about physics, and assets can live on top of them, but the assets are not the first class citizen. To make it concrete, you went from metaverse land NFTs, where you own something until you sell it, to a tokenized force field, where you own something until someone breaks the force field. And when you have a new asset class, you birth an economy with new properties. This requires new types of participation and appeals to a new set of users, which is really cool, which is why even though this has voxels and stuff, it's not the sandbox, right? It's a very different system. And this brings us back to the original claim that we started off with. No matter, and smart items can birth a crazy new asset class, tokenize digital commodities, tokenize force fields that will unlock autonomous worlds because they'll make the autonomous worlds really obvious. To visualize it one more time, you think, let's just expand that statement. Unlike Bitcoin, scarce digital coins, and tamper-proof IOUs, autonomous worlds will have scarce digital matter and tamper-proof physics. Inside, the players will use smart items, which are tangible, physical forms of smart contracts, to grow extremely large economies. And what we have done here is we have kept it simple. We haven't sort of, you know, went too outside and too wild. We have taken ideas that already work, and we have just kind of reapplied them. We have changed these ideas such that instead of being built for finance, they're now built for these virtual worlds. Scarce digital coins become scarce matter. Temper-proof IOUs becomes temper-proof physics. Smart contracts become smart items. 
I think if you take anything away from this talk, I think it would just be this slide. Okay, now zooming out, only because I have a few more minutes so I can just be this slide. And okay, now zooming out, only because I have like a few more minutes so I can just say some bullshit. We're seeing how Ethereum can eventually birth worlds we don't inhabit physically, but we still treat as tremendously real and valuable, right? So imagine biomes as like a $10 billion GDP, right? Or maybe the autonomous world you build has a $10 billion GDP. And autonomous agents start living inside. Then you build has a 10 billion dollar GDP. And autonomous agents start living inside. Then you'll have a really funny thing. So if they live inside, they're obviously going to value the scarce digital matter in the world they live inside, much more than the coins from our world. They don't actually live in our world. If I have an autonomous agent in biomes and I give it a bunch of neptunium ore, it can take that and build weapons with it and use those weapons. So it's good, it's clear why it has value. If I give it coins, if I give it the US dollar, it can't buy a burger, right? What's it going to do with a burger? So it doesn't actually give a shit about our coins, it will only care about all the scarce matter. And then you have a really weird future, right? AGIs will only accept secure for payment. They don't believe in coins. In 2027, biomes is our only communication channel to ASI agents. And when we want counsel, we travel into biomes and journey to their lands and homes and bring the agents glowstone. And in return, they give you some cryptographically encrypted Riemann hypothesis solution in a book. So this is how we solve the problems of the future. We just appeal to the agents in Biomes, and they give us all their solutions. And Skyler's really smart. You need to trust in Skyler. He built this, he runs this event thing, which means everything he says is definitely true. And what he said is that biomes is unstoppable Minecraft today but that doesn't mean you can take it you know for fun you have to take it very seriously because tomorrow he said it'll be an unstoppable lifelike simulation that's gonna have unalterable digital physics run by AI who believe they're sentience as we believe we are which which is crazy. So take it seriously. Stock up on all the Sakura you can. Go to biomes.aw. You'll see all this unfold. It'll probably go through many cycles over the years, but hopefully something really crazy will come out of it at some point. And thank you. All right, question times. As before, scan the QR code, log in with Zupass, start asking your questions. You can also upvote other questions if you want them to come to the top of the screen. Please do that so we don't have to scroll because it was awkward last time. So we'll start with the first question. Can smart items interact with the world, like place, remove blocks, and define regions? Yeah, so smart items are items with functionality in the world, like place to remove blocks and define regions? Yeah, so smart items are items with functionality in the world. For example, a chest is an item that you can store stuff in and take stuff out of. A door is an item that you open or close to enter something. So smart items are just regular items with functionality in the world. And all you do is, by inserting a smart contract into the item, you can hook into that functionality and further augment it with your own economics. 
So I turn a door that opens and closes into a door that is now token gating before deciding if it wants to open or close. Thank you. What are the considerations when designing or deploying physics in new primitives? It seems that playing God is high stakes. Yeah, I feel like something we've been noticing is as people start investing their time into this stuff and they start building stuff with the world, especially if they use smart items, so now they're actually building an economy in the world, you can't anymore just arbitrarily push physics updates. If we start randomly one day minting a lot more sakura trees, it kind of screws up what a lot of people are building. So it does make it more high stakes. You need to really get some sort of rough consensus within your community so you don't piss people off who are trying to build stuff inside. It is higher stakes, yeah. Okay, I'll take the digital physics one. How can we design digital physics to attract agents to live in our games? Coins may be useful for them to pay for compute and storage. Second part is probably a statement, not a question. Yeah, yeah. Ultimately, I'm going to say at the end of the day, right now biomes is very much Minecraft. It's not, we're not creating some super AGI thing. But okay, how do we design this, like, this world? How do we evolve biomes to attract agents? I think basically if you get the GDP of the world to be high enough, like, if you reach a world where there's a Neptunium or people value it at $100 because they can build a weapon with it and be powerful in the world, agents might just want to live in these worlds from a purely rational reason such that all the GDP is there and all the activity is there. Okay, I think there's one down there, which was, can you play for free and earn for free? Can you play for free? At some point, I'm sure you'll be able to play for semi-free when you have the Passkey onboarding stuff, so you won't need to worry too much about gas. What you can do in the world is, yeah, I can come in, okay, imagine this. Imagine you have different parts of the world, have different types of resources that they're abundant in. If I build a train system to move resources from one part of the world to another part, to create all these arbitrage opportunities, if I build this train system, I just need to play the game and build a train, right? And then I protect the train system with a force field to make sure it can't be broken down by others. And I make this train system start charging tickets for people to actually take it. I can start selling these tickets. People can buy them. And now I'm making money from the game. But it's not play to earn in the sense of I'm earning these points. It's kind of if you build a business inside and the world sort of has a GDP, then the normal actions you take inside will be economically significant. I'm just going to log in on my phone to see the questions myself. But for now, I saw someone's question, which roughly goes like, is it possible for me to put a door down as a smart item that grieves someone by using a ton of gas like in a while loop or something like that? Yeah, I think one of the challenges with the smart item stuff is going to be what if you insert a track into one of these smart items that is just a bad actor, and then if players interact with it, it's just doing some crazy stuff. This is possible, but I think this is also possible on the EVM. It's kind of the Wild West. 
And we're going to probably let it up to clients and communities to maybe white label which type of small item chips they audit and verify and know to be trustworthy, and they'll only use those items. So I don't think you need to build security against this at the protocol level, and you can sort of deal with it higher up. Another quick one is, are biomes contracts going to be audited? Well, right now, biomes is like, it's in a heavy dev update cycle thing, so there's like the physics is changing and getting updated. At some point, you probably want it to be audited, especially if the GDP is going to be high. So I could see this happening down the line. Another one is, how do you bridge biomes to solve real-world scientific problems? Yeah, so I think... I'm not totally sure you want to do that. I think if you want to solve real-world problems with simulation technology, I think you can just run simulation technology on a normal computer and simulate whatever you want there and use that to solve your problems in the world. I don't think this will solve that for you. I think this is more about creating this digital simulation that for some reason people take super seriously and they just want all the wood inside. Okay, so maybe any IRL questions? I'm running out of the list. If someone wants to raise their hand, we can bring them a mic. I'll give 10 seconds for people to make their mind. Okay, so thank you. Thank you.", + "sources_streamethId": "67356bfe9dbb7a90e1565e6b", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67356bfe9dbb7a90e1565e6b.vtt", + "transcript_text": " Hello, my name is Phum and I'm a software engineer at Metabase and I work on open source analytics. But in my free time, I'm actually the co-founder of an open source initiative in Thailand called Creator's Garden, where we basically work on open source projects and create events to explore things like synthetic biology and topics like philosophy. But we try to explore those mainly with coding. So while working on these projects in Creator's Garden, I got asked two questions mainly. First is basically how do we get Thai people or specifically Thai builders to be creative. And second thing is how do you bring builders to your spaces? For example, if you have basically an Ethereum space or Web3 space, how do you get people hooked or interested? We have been hosting these events in Thailand for about eight years now, so since I was in middle school, actually. And I'm going to talk about two of these events that kind of gives me this idea on how to engage with builders. So first is on hacking Thai beats. So this is a project I work with Thailand's oldest museum. So I was working with this guy, Khun Hanoi, and he might look very traditional to you, like a Thai musician, but don't let that fool you. He's actually an AI researcher at TikTok and at Google. So he was working on a program that would be able to synthesize music out of AI. So I was thinking, what if we would take Thai instruments, for example, the pinai and granade, and we would be Thai instruments, for example, the PNI and RANAT, and we would be able to use computer and coding to basically synthesize sounds out of nothing? So that's where the idea comes in. What if you can use algorithmic audio synthesis to basically create an event where musicians can come together? But instead of instruments, they do coding. And instead of Western music, they do coding. And instead of Western music, they do Thai music. 
So in this event, we actually did a big projection mapping onto the wall of Thailand's oldest museum. And people were using things like AI synthesis tools and algorithmic coding tools where they can basically build sounds out of nothing. And I think this is one of the most important lessons we've learned over the years, which is that it's very important not to only have a strong domain or a strong theme or a strong tool, but to combine them. For example, a lot of hacker houses only focus on the technology. For example, they focus on Ethereum, but they don't really let people know what they can build with it. In contrast, a lot of hacker houses focus on themes, so, for example, finance, but they don't really see what other interesting tools you can interact with. So the juxtaposition kind of helps a lot. Seymour Papert kind of puts it best when he says people really learn best when they build things that they're interested in. He was talking about children, but this really applies to anyone. So that kind of brings me to my second project, Hacking Thai Cities. So everyone knows Bangkok is a great city, but there's one catch. Well, you know, the air we have right here is probably not the best, and maybe the traffic is not the lightest. And the mayor of Bangkok didn't actually just stop and do nothing. He actually hosted a hackathon, which has actually been done about three years in a row now, and I kind of like that idea. But the problem is, in a lot of these hackathons, the ideas just stop and really go nowhere. So I was thinking, what happens if we use the power of open source? I know the Ethereum space really got this far because it's open source, so why not the whole city? Why not make an open-source city? So we started with the idea of basically letting people come together, but they don't have to formally form a team; rather, it's an open-source project where anyone can join anytime. If you're interested in something, well, just open a PR. And we started by basically bringing in the domain experts in Bangkok, people who have the data of the whole city, like what the problems in the city are, what areas have flooded, and the distribution of those people, as well as people who have accessibility problems, like people who are in a wheelchair, for example. And I think the greatest thing about open source is it lets everyone contribute. So it follows the principle of low floor, high ceiling, and wide walls. Low floor means you can contribute anytime. If you know how to do translations, you can do it. But there's also a very high ceiling. So if you're an AI researcher, you can kickstart a new project. And it has wide walls. So no matter if you're a scientist or a researcher, well, there's a place for you to contribute. And this brought very interesting projects in the hackathon we did. So for example, there were a lot of motorcycles that were parking on the sidewalks, and you cannot basically get past them, and it was really annoying. So someone developed software that uses computer vision to check that and automatically report it and get a bounty. So in Bangkok, we have that bounty system where you can make money by just catching the motorcycles that are on the sidewalks, and you can automate that with computer vision, or automate the detection of every tree in Bangkok. So we did that. And the last project I want to share with you is hacking Thai dance.
In 1923, there was this photo, which is the first ever photo taken of the Thai dance. And the interesting thing is, it's 100 years later, but nothing has changed. We're still teaching dance the same way in school. And to be honest, a lot of kids my age don't really get interested in dance anymore. It's unlike K-pop, where you're able to hack. So we were thinking, can we use AI to basically change that? So we thought of a project where you can generate new dances out of old dance moves, like, for example, the 59 principal dances. And we turned this into an art performance called Cyber Subin, which lets human dancers kind of dance with AI. So we built software that would try to learn the characteristics of Thai dance. The way we did this is we started with basically a group of builders, but also a group of Thai choreographers. And we were going through the old documents of how people would decompose the dance, which taught us that there are six principles in a Thai dance, for example, the energy in your body and the circle and curve that surrounds you. And from the knowledge we gained from the choreographers, we were able to build software that allows us to basically generate new dances out of old ones by tweaking the parameters. So we think of things as basically a set of animation transformations, where you'd have the original dance, for example, the dance of Thep Phanom, which looks like this. And then you run that through a series of programs that would generate a very new dance that you can command, and we turned this into an interface that dancers can use to basically generate dances. And we put this all together in a dance performance. We just showed this in Bali, I think that was Indonesia, and also in Taipei, Taiwan. So in the dance performance, we got a lot of people up on stage to kind of join in the fun and play around with Thai dance. So generally, I think this brings all three projects to a final conclusion, which is: if your culture or your domain is very hackable, for example, if you make finance very hackable, or you make Thai culture hackable, there are some elements where people can go in and use their tools or their technologies to hack. Then it makes it really fun. So compare, let's say, a Thai student who is learning Thai dance in school and is really bored because there's nothing new to make, versus kids who know how to use AI and are able to construct new dances and have fun with them.
Thank you.", "eventId": "devcon-7", - "slot_start": 1731567000000, - "slot_end": 1731568500000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/13_hK3uoJSZ6tt20JSY81twd2hzFWCOGFjUGmSMP9_R4", - "resources_slides": null, + "slot_start": 1731552900000, + "slot_end": 1731554100000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/16NvToD2NQxicsfxWktPRLuxSt7qrL73mCEcujVhk_i0", + "resources_slides": "https://drive.google.com/file/d/1IQQP48SNOBU7GhDHArFHe0HDDud7GMcM/view", "speakers": [ - "dhrumil-shah", - "dhvani-patel" + "phoomparin-mano" ] }, "vector": [ @@ -385025,16 +383954,13 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -385096,9 +384022,6 @@ 0, 0, 0, - 6, - 6, - 0, 0, 0, 0, @@ -385404,6 +384327,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -385881,14 +384805,13 @@ 0, 0, 2, - 2, - 0, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -386108,6 +385031,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -386355,39 +385279,45 @@ }, { "session": { - "id": "hacking-thai-beats-cities-and-dances", - "sourceId": "NM8B9E", - "title": "Hacking Thai Beats, Cities & Dances", - "description": "Can we inspire Thai builders to be more creative through hacking our own culture? Stories of an algorithmic Thai music festival in Thailand's oldest museum, an open-source hackathon to improve the city of Bangkok, an interactive art performance that blends algorithms with traditional Thai dance; and how you can build better builder communities by inter-disciplinary thinking.", - "track": "Real World Ethereum", + "id": "hallucinated-servers-another-prog-crypto-chip", + "sourceId": "DYJ88A", + "title": "hallucinated servers another prog crypto chip", + "description": "An introduction to programmable cryptography, culminating in the dream of a \"hallucinated server\".", + "track": "Applied Cryptography", "type": "Talk", - "expertise": "Beginner", + "expertise": "Expert", "audience": "Engineering", "featured": false, - "doNotRecord": false, + "doNotRecord": true, "tags": [ - "Art", - "FOSS", - "Live Coding" + "Cryptography", + "MPC", + "fhe", + "Cryptography", + "MPC" ], - "keywords": [], - "duration": 522, + "keywords": [ + "Cyprography", + "fhe", + "mpc" + ], + "duration": 1396, "language": "en", "sources_swarmHash": "", "sources_youtubeId": "", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67356bfe9dbb7a90e1565e6b", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67356bfe9dbb7a90e1565e6b.vtt", - "transcript_text": " Hello, my name is Phum and I'm a software engineer at Metabase and I work on open source analytics. But in my free time, I'm actually the co-founder of an open source initiative in Thailand called Creator's Garden, where we basically work on open source projects and create events to explore things like synthetic biology and topics like philosophy. But we try to explore those mainly with coding. So while working on these projects in Creator's Garden, I got asked two questions mainly. First is basically how do we get Thai people or specifically Thai builders to be creative. And second thing is how do you bring builders to your spaces? For example, if you have basically an Ethereum space or Web3 space, how do you get people hooked or interested? We have been hosting these events in Thailand for about eight years now, so since I was in middle school, actually. 
And I'm going to talk about two of these events that kind of gives me this idea on how to engage with builders. So first is on hacking Thai beats. So this is a project I work with Thailand's oldest museum. So I was working with this guy, Khun Hanoi, and he might look very traditional to you, like a Thai musician, but don't let that fool you. He's actually an AI researcher at TikTok and at Google. So he was working on a program that would be able to synthesize music out of AI. So I was thinking, what if we would take Thai instruments, for example, the pinai and granade, and we would be Thai instruments, for example, the PNI and RANAT, and we would be able to use computer and coding to basically synthesize sounds out of nothing? So that's where the idea comes in. What if you can use algorithmic audio synthesis to basically create an event where musicians can come together? But instead of instruments, they do coding. And instead of Western music, they do coding. And instead of Western music, they do Thai music. So in this event, we actually did a big projection mapping onto the wall of Thailand's oldest museum. And people were using things like AI synthesis tools and algorithmic coding tools where they can basically build sounds out of nothing. And I think this is one of the most important lessons we know over the years, which is it's very important not to only have a strong domain or a strong theme or a strong tool, but to combine that. For example, a lot of hacker house, they only focus on the technology. For example, they focus on Ethereum, but they didn't really let people know what they can build with it. In contrast, a lot of hacker houses, they focus on themes. So, for example, finance, but they don't really see what other interesting tools you can interact with that. So, with the juxtaposition, it kind of helps a lot. So, Seymour Papert kind of puts it best when he says, people really learn best when you build things that you're interested in. He was talking about children, but this really applies to anyone. So that kind of brings me to my second project, Hacking Thai Cities. So everyone knows, like, Bangkok is a great city, but there's one catch. Well, you know, there's, like, the air we have right here is probably not the best, and maybe the traffic is not the lightest. And the mayor of Bangkok didn't actually just stop and do nothing. He actually hosted a hackathon, actually was done about three years in a row now, and I kind of like that idea. But the problem is, in a lot of these hackathons, the idea is to stop and really goes nowhere. So I was thinking what happens if we use the power of open source? I know the space for Ethereum really got this far because it's open source, so why not the whole city? Why not make an open source city? So we started with the idea of basically letting people come together, but they don't have to formally form a team, but rather an open source project where anyone can join anytime. If you're interested in someone, well, just open a PR. And we started by basically bringing the domain experts in Bangkok, people who have the data of the whole city, like what are the problems in the city, what are the areas that have flooded, and the distribution of those people, as well as people who have accessibility problems, like people who are in a wheelchair, for example. And I think the greatest thing about open source is it lets everyone contribute. So it follows the principle of low floor, high ceiling, and white walls. 
Low floor means you can contribute anytime. If you know how to do translations, you can do it. But there's also very high ceiling. So if you're an AI researcher, you can kickstart a new project. And it has white walls. So no matter if you're a scientist, if you're a researcher, well, there's a place for you to contribute. And this brings very interesting projects in the hackathon we did. So for example, there were a lot of motorcycles that were parking near basically the sidewalks, and you cannot basically get over that, and it was really annoying. So someone developed a software that was computer vision to check that and automatically report that and get a bounty. So in Bangkok, we have that bounty system where you can make money by just catching the people who are on the sidewalks, the motorcycles, and you can automate that with computer vision or automating the tree detection of every tree in Bangkok. So we did that. And the last project I want to share with you is hacking Thai dance. In 1923, there was this photo, which is the first ever photo taken of the Thai dance. And the interesting thing is it's 100 years later, but nothing has changed. We're still teaching dance the same way in school. And to be honest, a lot of kids like my age, they don't really get interested in dance anymore. It's unlike K-pop where you're able to hack. So we were thinking, can we use AI to basically change that? So we think of the project where you can generate new dance out of old dance moves, like for example, the 59 principal dances. So we were turning this into an art performance called Cyber Subin, which lets human dancers kind of dance with AI. So we built a software that would try to learn the characteristics of Thai dance. The way we did this is we started basically a group of builders, but also a group of Thai choreographers. And we were going through the old documents of how people would decompose the dance that makes us learn that there are six principles in a Thai dance for example the energy in your body and the circle and curve that surrounds you and from the knowledge we gained from the choreographer we were able to build a software that about allows us to basically generate new dance out of old ones by tweaking the parameters. So we think of things as basically a set of animation transformations where you'd have the original dance, for example, the dance of Te Panom, which looks like this. And then you generally run that through a series of programs that would generate very new dance that you can command and we turn this into an interface where dancers can use to basically generate dances and we put this all together in a dance performance so we just show this up in bali uh i think that was indonesia and also in taipei taiwan so dance performance, we got a lot of people like upstage to kind of join in the fun and play around with Thai dance. So generally, I think this brings me like all the three projects to a final conclusion, which is if your culture or your domain is very hackable, for example, if you make finance like very hackable or you make Thai culture, there are some elements where people can go in and can use their tools or use their technologies to hack. Then it makes it really fun. So compare, let's say, a Thai student who were learning the Thai dance in school and they were really bored because there's nothing new to make. Versus like kids who know how to use AI and they're able to construct new dances and have fun with it. 
Yeah, so I would like to say thank you to my team in Cyber Subin for the dance production. And thank you to you too for listening. This is it. Thank you.", + "sources_streamethId": "67357c399dbb7a90e1e3ce59", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731552900000, - "slot_end": 1731554100000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/16NvToD2NQxicsfxWktPRLuxSt7qrL73mCEcujVhk_i0", - "resources_slides": null, + "slot_start": 1731556800000, + "slot_end": 1731558600000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1vVTMx-WFRYRYIkDhxt9cWeLavDtiXTRNFX6sO0Z4Nyo", + "resources_slides": "https://drive.google.com/file/d/1VX6NZ3kubScPXkijneNWfWVBYyYOA3r0/view", "speakers": [ - "phoomparin-mano" + "b-l" ] }, "vector": [ @@ -386397,11 +385327,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -387152,6 +386082,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -387227,6 +386158,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -387250,14 +386182,8 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, 0, 0, - 2, 0, 0, 0, @@ -387478,7 +386404,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -387507,6 +386432,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -387708,7 +386634,6 @@ 0, 0, 2, - 0, 2, 0, 0, @@ -387727,45 +386652,45 @@ }, { "session": { - "id": "hallucinated-servers-another-prog-crypto-chip", - "sourceId": "DYJ88A", - "title": "hallucinated servers another prog crypto chip", - "description": "An introduction to programmable cryptography, culminating in the dream of a \"hallucinated server\".", - "track": "Applied Cryptography", - "type": "Talk", - "expertise": "Expert", - "audience": "Engineering", + "id": "hardening-the-commons", + "sourceId": "BMTVJK", + "title": "Hardening the Commons", + "description": "A hands-on workshop for those interested in strengthening the capture resistance and general survivability of commons under their stewardship. This session will be a sequence of guided small group discussions that will flesh out the levels of a capability maturity model for how a commons resource, whether it is a blockchain or a city, can be gradually \"hardened\" by developing and maturing capabilities at material, philosophical, skill, social, and mission levels.", + "track": "Coordination", + "type": "Workshop", + "expertise": "Beginner", + "audience": "Community", "featured": false, - "doNotRecord": true, + "doNotRecord": false, "tags": [ - "Cryptography", - "MPC", - "fhe", - "Cryptography", - "MPC" + "adoption", + "Censorship Resistance", + "Coordination", + "Solarpunk" ], "keywords": [ - "Cyprography", - "fhe", - "mpc" + "Impact", + "Commons", + "Adoption" ], - "duration": 1396, + "duration": 10846, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "1e514371335c0a4647504273d5c37330cd160b33866c5d01bbef14c88c145b82", + "sources_youtubeId": "8pIq-LaP9x0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67357c399dbb7a90e1e3ce59", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "673492da9dbb7a90e1a406e2", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673492da9dbb7a90e1a406e2.vtt", + "transcript_text": " Hi everybody, thanks for coming. Can we actually all please come into the center of the room since it's a smaller group? 
Can all the people who have been part of the Summer of Protocols program raise their hands or just stand up? Stand up. All right. Do not be in one cluster. Please distribute yourselves. I do not want to see two SOP people in the same breakout group. So SOP people, please spread yourselves out. All right. People trickling in. As you can see from my hat, it's my birthday today, so make this workshop a success. It's not just my birthday. It's a big one. It's 50. So how this workshop goes is going to be the predictor of the rest of my life. Please make it go well. Okay. All right. Welcome. This is the Hardening the Commons workshop. If you were at Tim's talk yesterday, you heard him raise the theme. It's been part of, like, many of the talks here, the Cypherpunk history session. So a whole bunch of talks had these themes going. So this is a workshop, and it'll have some talking, but it's mainly a working workshop. And this is a research workshop. We are not here to teach you anything. You are here to help us do some research. So we have what we think is a good question. It's one of the most important questions. How to harden the commons is, in our opinion, one of the most important research questions, and this has been validated by, like, two years of the Summer of Protocols program. And we're hoping that in the next three hours you guys will help us come up with some really good answers, right? And this will actually... oh, sorry. Can you hold this? Okay. So this will actually help us shape the agenda for the 2025 program. So this is not just going to be like, you know, you do some exercises on paper and it then vanishes into the void and nothing else happens. So please: we will be collecting the workshop output. We will be analyzing it, and we'll be trying to make use of it to shape the 2025 program. If you don't know what Summer of Protocols is, go to summerprotocols.com or talk to the various program alumni who are scattered around the room. All right, let's see. Okay, oops. So the goal of the workshop is to try and put together everything we know and have been talking about for years in the Ethereum community: capture and censorship resistance, decentralized governance, permissionless innovation. How do you put this all together in a big-picture model, in what we're calling a capability maturity model? Raise your hand if you've heard the term capability maturity model. One, two, three, four, five. So if this had been one of my traditional, you know, big-corporation consulting gigs and I'd asked the question, 100% of the middle managers would have raised their hand, because this is standard LinkedIn boilerplate business management stuff. It can feel very, like, awkward and bureaucratic, but it's a very useful way to put lots of random thoughts together in a model. So this is a capability maturity model workshop. I'll show you a few examples to see what that means. And if we do a good job, it can serve as a roadmap for a lot of people trying to adopt the technologies we've all been working on. So, harden the commons. Two terms there. Harden is in two senses. One, as in harden technology against threats, like radiation-hardening electronics for space missions. That's one sense of harden.
The other sense of harden is the ability to make strong commitments about the future, and this is something Josh Stark will talk about in his provocation in, like, an hour or so. And the commons, of course: you've all heard the term commons, and we have a lot of examples in our minds. So, you know, Ethereum L1 is a commons. Open source projects are commons. Forests and lakes and rivers are commons. So we want to, like, think broadly about commons. And Trent is here. He'll actually be leading a breakout session on how to think about commons. But here are some examples to keep in mind. And I've put this icon on some of the slides, because for the workshopping pieces, it'll be helpful to have these sort of, like, prompting things available. So take pictures of the slides where I have this icon, okay? So these are some examples of commons. Don't worry too much about the definitions. Systems for producing or stewarding shared resources is a good enough working definition for our work here, okay? All right, how many of you have heard of Elinor Ostrom? Okay, this is impressive, like more than half the room. Nobel Prize winner, did some amazing work on how commons are built and stewarded. These are her famous principles for stewarding commons. There's a little bit of a cult or religion aspect around Ostrom, so don't take this as gospel, but take it as a thought starter. They're good ideas, but feel free to contradict and challenge them, okay? So again, another picture-taking slide. We do have a shared G drive where hopefully you'll dump your final CMM model. So you'll be working on paper. You'll be, like, making revisions of the model that you're working on. But the final one, take a picture and dump it in this folder. If you have trouble, just hand your paper model to Timber. Timber, are you here? Raise your hand. Can you stand up and show your face to everybody? So Timber is going to be running logistics in the room. Okay, so that's the housekeeping done. So the agenda is: I'll give you a quick introduction to what CMMs are. Then we'll do a set of five alternating provocations and breakouts. So a short, like, seven-minute lightning-talk-type thing, followed by 17 minutes where you do one pass through the CMM, then another talk, and so on. So we'll do that. Halfway through, we'll take a little bit of a break, and then we'll come back, do some sharebacks, broad room comments, and then wrap up. So standard workshop structure, but hopefully the content is what will make this. Okay? Okay. So, a short TLDR version of the hardening-the-commons problem, right? How many of you are seeing this cartoon for the first time? Raise your hands. Okay. So most of you have seen this cartoon, right? So you don't want to be too idealistic. We are not writing science fiction. We are not writing weird time machine stories. We are being realistic. So any speculation we do has to be kind of, like, plausible and realistic, which means, yeah, don't get into, like, weird fantasy scenarios about crypto. Keep the, you know, $5 wrench condition in mind. That's what it means to harden a commons against attacks. You should be able to defend against $5 wrench attacks. Okay. What's a CMM? It's a well-known model that management consultants like me and, you know, career bureaucrats use to just think about how organizations acquire and learn capabilities by rising through several levels over several years.
So we need to adapt it a little bit to apply to commons and open kinds of systems, but I think it works. And yes, it can be LinkedIn middle-manager bullshit if it's done poorly, but I think this room can do it well, okay? Because remember, it's my birthday. You have to do it well as a gift to me. Okay, so here's, like, a classic. If you Google CMM, this is the kind of diagram you'll find. It's a pyramid with five levels. You start out with an initial condition of skills where maybe there are random skills. People have, like, crappy, disorganized ways of doing things, like all of us using Web3 wallets. I would say we are at this initial, disorganized level. Managed is when a little bit more structure is coming in. Then defined: people have textbook definitions of what things are. Then people are beginning to measure things, so things are getting quantitative. Then finally, people are starting to optimize things; this is the mature stage. So this is a generic CMM template. Here's another way to visualize one. You can think of it as, like, a ramp increasing in time. So this one happens to be about analytics. This type of diagram was popular about 10 years ago when people were talking about analytics. So as you can see, the maturation is: when your capabilities are very primitive, you can only do hindsight analytics. But as you progress through the levels, you can do foresight. You can do insight, right? So this is evolving high-level capability based on the micro, tool-level, tactical capabilities. So you learn to use the tools better and better, and you go from being reactive, in hindsight, to predictive. Some organizations actually acquired capabilities this way. Other people talked a lot about analytics but didn't actually get there. Here's another one. These are all things I pulled from Google Images, so they're real images that people presumably used in some organizations. This is an example of security capabilities, and this one is interesting because it visualizes it as growing capabilities on three vectors, technology, process, and people, so the red, blue, and green, and each of them evolves through multiple levels, right? So another good example. You've probably heard a lot about this one. If you've been following, like, the self-driving-cars discourse, people talk about five levels of autonomy. What does it mean? Well, a little bit bureaucratic, a little bit engineering. It's a mix of both. It's technocratic. We'll have Sam doing a provocation later on challenging the ideology of technocracy. But there's a technocratic model of what self-driving should look like: bureaucrats plus engineers putting it together. This one, actually, I just made half an hour ago. I was in a workshop that I was running called Web of Roots. And it was all about the problems of crypto adoption. And the breakout session I was in was about how there's a lack of tools in the ecosystem. And I made up this capability maturity model. So I have, like, five levels, and AI is like a cloud around that. So you can get creative with this stuff. This one, Tim, do you want to quickly speak to this? Vitalik's roadmap is kind of like a CMM. So Tim, come around and speak into the mic. Oh, we lost my slides. Okay. Yeah, so in Ethereum, like Venkat was saying, we don't have a lot of LinkedIn CMMs, and I think this is the closest we've come as an ecosystem to mapping things out.
And it's sort of like an implied CMM here, if you think of the way Vitalik summarizes the Ethereum roadmap. In 2020, it was just like: here's a bunch of stuff we have to do in the next 10 years and how they fit together. And not on this slide is the 2019 version, which is: here's a bunch of stuff scattered over the internet that we have to do in the next 10 years. And it's sort of a refinement in thinking over the years, where a couple of years after that, we were able to actually break this down into specific tracks, so like the merge, verge, surge, all those things. They're still sort of just these rough pointers that we have that are quite low context. And then just before Devcon, Vitalik actually put out a blog post for each of these tracks, sort of going very deep into details around, like, okay, what's the actual thing that needs to be done? Where is it at? What are the blockers and whatnot? And so you can see that we have this sort of evolving capability, or at least Vitalik has, and then guiding the ecosystem to articulate or understand what this entire set of things we have to do is and how they all relate to each other. Thanks, Tim. All right, so now you have a bunch of examples of CMMs to refer to. All these are real-world ones, so don't worry too much about what an abstract definition of a CMM is. Keep these examples in mind. So how do you make a CMM? So, one: pick a specific important commons. Two: pick a specific target social group that's trying to mature its capabilities, right? So if the commons you pick is, like, a lake and a community around it that wants to keep the lake ecosystem healthy, the lake is the commons, and the people who are trying to keep it healthy are the social group. Three: try and define four to seven levels. Don't go over seven; keep it actually close to four or five if you want, because it gets weird. The traditional ones are initial, managed, defined, quantified, optimized, but feel free to get totally creative. Four: name each level and characterize it in terms of people, technologies, capabilities. Then this one is very important, number five: try and define a test for each level. So if your CMM is about preserving a forest and you have a five-level hierarchy of how to preserve a forest and say, oh, Brazil is at level three, but Indonesia is at level two, what the hell does that mean? Define a test for level two and level three about how rainforest commons work, okay? Six: pick a visualization. So I gave you a few examples: pyramids, ramps, stacked blocks, and stuff. So pick a good visualization. If you don't like the ones you have, make one up. But, you know, this is not an art workshop. This is, like, a bureaucrat workshop, so you don't have to get super artistic with it. Think of this whole CMM exercise as a collective learning curve from ad hoc, sort of, kind of illiterate levels to very mature and refined capabilities. And it can go from very scattered skills (everybody here uses Web3 wallets, I presume, but there's no coordination; it's very fragmented) to integrated ones: if it matures, presumably there'll be a wide conversation about which wallets are better. Maybe there already is, but there are no textbook versions of the discussion. So: a collective learning curve from ad hoc to mature, from scattered to integrated. And usually there's an element of codification and documentation, too, in building a CMM. And both the organizational structure and the people in it are learning as you go along, right?
Forming procedural memories, so that even if people join and leave, the organization or system still remembers, right? So if a whole bunch of people create a CMM for something and then half of them get new jobs and go, and a new half comes in, the system should not break completely. The knowledge continuity should be there. So, special challenges for us as kind of an open ecosystem that's decentralized and all these weird things. CMMs are typically for top-down, traditional organizations, where the CEO can say, hey, I'm appointing a bunch of people to impose this training program, and all of you are going to go from level zero to level five. This is not that. So how do you do it in an open-ecosystem context? CMMs rely on codification and documentation. In open systems, that typically happens in a much more ad hoc way. Like, maybe some volunteers start documenting stuff. EIPs are a good example, where it has somehow gelled into a very formal codification, but not in a corporate way. CMMs are usually technocratic, but can we actually not make this about LinkedIn middle managers, and work with direct technological agency? And finally, CMMs are often about the acquisition and installation of behaviors in big corporations, because it's all theater. The CEO wants to have, like, a big program that they can brag about. Once that happens and the big parties are done, people forget, and skills are not actually maintained. So in an open ecosystem, actually, the reverse emphasis is better. Like, it's better to acquire and install a very limited capability that then endures for 10 years than to pretend that everybody's a level-five black belt and then, one year later, nobody knows how to log on to the system. So you really should think in terms of maintenance of small skills. Okay, we are going to head into the first breakout session. So this is my provocation. Okay, so what is the bounding box for your chosen commons, right? So what is the physical medium of the commons? So if you're talking forests, it's, like, trees and grasslands and the animals and the ecosystem. If it's software, it's the Git repository where all your software lives. So think of what the actual physical, material medium is. Think about the neighbors of it, right? If your code is on GitHub, like Ethereum's code is, one of your neighbors, whether you like it or not, is Microsoft, because Microsoft owns GitHub, and so forth, right? So think about your neighbors. What is the nature of the boundaries and the threat and exposure you have across those boundaries? So here's an example of what I mean by a bounding box. So treat this as, like, strawman bullshit. I don't know if this is really any good, but you can think of, like, you know, censorship resistance, Ethereum development processes. These are the boundaries for it. The fact that the code lives on GitHub: that's not a threat as such, but a thing you have to think about, right? Everything Ethereum does is based on compute, and compute relies on chips, and chips come from TSMC, and that's vulnerable to conflict, right? Quantum-resistant cryptography, another threat. So all these are, like, the bounding conditions for what makes it possible for Ethereum to be healthy. And just to have a very different example, if you have a public lake, you have to think about things like: are there invasive species?
What are the environmental regulations that are like constraining what you do? A really good example of this came up in one of our SOP research projects last year, where the people in Brooklyn trying to preserve the waterways, they discovered that the local regulation said that if the water is being used for like human activities, it has to meet higher quality standards. So an activist group started canoeing in the sewage contaminated canals of Brooklyn, and because of that, they forced by regulation, the regulators to actually start cleaning up the canals. So this is a very interesting example of stewarding the commons. So lake. Okay, so breakout number one, so this is the first one. So first form groups of three to five people, so just where you are, look at your two nearest neighbors, So this is the first one. So first form groups of three to five people. So just where you are, look at your two nearest neighbors, bring your chairs a little closer together. And SOP people, I do not want to see two SOP people in the same group, so distribute yourselves accordingly. And yeah, together choose a commons to work on, choose a target population, and maybe draw a bounding box if it's useful, then draw version one of this, you know, capability maturity model for your comments. So set of named levels, people, skills, and so forth and test for the abilities. So it will take 17 minutes to do this starting now and I think Timber is going to go around distributing paper if he hasn't already. So form your groups of three, please. Three to four. Thank you. All right. Thank you. . Yes. Thank you. Yes. Open the phone.", "eventId": "devcon-7", - "slot_start": 1731556800000, - "slot_end": 1731558600000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1vVTMx-WFRYRYIkDhxt9cWeLavDtiXTRNFX6sO0Z4Nyo", - "resources_slides": null, + "slot_start": 1731486600000, + "slot_end": 1731497400000, + "slot_roomId": "classroom-e", + "resources_presentation": "https://docs.google.com/presentation/d/1gO904DKuSqj1sNQuLtbP57gbG3NphApmqMl4sI6azOs", + "resources_slides": "https://drive.google.com/file/d/1QQFuE14oWAmsJ7yypiW4y7ouxF6nJNwI/view", "speakers": [ - "b-l" + "tim-beiko", + "venkatesh-rao" ] }, "vector": [ @@ -387779,6 +386704,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -387954,6 +386880,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -388148,9 +387075,9 @@ 0, 0, 0, + 6, 0, 0, - 6, 0, 0, 0, @@ -388533,7 +387460,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -388609,7 +387535,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -388642,6 +387567,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -388664,14 +387590,17 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -388884,10 +387813,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -389080,6 +388005,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -389087,12 +388013,6 @@ 0, 0, 2, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -389105,51 +388025,53 @@ }, { "session": { - "id": "hardening-the-commons", - "sourceId": "BMTVJK", - "title": "Hardening the Commons", - "description": "A hands-on workshop for those interested in strengthening the capture resistance and general survivability of commons under their stewardship. 
This session will be a sequence of guided small group discussions that will flesh out the levels of a capability maturity model for how a commons resource, whether it is a blockchain or a city, can be gradually \"hardened\" by developing and maturing capabilities at material, philosophical, skill, social, and mission levels.", - "track": "Coordination", - "type": "Workshop", - "expertise": "Beginner", - "audience": "Community", + "id": "hardhat-3-preview-overhauled-and-rust-powered", + "sourceId": "QZYQYE", + "title": "Hardhat 3 Preview: Overhauled & Rust-Powered", + "description": "The Hardhat team has been working continuously over the past two years to redesign and rewrite Hardhat from the ground up, including a major migration to Rust. This talk will explore the problems and solutions that the upcoming release of Hardhat 3 will focus on: performance, Solidity tests, correct L2 network simulation, and a comprehensive deployment system.", + "track": "Developer Experience", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "adoption", - "Censorship Resistance", - "Coordination", - "Solarpunk" + "Developer Infrastructure", + "Tooling", + "DevEx", + "solidity", + "Developer Infrastructure", + "DevEx", + "Tooling" ], "keywords": [ - "Impact", - "Commons", - "Adoption" + "Hardhat", + "Solidity" ], - "duration": 10846, + "duration": 1620, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "fbf71c10e089f8db1849642e0dee7c93c96327b3a698e74257918c7cc10f9742", + "sources_youtubeId": "slSwrZTwNn4", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673492da9dbb7a90e1a406e2", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673492da9dbb7a90e1a406e2.vtt", - "transcript_text": " Hi everybody, thanks for coming. Can we actually all please come into the center of the room since it's a smaller group? Can all the people who have been part of the Summer Protocols program raise their hands or just stand up? Stand up. All right. Do not be in one cluster. Please distribute yourself. I do not want to see two SOP people in the same breakout group. So SOP people, please spread yourselves out. All right. People trickling in. As you can see from my hat, it's my birthday today, so make this workshop a success. It's not just my birthday. It's a big one. It's 50. So how this workshop goes is going to be the predictor of the rest of my life. Please make it go well. Okay. All right. Welcome. This is the Hardening the Commons workshop. If you were at Tim's talk yesterday, you heard him raise the team. It's been part of, like, many of the talks here, the Cypherpunk history session. So a whole bunch of talks had these teams going. So this is a workshop, and it'll have some talking, but it's mainly a working workshop. And this is a research workshop. We are not here to teach you anything. You are here to help us do some research. So we have what we think is a good question. It's one of the most important questions. How to harden the commons is in our opinion one of the most important research questions and this has been validated by like two years of the Summer of Protocols program. And we're hoping that in the next three hours you guys will help us come up with some really good answers, right? And this will actually, oh sorry. Can you hold this? good answers, right? And this will actually... Oh, sorry. Okay. 
So this will actually help us shape the agenda for the 2025 program. So this is not just going to be like, you know, you do some exercises on paper and it then vanishes into the void and nothing else happens. So please: we will be collecting the workshop output. We will be analyzing it, and we'll be trying to make use of it to shape the 2025 program. If you don't know what Summer of Protocols is, go to summerprotocols.com or talk to the various program alumni who are scattered around the room. All right, let's see. Okay, oops. So the goal of the workshop is to try and put together everything we know and have been talking about for years in the Ethereum community: capture and censorship resistance, decentralized governance, permissionless innovation. How do you put this all together in a big-picture model, in what we're calling a capability maturity model? Raise your hand if you've heard the term capability maturity model. One, two, three, four, five. So if this had been one of my traditional, you know, big-corporation consulting gigs and I'd asked the question, 100% of the middle managers would have raised their hand, because this is standard LinkedIn boilerplate business management stuff. It can feel very, like, awkward and bureaucratic, but it's a very useful way to put lots of random thoughts together in a model. So this is a capability maturity model workshop. I'll show you a few examples to see what that means. And if we do a good job, it can serve as a roadmap for a lot of people trying to adopt the technologies we've all been working on. So, harden the commons. Two terms there. Harden is in two senses. One, as in harden technology against threats, like radiation-hardening electronics for space missions. That's one sense of harden. The other sense of harden is the ability to make strong commitments about the future, and this is something Josh Stark will talk about in his provocation in, like, an hour or so. And the commons, of course: you've all heard the term commons, and we have a lot of examples in our minds. So, you know, Ethereum L1 is a commons. Open source projects are commons. Forests and lakes and rivers are commons. So we want to, like, think broadly about commons. And Trent is here. He'll actually be leading a breakout session on how to think about commons. But here are some examples to keep in mind. And I've put this icon on some of the slides, because for the workshopping pieces, it'll be helpful to have these sort of, like, prompting things available. So take pictures of the slides where I have this icon, okay? So these are some examples of commons. Don't worry too much about the definitions. Systems for producing or stewarding shared resources is a good enough working definition for our work here, okay? All right, how many of you have heard of Elinor Ostrom? Okay, this is impressive, like more than half the room. Nobel Prize winner, did some amazing work on how commons are built and stewarded. These are her famous principles for stewarding commons. There's a little bit of a cult or religion aspect around Ostrom, so don't take this as gospel, but take it as a thought starter. They're good ideas, but feel free to contradict and challenge them, okay? So again, another picture-taking slide.
We do have a shared G drive where hopefully you'll dump your final CMM model. So you'll be working on paper. You'll be, like, making revisions of the model that you're working on. But the final one, take a picture and dump it in this folder. If you have trouble, just hand your paper model to Timber. Timber, are you here? Raise your hand. Can you stand up and show your face to everybody? So Timber is going to be running logistics in the room. Okay, so that's the housekeeping done. So the agenda is: I'll give you a quick introduction to what CMMs are. Then we'll do a set of five alternating provocations and breakouts. So a short, like, seven-minute lightning-talk-type thing, followed by 17 minutes where you do one pass through the CMM, then another talk, and so on. So we'll do that. Halfway through, we'll take a little bit of a break, and then we'll come back, do some sharebacks, broad room comments, and then wrap up. So standard workshop structure, but hopefully the content is what will make this. Okay? Okay. So, a short TLDR version of the hardening-the-commons problem, right? How many of you are seeing this cartoon for the first time? Raise your hands. Okay. So most of you have seen this cartoon, right? So you don't want to be too idealistic. We are not writing science fiction. We are not writing weird time machine stories. We are being realistic. So any speculation we do has to be kind of, like, plausible and realistic, which means, yeah, don't get into, like, weird fantasy scenarios about crypto. Keep the, you know, $5 wrench condition in mind. That's what it means to harden a commons against attacks. You should be able to defend against $5 wrench attacks. Okay. What's a CMM? It's a well-known model that management consultants like me and, you know, career bureaucrats use to just think about how organizations acquire and learn capabilities by rising through several levels over several years. So we need to adapt it a little bit to apply to commons and open kinds of systems, but I think it works. And yes, it can be LinkedIn middle-manager bullshit if it's done poorly, but I think this room can do it well, okay? Because remember, it's my birthday. You have to do it well as a gift to me. Okay, so here's, like, a classic. If you Google CMM, this is the kind of diagram you'll find. It's a pyramid with five levels. You start out with an initial condition of skills where maybe there are random skills. People have, like, crappy, disorganized ways of doing things, like all of us using Web3 wallets. I would say we are at this initial, disorganized level. Managed is when a little bit more structure is coming in. Then defined: people have textbook definitions of what things are. Then people are beginning to measure things, so things are getting quantitative. Then finally, people are starting to optimize things; this is the mature stage. So this is a generic CMM template. Here's another way to visualize one. You can think of it as, like, a ramp increasing in time. So this one happens to be about analytics. This type of diagram was popular about 10 years ago when people were talking about analytics. So as you can see, the maturation is: when your capabilities are very primitive, you can only do hindsight analytics. But as you progress through the levels, you can do foresight. You can do insight, right? So this is evolving high-level capability based on the micro, tool-level, tactical capabilities. So you learn to use the tools better and better, and you go from being reactive, in hindsight, to predictive.
Some organizations actually acquired capabilities this way. Other people talked a lot about analytics but didn't actually get there. Here's another one. These are all things I pulled from Google Images, so they're real images that people presumably used in some organizations. This is an example of security capabilities, and this one is interesting because it visualizes it as growing capabilities on three vectors, technology, process, and people, so the red, blue, and green, and each of them evolves through multiple levels, right? So another good example. You've probably heard a lot about this one. If you've been following, like, the self-driving-cars discourse, people talk about five levels of autonomy. What does it mean? Well, a little bit bureaucratic, a little bit engineering. It's a mix of both. It's technocratic. We'll have Sam doing a provocation later on challenging the ideology of technocracy. But there's a technocratic model of what self-driving should look like: bureaucrats plus engineers putting it together. This one, actually, I just made half an hour ago. I was in a workshop that I was running called Web of Roots. And it was all about the problems of crypto adoption. And the breakout session I was in was about how there's a lack of tools in the ecosystem. And I made up this capability maturity model. So I have, like, five levels, and AI is like a cloud around that. So you can get creative with this stuff. This one, Tim, do you want to quickly speak to this? Vitalik's roadmap is kind of like a CMM. So Tim, come around and speak into the mic. Oh, we lost my slides. Okay. Yeah, so in Ethereum, like Venkat was saying, we don't have a lot of LinkedIn CMMs, and I think this is the closest we've come as an ecosystem to mapping things out. And it's sort of like an implied CMM here, if you think of the way Vitalik summarizes the Ethereum roadmap. In 2020, it was just like: here's a bunch of stuff we have to do in the next 10 years and how they fit together. And not on this slide is the 2019 version, which is: here's a bunch of stuff scattered over the internet that we have to do in the next 10 years. And it's sort of a refinement in thinking over the years, where a couple of years after that, we were able to actually break this down into specific tracks, so like the merge, verge, surge, all those things. They're still sort of just these rough pointers that we have that are quite low context. And then just before Devcon, Vitalik actually put out a blog post for each of these tracks, sort of going very deep into details around, like, okay, what's the actual thing that needs to be done? Where is it at? What are the blockers and whatnot? And so you can see that we have this sort of evolving capability, or at least Vitalik has, and then guiding the ecosystem to articulate or understand what this entire set of things we have to do is and how they all relate to each other. Thanks, Tim. All right, so now you have a bunch of examples of CMMs to refer to. All these are real-world ones, so don't worry too much about what an abstract definition of a CMM is. Keep these examples in mind. So how do you make a CMM? So, one: pick a specific important commons. Two: pick a specific target social group that's trying to mature its capabilities, right? So if the commons you pick is, like, a lake and a community around it that wants to keep the lake ecosystem healthy, the lake is the commons, and the people who are trying to keep it healthy are the social group.
Three: try and define four to seven levels. Don't go over seven; keep it actually close to four or five if you want, because it gets weird. The traditional ones are initial, managed, defined, quantified, optimized, but feel free to get totally creative. Four: name each level and characterize it in terms of people, technologies, capabilities. Then this one is very important, number five: try and define a test for each level. So if your CMM is about preserving a forest and you have a five-level hierarchy of how to preserve a forest and say, oh, Brazil is at level three, but Indonesia is at level two, what the hell does that mean? Define a test for level two and level three about how rainforest commons work, okay? Six: pick a visualization. So I gave you a few examples: pyramids, ramps, stacked blocks, and stuff. So pick a good visualization. If you don't like the ones you have, make one up. But, you know, this is not an art workshop. This is, like, a bureaucrat workshop, so you don't have to get super artistic with it. Think of this whole CMM exercise as a collective learning curve from ad hoc, sort of, kind of illiterate levels to very mature and refined capabilities. And it can go from very scattered skills (everybody here uses Web3 wallets, I presume, but there's no coordination; it's very fragmented) to integrated ones: if it matures, presumably there'll be a wide conversation about which wallets are better. Maybe there already is, but there are no textbook versions of the discussion. So: a collective learning curve from ad hoc to mature, from scattered to integrated. And usually there's an element of codification and documentation, too, in building a CMM. And both the organizational structure and the people in it are learning as you go along, right? Forming procedural memories, so that even if people join and leave, the organization or system still remembers, right? So if a whole bunch of people create a CMM for something and then half of them get new jobs and go, and a new half comes in, the system should not break completely. The knowledge continuity should be there. So, special challenges for us as kind of an open ecosystem that's decentralized and all these weird things. CMMs are typically for top-down, traditional organizations, where the CEO can say, hey, I'm appointing a bunch of people to impose this training program, and all of you are going to go from level zero to level five. This is not that. So how do you do it in an open-ecosystem context? CMMs rely on codification and documentation. In open systems, that typically happens in a much more ad hoc way. Like, maybe some volunteers start documenting stuff. EIPs are a good example, where it has somehow gelled into a very formal codification, but not in a corporate way. CMMs are usually technocratic, but can we actually not make this about LinkedIn middle managers, and work with direct technological agency? And finally, CMMs are often about the acquisition and installation of behaviors in big corporations, because it's all theater. The CEO wants to have, like, a big program that they can brag about. Once that happens and the big parties are done, people forget, and skills are not actually maintained. So in an open ecosystem, actually, the reverse emphasis is better. Like, it's better to acquire and install a very limited capability that then endures for 10 years than to pretend that everybody's a level-five black belt and then, one year later, nobody knows how to log on to the system. So you really should think in terms of maintenance of small skills.
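As a minimal sketch of the CMM recipe described above (pick a commons, pick a target social group, define four to seven named levels, and attach a concrete test to each level), one could encode it like this; the lake example and all field and level names are invented for illustration:

# Hypothetical encoding of the workshop's CMM recipe: named levels, a
# characterization of each, and (crucially) a concrete test per level.
from dataclasses import dataclass

@dataclass
class Level:
    name: str          # e.g. "initial", "managed", "defined", ...
    description: str   # the people / technologies / capabilities involved
    test: str          # how to check a group has actually reached this level

@dataclass
class CMM:
    commons: str       # the shared resource being stewarded
    social_group: str  # who is maturing its capabilities
    levels: list       # four to seven Level entries, per the recipe

lake_cmm = CMM(
    commons="public lake",
    social_group="community keeping the lake ecosystem healthy",
    levels=[
        Level("initial", "ad hoc volunteer cleanups",
              "at least one cleanup has happened in the past year"),
        Level("managed", "regular water-quality monitoring",
              "monthly measurements exist for a full year"),
        Level("optimized", "invasive species and runoff actively managed",
              "quality metrics improve year over year"),
    ],
)

The point of the structure is the test field: saying "Brazil is at level three" only means something if level three names a check you can actually run.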
Okay, we are going to head into the first breakout session. So this is my provocation. Okay, so what is the bounding box for your chosen commons, right? So what is the physical medium of the commons? So if you're talking forests, it's, like, trees and grasslands and the animals and the ecosystem. If it's software, it's the Git repository where all your software lives. So think of what the actual physical, material medium is. Think about the neighbors of it, right? If your code is on GitHub, like Ethereum's code is, one of your neighbors, whether you like it or not, is Microsoft, because Microsoft owns GitHub, and so forth, right? So think about your neighbors. What is the nature of the boundaries and the threat and exposure you have across those boundaries? So here's an example of what I mean by a bounding box. So treat this as, like, strawman bullshit. I don't know if this is really any good, but you can think of, like, you know, censorship resistance, Ethereum development processes. These are the boundaries for it. The fact that the code lives on GitHub: that's not a threat as such, but a thing you have to think about, right? Everything Ethereum does is based on compute, and compute relies on chips, and chips come from TSMC, and that's vulnerable to conflict, right? Quantum-resistant cryptography, another threat. So all these are, like, the bounding conditions for what makes it possible for Ethereum to be healthy. And just to have a very different example, if you have a public lake, you have to think about things like: are there invasive species? What are the environmental regulations that are, like, constraining what you do? A really good example of this came up in one of our SOP research projects last year, where the people in Brooklyn trying to preserve the waterways discovered that the local regulation said that if the water is being used for, like, human activities, it has to meet higher quality standards. So an activist group started canoeing in the sewage-contaminated canals of Brooklyn, and because of that, they forced, by regulation, the regulators to actually start cleaning up the canals. So this is a very interesting example of stewarding the commons. So: lake. Okay, so breakout number one. So this is the first one. So first, form groups of three to five people. So just where you are, look at your two nearest neighbors, bring your chairs a little closer together. And SOP people, I do not want to see two SOP people in the same group, so distribute yourselves accordingly. And yeah, together choose a commons to work on, choose a target population, and maybe draw a bounding box if it's useful. Then draw version one of this, you know, capability maturity model for your commons. So: a set of named levels, people, skills, and so forth, and tests for the abilities. So you'll take 17 minutes to do this, starting now, and I think Timber is going to go around distributing paper if he hasn't already. So form your groups of three, please. Three to four. Thank you. All right. Thank you. Yes. Thank you. Yes.
Open the phone.", + "sources_streamethId": "67348e149dbb7a90e175a3e3", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67348e149dbb7a90e175a3e3.vtt", + "transcript_text": " Hey everyone, thanks for being here. My name is Patricio, I'm the co-founder and CTO of Nomic Foundation, which is a non-profit dedicated to Ethereum developers like you. At Nomic, what we do is building open source infrastructure and tooling for the Ethereum community. And the most well-known of our tools is HardHat. And during this presentation, what we are or I'm going to talk about is HardHat v3 or HardHat 3, the new version of HardHat, which is still in development because it's a whole overhaul of HardHat v3, or HardHat 3, the new version of HardHat, which is still in development, because it's a whole overhaul of HardHat with tons of changes. But first, let's take a look at HardHat 2 for a second, especially its limitations and why we decided to rewrite it. The first one, and main one, is that Ethereum changed a ton during the lifetime of Hard Hat. We started working on it almost seven years ago, about 2018. And back then, Ethereum was a single chain, a way more simple ecosystem. There was mainnet and a few testnets. Those testnets aren't even alive nowadays. And the applications were simpler, those test nets aren't even alive nowadays, and the applications were simpler, right? There wasn't even stable coins back then. Now, applications are larger, and Ethereum turned to be a single chain into an ever-increasing ecosystem of different chains, each of them with a slightly different behavior. Another problem that we have in Hard Hat 2, or Hat, is that we brought everything in JavaScript, including our network simulator, and that had several performance issues. We also focused a ton on the Node.js ecosystem because we envisioned building a platform for others to be able to customize and extend their own setups, but that focus on JavaScript only also meant that you could only write tests in JavaScript or TypeScript. And the final limitation is that when we designed Hard Hat, there was only one chain mainnet. We designed it in a way where for every single hard hat process, there is a single chain, a single network connection, you just import hard hat, you have your network connected there, most of the time it's going to be simulated by us, and all the boilerplate, all of your libraries, all your plugins get configured for you, but it's a single chain. As soon as you want to work with multiple chains, you hit this limitation and you start making workarounds. We created RPCs for switching the hard forks or forking other networks. People use different configurations and things like that. So, HARDCAD 3, we rewrote it from scratch to circumvent these limitations and offer more functionality. And how does it look? Well, it looks a bit like this. It's a bit like hard hat. It's still hard hat, but hard hat from the future or more technical hard hat. But at the end, it's just a hard hat, right? Like, it feels and looks like hard hat. So, as I mentioned, it's a complete revamp of the product. The scope is massive. Hard hat grew during these six, seven years. It has tons and tons of clients and functionality. And I can't cover everything here. 
I'm only going to focus on this list of things, which are the network simulator that we rewrote in Rust, the Solidity Test support that is added in hard hat 3, our deployment solutions.", "eventId": "devcon-7", - "slot_start": 1731486600000, + "slot_start": 1731495600000, "slot_end": 1731497400000, - "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/1gO904DKuSqj1sNQuLtbP57gbG3NphApmqMl4sI6azOs", - "resources_slides": null, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1XDRIhALcLD_91krtX14MMkCYoXRCN3nZ_oia1tIdaLw", + "resources_slides": "https://drive.google.com/file/d/1uwxr-7RfoVcDG2v2wG1TSWv3mR-v8G_E/view", "speakers": [ - "tim-beiko", - "venkatesh-rao" + "patricio-palladino" ] }, "vector": [ 0, 0, 0, + 6, 0, 0, 0, @@ -389158,8 +388080,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -389333,7 +388253,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -389529,9 +388448,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -389918,6 +388837,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -389926,6 +388846,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -389949,6 +388870,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -390023,7 +388945,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -390046,17 +388967,14 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, - 2, 0, 0, - 2, 0, 0, 0, @@ -390262,6 +389180,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -390458,6 +389377,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -390470,10 +389390,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -390483,51 +389399,43 @@ }, { "session": { - "id": "hardhat-3-preview-overhauled-and-rust-powered", - "sourceId": "QZYQYE", - "title": "Hardhat 3 Preview: Overhauled & Rust-Powered", - "description": "The Hardhat team has been working continuously over the past two years to redesign and rewrite Hardhat from the ground up, including a major migration to Rust. This talk will explore the problems and solutions that the upcoming release of Hardhat 3 will focus on: performance, Solidity tests, correct L2 network simulation, and a comprehensive deployment system.", - "track": "Developer Experience", - "type": "Talk", + "id": "hardware-security-from-sand-to-stone", + "sourceId": "UZDFEK", + "title": "Hardware Security: From Sand to Stone", + "description": "All software runs on hardware. The assumptions on which many of our systems rest are often shakier than we realise. This talk explores hardware security, its shortcomings and the path to a firmer foundation.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Developer Infrastructure", - "Tooling", - "DevEx", - "solidity", - "Developer Infrastructure", - "DevEx", - "Tooling" + "Decentralization", + "Hardware wallets", + "Security" ], "keywords": [ - "Hardhat", - "Solidity" + "TEE", + "Hardware Trojans" ], - "duration": 1620, + "duration": 858, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "e0116d1a6b9a82bc416ec617599f1e64550eaea718855a6d17b96d6ab8cb51bd", + "sources_youtubeId": "LDGXAd14DJY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67348e149dbb7a90e175a3e3", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67348e149dbb7a90e175a3e3.vtt", - "transcript_text": " Hey everyone, thanks for being here. 
My name is Patricio. I'm the co-founder and CTO of Nomic Foundation, which is a non-profit dedicated to Ethereum developers like you. At Nomic, what we do is build open source infrastructure and tooling for the Ethereum community. And the most well-known of our tools is Hardhat. And during this presentation, what I'm going to talk about is Hardhat v3, or Hardhat 3, the new version of Hardhat, which is still in development, because it's a whole overhaul of Hardhat with tons of changes. But first, let's take a look at Hardhat 2 for a second, especially its limitations and why we decided to rewrite it. The first one, and the main one, is that Ethereum changed a ton during the lifetime of Hardhat. We started working on it almost seven years ago, around 2018. And back then, Ethereum was a single chain, a way simpler ecosystem. There was mainnet and a few testnets. Those testnets aren't even alive nowadays. And the applications were simpler, right? There weren't even stablecoins back then. Now applications are larger, and Ethereum turned from being a single chain into an ever-increasing ecosystem of different chains, each of them with a slightly different behavior. Another problem that we have in Hardhat 2 is that we built everything in JavaScript, including our network simulator, and that had several performance issues. We also focused a ton on the Node.js ecosystem, because we envisioned building a platform for others to be able to customize and extend their own setups, but that focus on JavaScript also meant that you could only write tests in JavaScript or TypeScript. And the final limitation is that when we designed Hardhat, there was only one chain, mainnet. We designed it in a way where, for every single Hardhat process, there is a single chain, a single network connection: you just import Hardhat, you have your network connected there, most of the time it's going to be simulated by us, and all the boilerplate, all of your libraries, all your plugins get configured for you, but it's a single chain. As soon as you want to work with multiple chains, you hit this limitation and you start making workarounds. We created RPCs for switching the hard forks or forking other networks. People use different configurations and things like that. So, Hardhat 3: we rewrote it from scratch to circumvent these limitations and offer more functionality. And how does it look? Well, it looks a bit like this. It's a bit like Hardhat. It's still Hardhat, but Hardhat from the future, or a more technical Hardhat. But at the end, it's just Hardhat, right? Like, it feels and looks like Hardhat. So, as I mentioned, it's a complete revamp of the product. The scope is massive. Hardhat grew during these six, seven years. It has tons and tons of clients and functionality. And I can't cover everything here.
I'm only going to focus on this list of things, which are the network simulator that we rewrote in Rust, the Solidity Test support that is added in hard hat 3, our deployment solutions.", + "sources_streamethId": "6735c4869dbb7a90e10712d6", "eventId": "devcon-7", - "slot_start": 1731495600000, - "slot_end": 1731497400000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1XDRIhALcLD_91krtX14MMkCYoXRCN3nZ_oia1tIdaLw", - "resources_slides": null, + "slot_start": 1731576000000, + "slot_end": 1731576600000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1wcISJi-Q9aswEj-3R97yb_9jp5DN6P_38XsaGdEq3B4", + "resources_slides": "https://drive.google.com/file/d/1QNri28c9QHvsJ_157yGgsTO3HjR4S9lm/view", "speakers": [ - "patricio-palladino" + "quintus-kilbourn" ] }, "vector": [ - 0, - 0, 0, 6, 0, @@ -390909,11 +389817,9 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -391278,6 +390184,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -391298,7 +390205,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -391307,7 +390213,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -391331,7 +390236,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -391382,6 +390286,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -391616,6 +390521,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -391642,7 +390548,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -391844,9 +390749,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -391862,45 +390767,29 @@ }, { "session": { - "id": "hardware-security-from-sand-to-stone", - "sourceId": "UZDFEK", - "title": "Hardware Security: From Sand to Stone", - "description": "All software runs on hardware. The assumptions on which many of our systems rest are often shakier than we realise. This talk explores hardware security, its shortcomings and the path to a firmer foundation.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "Intermediate", + "id": "harry-p", + "sourceId": "LXJJDW", + "title": "Harry P", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Decentralization", - "Hardware wallets", - "Security" - ], - "keywords": [ - "TEE", - "Hardware Trojans" - ], - "duration": 858, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "e0116d1a6b9a82bc416ec617599f1e64550eaea718855a6d17b96d6ab8cb51bd", - "sources_youtubeId": "LDGXAd14DJY", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6735c4869dbb7a90e10712d6", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731576000000, - "slot_end": 1731576600000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1wcISJi-Q9aswEj-3R97yb_9jp5DN6P_38XsaGdEq3B4", - "resources_slides": null, - "speakers": [ - "quintus-kilbourn" - ] + "slot_start": 1731488400000, + "slot_end": 1731492000000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1wNma7KIt9CoI1JayWZB-MIf47ge7DGlMJ3Ev9Il3pdE", + "resources_slides": "" }, "vector": [ 0, - 6, 0, 0, 0, @@ -391909,6 +390798,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -392283,7 +391173,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -392650,7 +391539,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -392752,7 +391640,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -392988,7 +391875,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -393216,7 +392102,6 @@ 2, 0, 0, - 0, 2, 0, 0, @@ -393235,36 +392120,47 @@ }, { "session": { - "id": "harry-p", - "sourceId": "LXJJDW", - "title": "Harry P", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", + "id": "hevm-or-how-i-learned-to-stop-worrying-and-love-the-symbolic-execution", + "sourceId": "YQPADR", + "title": "hevm or: How I Learned to Stop Worrying and Love the Symbolic Execution", + "description": "hevm is a symbolic execution engine for the EVM that can prove safety properties for EVM bytecode or verify semantic equivalence between two bytecode objects. It exposes a user-friendly API in Solidity that allows you to define symbolic tests using almost exactly the same syntax as usual unit tests.\r\n\r\nIn this talk, we'll present hevm, what it's useful for, and when and how to use it to help secure your digital contracts.", + "track": "Security", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Security", + "Fuzzing", + "EVM", + "Fuzzing", + "Security" + ], + "keywords": [ + "Symbolic Execution", + "EVM" + ], + "duration": 1588, "language": "en", - "speakers": [], + "sources_swarmHash": "1898b94ad33334ceee8d4ba4ec2171d30c52da2d3e647f5f194e4655ac4b226c", + "sources_youtubeId": "o89CWZc2i1w", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67358acd9dbb7a90e18c0581", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67358acd9dbb7a90e18c0581.vtt", + "transcript_text": " Hi everyone, I'm Mathis Sos and I'll be talking about HEVM and symbolic execution that we built together with Lexi, myself and Zoe. 
We're part of the Argo Collective, a collective that recently spun out of the Ethereum Foundation. If you want to hear a bit more about the Argo Collective, you can watch our talk that was given yesterday. So, first of all, what is this talk going to look like? First, I'm going to talk a little bit about what it means to symbolically execute. Then I'm going to give a bit of an overview of HEVM. Then I'll explain how HEVM can be used to help secure some of the work that you have done. And then, finally, I'm going to conclude the talk. So, first of all, what is symbolic execution? Let's say that your code looks like this. If you try to do fuzzing on this code, it will likely not find the fault that is marked by assert false, which could be, for example, something that drains your contract or locks your funds, or some other kind of negative event. You can keep fuzzing this forever, essentially, and nothing will happen. But if you run symbolic execution on this, it will immediately hit the assert false and will give you the solution: the two integers that you need to pass in to trigger the failure. So this is a good litmus test of what symbolic execution can do in comparison to fuzzing. Of course, it can do more, but this is a good example where fuzzing will not help you very much, and symbolic execution will definitely help you. So let's get a little bit more into the details. Here I used traditional assembly. I'm old school. I could have used EVM assembly, but it's more or less the same. What I'm going to showcase is a very simple straight-line program where we do a move and an add with two registers. Here we have concrete execution on the top, with concrete values like 1 and 2. The MOV will move the value 2 to the A register, and then the ADD will add 4 to this value, so the final register state will be 6 and 2. Now, when it comes to symbolic execution, instead of initializing the registers with concrete values like 1 and 2, it will initialize them with variables like v1 and v2, and then execute these instructions over those variables rather than the concrete values. As you can see, step by step the state evolves, and eventually it will end up with v2 plus 4 in the A register and v2 in the B register. Of course, if I substitute the concrete values into that, we end up exactly where we were with concrete execution. But what this means is that I can mathematically express what the state is for any input value, right? Okay, so that sounds interesting. Where does this get hard? Because this seems relatively straightforward. It gets hard when branching happens. So here is a program where we have a branch. With concrete execution it's relatively straightforward: we have both registers set to 1, then we check whether they are equal, which they happen to be, and if so we add 5 to the register A, so the final state is 6 and 1. That's quite clear. But when it comes to symbolic execution, we don't know, of course, what the values of A and B are. We represent them as the symbolic variables v1 and v2, and we need to check both potential execution paths: in this case, when v1 is equal to v2 and when v1 is not equal to v2. And we'll end up with two different states, potentially.
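A toy sketch of that difference, in TypeScript rather than anything HEVM-specific: the same two-instruction program run once over concrete numbers and once over symbolic expression trees.

```typescript
// Toy illustration (not HEVM's actual code): interpreting MOV/ADD over
// either concrete numbers or symbolic expression trees.
type Value = number | { op: "add"; lhs: Value; rhs: Value } | { var: string };

const add = (lhs: Value, rhs: Value): Value =>
  typeof lhs === "number" && typeof rhs === "number" ? lhs + rhs : { op: "add", lhs, rhs };

type Regs = { a: Value; b: Value };

// Program: MOV a, b ; ADD a, 4
function run(init: Regs): Regs {
  let a = init.b;    // MOV: copy register b into a
  a = add(a, 4);     // ADD: a := a + 4
  return { a, b: init.b };
}

// Concrete execution: a=1, b=2 ends with a=6, b=2.
console.log(run({ a: 1, b: 2 }));
// Symbolic execution: the same program over variables v1, v2 ends with
// a = v2 + 4, b = v2 — a formula that holds for any input.
console.log(JSON.stringify(run({ a: { var: "v1" }, b: { var: "v2" } })));
```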
In this case, we do actually end up in two different symbolic states. In one case, v1 is incremented by 5, and in the other case, v1 is incremented by 4. Again, if I substitute the concrete values in, I'll get exactly the concrete execution. What this means, however, is that we now branch. We have two states, and if there's another branch, this can become an exponentially large number of potential end states. If there's a loop, and it's an infinite loop rather than a bounded one, then you can have issues with the analysis never terminating, because we don't actually know if the loop condition is ever reached, so we can potentially run forever. Now I'm just going to talk a little bit about what other symbolic execution systems are out there before I jump into HEVM. First of all, there are more or less two types within this ecosystem, as far as I understand. In one, you have a static code analysis engine, and you want to validate whether some of the things it spits out are potentially false positives. In that case, if the symbolic execution engine is not complete or doesn't understand everything, it's kind of fine, because you will just simply report the potential false positive and let the user deal with it. And that's fine. But in our case, we don't use a static code analysis engine as a precursor to HEVM, so we actually have to deal with everything that the EVM has to offer. So it's a bit more complicated, and it's also more complete, of course. Of these purely symbolic execution framework-based systems, there are a number. There's the Certora Prover, which is based on backwards exploration and weakest precondition computation. There's ETHBMC, which is, as the name suggests, a bounded model checker type of system. Then there's Halmos, which is written in Python. And then there's KEVM, which is based on the K framework and allows you to break out into K in case you need to prove some things, for example loop termination or invariants and things like that. So, just a bit of an overview of HEVM. It started a long time ago as part of the DappTools project. It implements the EVM semantics both concretely and symbolically, and the Echidna fuzzer, if you know about it, actually uses HEVM underneath for its concrete execution semantics. It is possible to execute any call from any potential state in the EVM. It understands all of the EVM, in terms of, for example, calling out to an RPC, to an archive node, to fetch state, et cetera. It basically computes a query to an SMT solver, runs the SMT solver to get the response to the query, and then interprets this response and displays it back to the user in a fashion that is more user-friendly than raw SMT output. We'll see that in a moment. There are two ways of using HEVM. One of them is counterexample generation, and the other is equivalence checking. So let's talk a little bit about counterexample generation, in this case of some kind of postcondition that is typically written as a Forge test case. Here we have Solidity, Vyper, whatever language you prefer, or even just pure EVM bytecode, and that gets interpreted by the symbolic interpreter inside HEVM to produce an intermediate representation.
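The branching case, continued in the same toy style: each path records its own path condition, and every further branch can double the number of end states (all names here are illustrative).

```typescript
// Toy path forking (again, not HEVM internals): on a symbolic branch
// condition we explore both paths, each with its own path condition.
type SymState = { a: string; b: string; pathCondition: string[] };

function branchOnEquality(init: SymState): SymState[] {
  // if (a == b) a += 5 else a += 4  -- mirrors the register example above
  const thenState: SymState = {
    ...init,
    a: `(${init.a} + 5)`,
    pathCondition: [...init.pathCondition, `${init.a} == ${init.b}`],
  };
  const elseState: SymState = {
    ...init,
    a: `(${init.a} + 4)`,
    pathCondition: [...init.pathCondition, `${init.a} != ${init.b}`],
  };
  return [thenState, elseState]; // one more branch would double this again
}

console.log(branchOnEquality({ a: "v1", b: "v2", pathCondition: [] }));
```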
Then this intermediate representation, together with the postcondition that you put down, gets compiled into a logical formula that gets sent out to the SMT solver. An SMT solver is what's called a satisfiability modulo theories solver; it understands, for example, bit-vector arithmetic, which is quite useful because of course there are these 256-bit bit vectors, or variables, that the EVM operates on. And then either it proves the property, finds a counterexample, or times out, of course; that's always a possibility. When it comes to symbolic execution for equivalence checking, we do something very similar, but what we do eventually is compare the two executions against each other. You have bytecode A and bytecode B. Let's say that bytecode B is a refactored bytecode, or something that is gas-optimized, and you want to make sure that it's doing the same thing as the original one. What this will do is try to prove that it is equivalent, find a counterexample, so some input or initial state where the two contracts actually disagree, or, of course, there's always the potential for a timeout. Let's go a little bit deeper into the symbolic execution engine inside HEVM. It operates on bytecode, which means that we're not tied to any particular compiler or language. It understands all of the EVM: the stack, the call frame stack, storage, calldata, everything. As I mentioned, it can actually run at any point in the blockchain history, and it is validated against the concrete execution semantics of geth. As mentioned, it has issues with loops and recursion, because with unterminated loops we don't know when to stop. It has some issues related to symbolic offsets, sizes, and memory copies; this is purely due to SMT limitations. And it doesn't currently deal with symbolic gas, so it basically ignores gas when it comes to symbolic execution. In concrete execution, of course, it understands gas and will deal with it, and so Echidna will be running correctly in that sense. Just a bit on the internals. Maybe this is a little too small, but basically what it does is take the EVM bytecode as input. Then it will execute it step by step and branch, if necessary, to build an intermediate representation. This intermediate representation is simplified in a generic way, and there are some specific simplifications related to keccak and to arrays and maps and things like that, which are quite specific to how the EVM handles them; especially keccak, which is very complicated to represent exactly in SMT. Eventually, this IR gets compiled into a bunch of SMT queries, and these SMT queries get dispatched to the SMT solver or solvers. Then we gather all the results from the SMT solvers, extract the counterexamples, map them back to the query that was originally dispatched, and see how we can display that to the user in a way that lets them actually run it and trigger the fault. Because at the end of the day, it's annoying to just get something like: hey, your program is faulty. That's not going to help the user. The user really wants some kind of counterexample so they can actually run it and see how it is wrong. And then, of course, they can fix it, or hopefully fix it. So that's kind of the high level of the internals.
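A deliberately naive illustration of the question equivalence checking answers (hevm does this symbolically over the bytecode; the exhaustive enumeration below is only viable because the toy domain has 256 inputs): find an input where a reference implementation and its "optimized" rewrite disagree.

```typescript
// Toy equivalence check, conceptual only: find an input where two
// implementations of the "same" function disagree.
const original = (x: number) => (x * 2) & 0xff;   // reference version
const optimized = (x: number) => (x << 1) % 255;  // "gas-optimized" rewrite (buggy)

function findCounterexample(): number | null {
  for (let x = 0; x < 256; x++) {
    if (original(x) !== optimized(x)) return x; // the two versions disagree here
  }
  return null; // equivalent on the whole (tiny) input domain
}

console.log(findCounterexample()); // prints 128: (256 & 0xff) = 0 vs (256 % 255) = 1
```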
So let's talk a little bit about this intermediate representation. I'm not going to go into the details, but it sits in the middle of the whole pipeline I showed earlier. Here's a simple function, right? It will overflow if the variable that you put in is large enough, in particular exactly equal to 2 to the power of 256 minus 1, in which case b will actually not be larger than a. I'm sure you have played around with this; it's a very trivial overflow issue. And this gets compiled into an intermediate representation where we have a proposition that a must be smaller than a plus 1. I mean, it's very simplistic in this case, but the point was to make it readable, human-readable as well. This can also be compiled into a graph, as you might imagine, and it can be quite understandable to a human how the internal representation maps back to the original code. In this case, it's quite clear. Okay, so now a little bit about how to actually run this tool and what kind of results you can expect. In this particular case, we have a Forge standard test that we import, our contract is a test, and we add prove_ in front of the test name. You will need Forge, you will need Z3, which is one of the SMT solvers that we support, and you need the hevm binary, which you can just download from the repository. You put down this test, and this test will, of course, fail, again due to overflow. And the way you run it is very, very simple: you just build the project and run hevm test. It will parse everything up, do its magic, and eventually give you a counterexample, which is what you expect. So this is the basic way of using it. And it is as clean and tidy as it looks in the output; I did not actually change or edit that, even the spacing is how it is. So it's quite visually representative of what you are going to test and what counterexamples come out. If there's more than one counterexample, there's going to be more than one listed there. So how does hevm equivalence checking work? Here I'm just going to show a condensed example. It's a very complicated codebase that someone actually tried to use this on, and here I just want to show some of the edge cases, but also that we do power through the edge cases as well. In this case, there are two bytecodes, and we want to know if they're equivalent. You see that it emits a warning that we cannot actually explore the whole thing in this particular case, due to a call into unknown code. Obviously, we don't know what's going to happen there, what is going to get executed. But beyond that, it says: okay, well, anyway, I'll power through, I'm going to ignore these bits and pieces. We have 1.7 million end states that we need to check for equivalence. And then it says: okay, well, I tried my very best. There are 93 of them I couldn't do because of a memory copy that is symbolic, and two I couldn't do because of timeout. This run actually had a 15-second timeout for each query. And eventually I couldn't find any discrepancies, given these warnings and the things that I couldn't actually explore.
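As a sketch of the solver step behind that counterexample: the proposition "a < a + 1" over 256-bit bit vectors, emitted as an SMT-LIB query that a solver such as Z3 answers sat, with a = 2^256 - 1 as the model (the emitting code is illustrative, not HEVM's).

```typescript
// Sketch of the last step: turning a proposition over 256-bit bit vectors
// into an SMT-LIB query. The query asks for a counterexample to "a < a + 1";
// the solver answers `sat` because a = 2^256 - 1 makes a + 1 wrap to 0.
const query = `
(declare-const a (_ BitVec 256))
(assert (not (bvult a (bvadd a (_ bv1 256)))))
(check-sat)
(get-model)
`;
console.log(query); // pipe to e.g. `z3 -in` to see the counterexample
```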
And if you have a look, this actually ran for like 34 minutes and 45,000 seconds, so obviously it was like", "eventId": "devcon-7", - "slot_start": 1731488400000, - "slot_end": 1731492000000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1wNma7KIt9CoI1JayWZB-MIf47ge7DGlMJ3Ev9Il3pdE" + "slot_start": 1731560400000, + "slot_end": 1731562200000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1zbKn6alKaFJ7AHUN8resSuZmq-0n4W0JbxXcZGI9Cq8", + "resources_slides": "", + "speakers": [ + "mate-soos" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -393649,6 +392545,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -394012,6 +392909,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -394217,6 +393116,17 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -394560,20 +393470,10 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, + 0, 2, 0, 0, @@ -394592,54 +393492,60 @@ }, { "session": { - "id": "hevm-or-how-i-learned-to-stop-worrying-and-love-the-symbolic-execution", - "sourceId": "YQPADR", - "title": "hevm or: How I Learned to Stop Worrying and Love the Symbolic Execution", - "description": "hevm is a symbolic execution engine for the EVM that can prove safety properties for EVM bytecode or verify semantic equivalence between two bytecode objects. It exposes a user-friendly API in Solidity that allows you to define symbolic tests using almost exactly the same syntax as usual unit tests.\r\n\r\nIn this talk, we'll present hevm, what it's useful for, and when and how to use it to help secure your digital contracts.", - "track": "Security", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "how-crypto-is-used-in-africa-today-hear-from-leading-builders", + "sourceId": "RKR9EC", + "title": "How crypto is used in Africa today? Hear from leading builders", + "description": "How are Africans using crypto at scale, and what has been the impact on society? Last year Africa saw close to $120B onchain transactions, and 10%-20% of major countries' populations used crypto. \r\n\r\nWhat problems are the top African founders solving for retail and businesses? What are the technical + non-technical friction points they face in building for the fastest growing markets in the world?\r\n\r\nHear African founders share lessons, nuances, and user behavior from the front lines.", + "track": "Real World Ethereum", + "type": "Panel", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Security", - "Fuzzing", - "EVM", - "Fuzzing", - "Security" + "Use Cases", + "Remittance", + "Ethereum for Good", + "p2p", + "Ethereum for Good", + "Remittance", + "Use Cases" ], "keywords": [ - "Symbolic Execution", - "EVM" + "Mass adoption", + "payment", + "P2P" ], - "duration": 1588, + "duration": 3381, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "123ecd0fc00f3bbef4a63b4f291024752fc482215533945b465b74cbecb2327f", + "sources_youtubeId": "GyMoif6I-cE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67358acd9dbb7a90e18c0581", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67358acd9dbb7a90e18c0581.vtt", - "transcript_text": " Hi everyone, I'm Mathis Sos and I'll be talking about HEVM and symbolic execution that we built together with Lexi, myself and Zoe. We're part of the Argo Collective. 
That is a collective that recently spun out of the Ethereum Foundation. If you want to hear a bit more about the Argo Collective, you can watch our talk that was given yesterday. So, first of all, what is this talk going to look like? First, I'm going to talk a little bit about what it means to symbolically execute. Then I'm going to give a bit of an overview of HEVM. Then I'll explain how HEVM can be used to help secure some of the work that you have done. And then, finally, I'm going to conclude the talk. So, first of all, what is symbolic execution? Let's say that your code looks like this. If you try to do fuzzing on this code, it will likely not find the fault that is marked by assert false, which could be, for example, something that drains your contract or locks your funds, or some other kind of negative event. You can keep fuzzing this forever, essentially, and nothing will happen. But if you run symbolic execution on this, it will immediately hit the assert false and will give you the solution: the two integers that you need to pass in to trigger the failure. So this is a good litmus test of what symbolic execution can do in comparison to fuzzing. Of course, it can do more, but this is a good example where fuzzing will not help you very much, and symbolic execution will definitely help you. So let's get a little bit more into the details. Here I used traditional assembly. I'm old school. I could have used EVM assembly, but it's more or less the same. What I'm going to showcase is a very simple straight-line program where we do a move and an add with two registers. Here we have concrete execution on the top, with concrete values like 1 and 2. The MOV will move the value 2 to the A register, and then the ADD will add 4 to this value, so the final register state will be 6 and 2. Now, when it comes to symbolic execution, instead of initializing the registers with concrete values like 1 and 2, it will initialize them with variables like v1 and v2, and then execute these instructions over those variables rather than the concrete values. As you can see, step by step the state evolves, and eventually it will end up with v2 plus 4 in the A register and v2 in the B register. Of course, if I substitute the concrete values into that, we end up exactly where we were with concrete execution. But what this means is that I can mathematically express what the state is for any input value, right? Okay, so that sounds interesting. Where does this get hard? Because this seems relatively straightforward. It gets hard when branching happens. So here is a program where we have a branch. With concrete execution it's relatively straightforward: we have both registers set to 1, then we check whether they are equal, which they happen to be, and if so we add 5 to the register A, so the final state is 6 and 1. That's quite clear. But when it comes to symbolic execution, we don't know, of course, what the values of A and B are. We represent them as the symbolic variables v1 and v2, and we need to check both potential execution paths: in this case, when v1 is equal to v2 and when v1 is not equal to v2. And we'll end up with two different states, potentially.
In this case, we do actually end up in two different symbolic states. In one case, v1 is incremented by 5, and in the other case, v1 is incremented by 4. Again, if I substitute the concrete values in, I'll get exactly the concrete execution. What this means, however, is that we now branch. We have two states, and if there's another branch, this can become an exponentially large number of potential end states. If there's a loop, and it's an infinite loop rather than a bounded one, then you can have issues with the analysis never terminating, because we don't actually know if the loop condition is ever reached, so we can potentially run forever. Now I'm just going to talk a little bit about what other symbolic execution systems are out there before I jump into HEVM. First of all, there are more or less two types within this ecosystem, as far as I understand. In one, you have a static code analysis engine, and you want to validate whether some of the things it spits out are potentially false positives. In that case, if the symbolic execution engine is not complete or doesn't understand everything, it's kind of fine, because you will just simply report the potential false positive and let the user deal with it. And that's fine. But in our case, we don't use a static code analysis engine as a precursor to HEVM, so we actually have to deal with everything that the EVM has to offer. So it's a bit more complicated, and it's also more complete, of course. Of these purely symbolic execution framework-based systems, there are a number. There's the Certora Prover, which is based on backwards exploration and weakest precondition computation. There's ETHBMC, which is, as the name suggests, a bounded model checker type of system. Then there's Halmos, which is written in Python. And then there's KEVM, which is based on the K framework and allows you to break out into K in case you need to prove some things, for example loop termination or invariants and things like that. So, just a bit of an overview of HEVM. It started a long time ago as part of the DappTools project. It implements the EVM semantics both concretely and symbolically, and the Echidna fuzzer, if you know about it, actually uses HEVM underneath for its concrete execution semantics. It is possible to execute any call from any potential state in the EVM. It understands all of the EVM, in terms of, for example, calling out to an RPC, to an archive node, to fetch state, et cetera. It basically computes a query to an SMT solver, runs the SMT solver to get the response to the query, and then interprets this response and displays it back to the user in a fashion that is more user-friendly than raw SMT output. We'll see that in a moment. There are two ways of using HEVM. One of them is counterexample generation, and the other is equivalence checking. So let's talk a little bit about counterexample generation, in this case of some kind of postcondition that is typically written as a Forge test case. Here we have Solidity, Vyper, whatever language you prefer, or even just pure EVM bytecode, and that gets interpreted by the symbolic interpreter inside HEVM to produce an intermediate representation.
Then this intermediate representation, together with the postcondition that you put down, gets compiled into a logical formula that gets sent out to the SMT solver. An SMT solver is what's called a satisfiability modulo theories solver; it understands, for example, bit-vector arithmetic, which is quite useful because of course there are these 256-bit bit vectors, or variables, that the EVM operates on. And then either it proves the property, finds a counterexample, or times out, of course; that's always a possibility. When it comes to symbolic execution for equivalence checking, we do something very similar, but what we do eventually is compare the two executions against each other. You have bytecode A and bytecode B. Let's say that bytecode B is a refactored bytecode, or something that is gas-optimized, and you want to make sure that it's doing the same thing as the original one. What this will do is try to prove that it is equivalent, find a counterexample, so some input or initial state where the two contracts actually disagree, or, of course, there's always the potential for a timeout. Let's go a little bit deeper into the symbolic execution engine inside HEVM. It operates on bytecode, which means that we're not tied to any particular compiler or language. It understands all of the EVM: the stack, the call frame stack, storage, calldata, everything. As I mentioned, it can actually run at any point in the blockchain history, and it is validated against the concrete execution semantics of geth. As mentioned, it has issues with loops and recursion, because with unterminated loops we don't know when to stop. It has some issues related to symbolic offsets, sizes, and memory copies; this is purely due to SMT limitations. And it doesn't currently deal with symbolic gas, so it basically ignores gas when it comes to symbolic execution. In concrete execution, of course, it understands gas and will deal with it, and so Echidna will be running correctly in that sense. Just a bit on the internals. Maybe this is a little too small, but basically what it does is take the EVM bytecode as input. Then it will execute it step by step and branch, if necessary, to build an intermediate representation. This intermediate representation is simplified in a generic way, and there are some specific simplifications related to keccak and to arrays and maps and things like that, which are quite specific to how the EVM handles them; especially keccak, which is very complicated to represent exactly in SMT. Eventually, this IR gets compiled into a bunch of SMT queries, and these SMT queries get dispatched to the SMT solver or solvers. Then we gather all the results from the SMT solvers, extract the counterexamples, map them back to the query that was originally dispatched, and see how we can display that to the user in a way that lets them actually run it and trigger the fault. Because at the end of the day, it's annoying to just get something like: hey, your program is faulty. That's not going to help the user. The user really wants some kind of counterexample so they can actually run it and see how it is wrong. And then, of course, they can fix it, or hopefully fix it. So that's kind of the high level of the internals.
So let's talk a little bit about this intermediate representation. I'm not going to go into the details, but it sits in the middle of the whole pipeline I showed earlier. Here's a simple function, right? It will overflow if the variable that you put in is large enough, in particular exactly equal to 2 to the power of 256 minus 1, in which case b will actually not be larger than a. I'm sure you have played around with this; it's a very trivial overflow issue. And this gets compiled into an intermediate representation where we have a proposition that a must be smaller than a plus 1. I mean, it's very simplistic in this case, but the point was to make it readable, human-readable as well. This can also be compiled into a graph, as you might imagine, and it can be quite understandable to a human how the internal representation maps back to the original code. In this case, it's quite clear. Okay, so now a little bit about how to actually run this tool and what kind of results you can expect. In this particular case, we have a Forge standard test that we import, our contract is a test, and we add prove_ in front of the test name. You will need Forge, you will need Z3, which is one of the SMT solvers that we support, and you need the hevm binary, which you can just download from the repository. You put down this test, and this test will, of course, fail, again due to overflow. And the way you run it is very, very simple: you just build the project and run hevm test. It will parse everything up, do its magic, and eventually give you a counterexample, which is what you expect. So this is the basic way of using it. And it is as clean and tidy as it looks in the output; I did not actually change or edit that, even the spacing is how it is. So it's quite visually representative of what you are going to test and what counterexamples come out. If there's more than one counterexample, there's going to be more than one listed there. So how does hevm equivalence checking work? Here I'm just going to show a condensed example. It's a very complicated codebase that someone actually tried to use this on, and here I just want to show some of the edge cases, but also that we do power through the edge cases as well. In this case, there are two bytecodes, and we want to know if they're equivalent. You see that it emits a warning that we cannot actually explore the whole thing in this particular case, due to a call into unknown code. Obviously, we don't know what's going to happen there, what is going to get executed. But beyond that, it says: okay, well, anyway, I'll power through, I'm going to ignore these bits and pieces. We have 1.7 million end states that we need to check for equivalence. And then it says: okay, well, I tried my very best. There are 93 of them I couldn't do because of a memory copy that is symbolic, and two I couldn't do because of timeout. This run actually had a 15-second timeout for each query. And eventually I couldn't find any discrepancies, given these warnings and the things that I couldn't actually explore.
And if you have a look, this actually ran for like 34 minutes and 45,000 seconds, so obviously it was like", + "sources_streamethId": "6738244e4a7c30ff07f27af2", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731560400000, - "slot_end": 1731562200000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1zbKn6alKaFJ7AHUN8resSuZmq-0n4W0JbxXcZGI9Cq8", - "resources_slides": null, + "slot_start": 1731650400000, + "slot_end": 1731654000000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1-iuDsB5_A6OL9P-2eTEkEzeWPAc_YK9UNsVLwT5Pf6A", + "resources_slides": "https://drive.google.com/file/d/1Lm5ul8IpKnP9HyCSJ2vTCQvwZdiY8or9/view", "speakers": [ - "mate-soos" + "yoseph-ayele", + "yele-bademosi", + "david-nandwa", + "damaris-njambi-njoroge" ] }, "vector": [ - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -395019,6 +393925,9 @@ 0, 0, 6, + 6, + 6, + 6, 0, 0, 0, @@ -395384,15 +394293,6 @@ 0, 0, 0, - 6, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -395466,6 +394366,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -395512,6 +394413,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -395591,7 +394493,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -395687,6 +394588,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -395750,6 +394652,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -395951,12 +394854,10 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, + 2, 0, 0, 0, @@ -395969,61 +394870,57 @@ }, { "session": { - "id": "how-crypto-is-used-in-africa-today-hear-from-leading-builders", - "sourceId": "RKR9EC", - "title": "How crypto is used in Africa today? Hear from leading builders", - "description": "How are Africans using crypto at scale, and what has been the impact on society? Last year Africa saw close to $120B onchain transactions, and 10%-20% of major countries' populations used crypto. \r\n\r\nWhat problems are the top African founders solving for retail and businesses? What are the technical + non-technical friction points they face in building for the fastest growing markets in the world?\r\n\r\nHear African founders share lessons, nuances, and user behavior from the front lines.", - "track": "Real World Ethereum", - "type": "Panel", - "expertise": "Beginner", - "audience": "Community", + "id": "how-hardhat-3-will-ensure-precise-simulation-for-l2s-using-edr", + "sourceId": "G7AHS9", + "title": "How Hardhat 3 will ensure precise simulation for L2s using EDR", + "description": "As the Ethereum ecosystem shifts towards L2 solutions, developers encounter new challenges in maintaining consistency and efficiency across different chains.\r\n\r\nHardhat is powered by EDR, a reusable Ethereum Development Runtime implementation to build developer tools. 
This talk will show how EDR's support for L2s in Hardhat 3 will streamline the development process, improve testing accuracy, and enhance the overall developer experience.", + "track": "Developer Experience", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Use Cases", - "Remittance", - "Ethereum for Good", - "p2p", - "Ethereum for Good", - "Remittance", - "Use Cases" + "Layer 2s", + "Tooling", + "DevEx", + "optimism", + "DevEx", + "Layer 2s", + "Tooling" ], "keywords": [ - "Mass adoption", - "payment", - "P2P" + "EVM", + "Hardhat", + "Optimism" ], - "duration": 3381, + "duration": 1313, "language": "en", - "sources_swarmHash": "123ecd0fc00f3bbef4a63b4f291024752fc482215533945b465b74cbecb2327f", - "sources_youtubeId": "GyMoif6I-cE", + "sources_swarmHash": "051def2613d035b880fce6a098fdc98bd5b49ebecf4914f65c76912a6ea56741", + "sources_youtubeId": "W7y4bYZFVJ4", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6738244e4a7c30ff07f27af2", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6735e4c69dbb7a90e193d498", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735e4c69dbb7a90e193d498.vtt", + "transcript_text": " Welcome everyone and thank you for staying until the end of the day. I am Wodan, a developer at the Nomic Foundation, and today I'll be talking about how Hardhat 3 will ensure precise simulation for L2s using EDR. There's a lot to unpack there, but first I wanted to comment on the Nomic Foundation. Maybe not everyone's familiar with it, but you're probably familiar with our work. We are a non-profit dedicated to Ethereum developers, you guys, and our most well-known product is likely Hardhat and the Hardhat VS Code extension. That's us. So an overview of what I'll be discussing today is I'll start off with a quick introduction to what EDR is. Then I'll go into variability between L2s. I'll look at some problems that currently exist when developers are developing for L2s using L1 tooling. Then I'll do a technical deep dive into how EDR actually simulates L2s accurately. This will also be interesting for any of you L2 developers out there, as I'll talk about the extensibility points that EDR will expose in the future. And finally, I'll touch upon a demo that shows how L2s work in Hardhat 3. This will be interesting for all of you Hardhat users, as it will show how the technical complexity of all of this variability is boiled down to a simple and straightforward user experience. So what is EDR? EDR or Ethereum Development Runtime in full is an reusable EVM development runtime library for tooling. It is a set of building blocks for a blockchain simulation and in particular it allows you to observe EVM and Solidity execution. So as such we are targeting smart contract development, the simulation, testing, and debugging thereof, and we're not targeting to be an execution layer node. If you're curious to learn more about EDR, earlier this year we had an EDR launch announcement when we integrated into Hardhat 2, and there's more information about other features, performance improvements that brought to Hardhat, and future roadmap for EDR as well. So what variability exists between L2s and L1? For the sake of this presentation and also for the implementation of EDR, we're assuming L2s that are EVM equivalent. 
This means that they have to comply with the EVM specification, or the Ethereum white paper, if you will. When it comes to rollups, any of the L2s can have their own custom transaction types. This means that when we're dealing with custom L2 transactions, there is different logic which is executed within the EVM. The way we deal with rewards and the way we deal with fees can be different, and even the output that's returned can be different, for example the halt reasons when an exceptional halt occurs. When we go into a transaction and look at the bytecode, the opcodes might also be different. It could be that an L2 doesn't support all of the opcodes of L1, or vice versa, but it can even be that the same opcode has a different type of behavior on the L2. For example, the block number opcode: how would we simulate this on an L2? Do we give a prediction of the L1 block that we expect to be included in, or do we return an L2 block number? Within the EVM, another thing that's different is precompiles. The set that's available in different L2s differs, as well as being different from L1. Another thing that is different is hard forks. Every L2 will have a different set of breaking changes for which they'll have their own hard forks, and something that we need to track as well is so-called hard fork activations. Those are the block numbers or timestamps at which a particular hard fork becomes activated, and these will, for example, be needed when you do an eth_call at a particular block number. If we're forking a blockchain and we want to run a historic block, we need to know which hard fork should be activated at that point. When we then roll up everything into an L2 block, we also need to take fee calculation into consideration at the protocol level, and we need to incorporate custom transaction receipts. When we are deploying our own chain for specific L2s, we need to consider their own pre-deployed contracts. These are incorporated in the genesis state and mean that you can access these contracts at a predefined address; these differ per L2. And then finally, if we go up one more layer, you have the RPC, which might have additional fields for methods. It could be that a method returns the same fields but with different behavior, and it can even be that one of those methods has entirely different logic altogether. All of these types of variability need to be incorporated, and keeping track of them is a huge pain. So a big shout-out to EVM Diff and the L2 documentation, which were very instrumental when we were implementing the OP stack for EDR. So we have an idea now of what variability exists, but what problems might occur? Here are some examples. When we start off at the execution layer, when we're dealing with unknown transaction types from an L1 perspective, we're not sure how to actually execute them. It could be that they throw an error, or it could be that they do execute, but because the opcodes have different behavior, the result is different, and as such the L2 execution will be incorrect within your L1 tooling. When we're then trying to mine a full block, we also run into issues. The RLP encoding for these unknown transactions is unknown, which would result in an incorrect transactions trie root. And it could be that the header has different header fields, which would also result in an incorrect block hash. Then, when we look at the gas calculations, these end up being incorrect as well. Let's have a look at the way that L2 transaction costs are structured.
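Before moving on to costs, one concrete piece of the bookkeeping above: a sketch of resolving which hard fork applies when replaying a historic block. The activation table and names are illustrative, not EDR's API; the L1 block numbers are the real London and Merge activations.

```typescript
// Sketch (illustrative types, not EDR's API): per-chain hardfork activations,
// so a historic call can be simulated under the right rules.
type Activation = { hardfork: string; activatedAtBlock: number };

// Hypothetical activation tables; real chains each maintain their own.
const activations: Record<string, Activation[]> = {
  l1: [
    { hardfork: "london", activatedAtBlock: 12_965_000 },
    { hardfork: "merge", activatedAtBlock: 15_537_394 },
  ],
  someL2: [{ hardfork: "customFork", activatedAtBlock: 1 }],
};

// Which rules apply when replaying `blockNumber` on `chain`?
function hardforkAt(chain: string, blockNumber: number): string {
  const table = activations[chain] ?? [];
  let active = "frontier"; // assumed baseline for the sketch
  for (const { hardfork, activatedAtBlock } of table) {
    if (blockNumber >= activatedAtBlock) active = hardfork;
  }
  return active;
}

console.log(hardforkAt("l1", 13_000_000)); // "london"
```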
So on the left side you'll see something that's very familiar. We have our execution gas cost, consisting of a gas price multiplied by gas used. This is the same as on L1, except that L2 gas prices will be lower. But we have something new, which is the L1 data fee. This is the cost we have to pay for the rollup, or for the part of that rollup that our transaction occupies. This is the L1 gas price, multiplied by the L1 gas used, multiplied by the number of bytes of transaction data. Usually this data is compressed to reduce its size. But we somehow need to convey this cost to the user, and L2s do this differently. It could be that they try to convert this factor of gas price multiplied by gas used into an L2 gas usage, or it could be that they somehow fold it into the L2 gas price. Each L2 will have its own strategy for doing this. Then, when we're looking at debugging, for example using debug_traceTransaction, what we're trying to do is replay some block on the chain until we reach the transaction that we want to debug. It could be that, up until that point, we find some unknown transaction. We could treat it as an EIP-155 transaction or a legacy transaction and try to execute it on a best-effort basis, but this might result in errors. So instead we could choose to skip it. But this might have the negative side effect that, if that transaction affected state that our contract is also accessing, we get a different result than we would on a testnet or mainnet. All of these examples that I gave have something in common: we're trying to build L2 smart contracts using L1 tools and hoping that it just works. It could be that the tests are passing but there are still subtle execution differences that give us a false sense of security, and this leaves room for security vulnerabilities once we deploy. So how does EDR circumvent this and accurately simulate L2s? Here we have an overview of the different building blocks that I mentioned, outlined in black. In orange we have entry points from and into Hardhat, and in purple we have REVM, which is the EVM dependency that we use. Everything outlined in green is parts that we previously had supported for L1 Ethereum but now need to convert to be multi-chain and also support different L2s. I've numbered it in two sections, as they both have different requirements, and we'll delve into those respectively now. The part outlined in one was all Rust code, so we need to look at extensibility from a Rust perspective. For this we had several requirements. We wanted compile-time polymorphism. This would allow users of our crates, or packages if you will, to use these interfaces, or traits, at compile time to generalize their types and functions. We also wanted to generate type errors at compile time. This would force L2 developers to resolve any issues with their typing, as opposed to the errors bubbling up to Hardhat users at runtime. Finally, we also wanted to ensure that type definitions were reusable from a base chain to an L2 chain. For example, if we have an EIP-2930 transaction, this is used in the OP stack as well, so being able to reuse those types lightens the burden for L2 developers. The solution we used is Rust traits and generics. Traits are a form of interface that can be used both at runtime and at compile time to constrain generics, and generics are just a way to generalize function definitions and type definitions across a type.
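Circling back to the two-part cost structure at the start of this passage, here is what that calculation looks like in code: a simplified, OP-stack-style sketch in which the per-byte gas constant and the compression ratio are illustrative assumptions, not any chain's exact formula.

```typescript
// Simplified L2 transaction cost: execution cost + L1 data fee.
// All constants here are illustrative, not a real chain's parameters.
const GAS_PER_CALLDATA_BYTE = 16; // rough L1 cost per non-zero calldata byte

function l2TransactionCost(params: {
  l2GasUsed: bigint;
  l2GasPrice: bigint;       // wei, typically much lower than L1
  l1GasPrice: bigint;       // wei
  txDataBytes: number;      // size of the transaction data posted to L1
  compressionRatio: number; // e.g. 0.4 if the data compresses to 40%
}): bigint {
  const executionCost = params.l2GasUsed * params.l2GasPrice;
  const compressedBytes = BigInt(Math.ceil(params.txDataBytes * params.compressionRatio));
  const l1DataFee = params.l1GasPrice * BigInt(GAS_PER_CALLDATA_BYTE) * compressedBytes;
  return executionCost + l1DataFee; // the total the user must somehow be charged
}

console.log(l2TransactionCost({
  l2GasUsed: 21000n, l2GasPrice: 1_000_000n,
  l1GasPrice: 20_000_000_000n, txDataBytes: 200, compressionRatio: 0.4,
}));
```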
For each of these traits, or interfaces, we associated types with them that we consider to be a chain specification. Think of a transaction type, a block type, etc., and there are some constants which are used within the protocol. We distribute individual chains using Rust crates for reusability. So if we look at the overview again, and we start at the top right, we have a remote network client which makes RPC calls to a provider like Infura or Alchemy. Here we introduced an RPC spec trait that defines the RPC transaction, the RPC receipt, etc. Then, when we go to REVM, here we introduced something called EvmWiring. We proposed changes to REVM, and with the graceful help of Dragan Rakita, the maintainer, we were able to incorporate these large changes into REVM, which means that now, within REVM, you can also run and extend different chain types. Here you would define a runnable transaction, the block within which it's executed, the hard fork, the halt reasons, etc. Then we go up one level, to something we call the executor. The executor is a wrapper around the EVM which receives a signed transaction and passes it in, and while it's executing we gather additional data which we use for runtime observability, so things like traces for a stack trace, etc., that we expose to the end user. Here we introduce a runtime spec type. Then, when we're incorporating all of these transactions into a block within our block builder, we need to consider parameterizations at the protocol level, such as the base fee calculation. This has specific constants that need to be incorporated, which differ between L1 and L2, and here we introduced an ETH header constants interface to define those. Then all of this logic is tied together within the node, or provider. Here we introduced a provider spec with things such as a pool transaction. For example, for blob transactions we also keep track of the additional blob data, etc., and these are stored within the mempool. Then, finally, we reach the RPC interface that's exposed to Hardhat. Here what we do is use a tool called NAPI, which can be used for generating TypeScript bindings from Rust code, and this allows us to call our Rust functions from Hardhat, which is TypeScript. Here we introduced a sync NAPI spec. Sync basically means we're trying to call our Rust code from a threaded environment, and this ensures that the access to that data is correct. We also do a conversion from compile-time types to runtime polymorphic types, which we'll have a look at next. All in all, this encompasses six different traits, or interfaces, and these are all the things that an L2 developer would have to implement in order for all of the building blocks that we have within EDR to be supported for their chain, and in addition, for their chain to be usable within Hardhat 3 in the future. When we look at the part that was outlined on the left, we're dealing with TypeScript, so the requirements for extensibility are slightly different. We're dealing with runtime polymorphism. We're trying to minimize memory usage and load times; the goal here is that we shouldn't have to load all the possible L2 chains, only the ones that the Hardhat user in that particular configuration wants to use. And we want to avoid centralization of chain types: we don't want a repository where you have to add your specific chain type to an enum.
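A TypeScript analogue of that chain-spec idea (in EDR these are Rust traits with associated types; every name below is illustrative): one interface bundles the chain's types, and generic code is written once against it.

```typescript
// TypeScript analogue of the chain-spec design, not EDR's real definitions:
// an interface bundles the chain's associated types.
interface ChainSpec {
  transaction: { hash: string }; // stand-in for the chain's transaction type
  haltReason: string;            // stand-in for chain-specific halt reasons
}

// Hypothetical L1 and OP-stack specialisations.
interface L1Spec extends ChainSpec { transaction: { hash: string; type: 0 | 1 | 2 } }
interface OpSpec extends ChainSpec { transaction: { hash: string; type: 0 | 1 | 2 | 126 } }

// Generic "executor": written once, reused for every chain spec. In Rust this
// is a trait bound checked at compile time; the TypeScript version is analogous.
function execute<S extends ChainSpec>(tx: S["transaction"]): S["haltReason"] | null {
  console.log(`executing ${tx.hash}`);
  return null; // no halt in this toy
}

execute<L1Spec>({ hash: "0xdef", type: 2 });
execute<OpSpec>({ hash: "0xabc", type: 126 }); // deposit-style tx type, illustrative
```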
Instead, we want you to just be able to plug and play your own chain types independently. For this, we created an NAPI wrapper around dynamic trait objects. Dynamic trait objects are basically virtual objects where you can access a specific instance of an object through an interface. To distinguish chain types, we use a string identifier. And then finally, for distribution, we're using npm packages. Here we have an overview of what that looks like on the TypeScript side. If we go from right to left, we have a provider interface that receives a generic JSON-RPC request and returns a generic JSON-RPC response. We have two different implementations, one for L1, one for Optimism. These would basically parse the input, make it a typed request for their respective backends, handle it, and then convert the response back to a generic string, which is returned to the user. In order to be able to construct these, we're just using a factory pattern. So if we move one column to the left, we have a provider factory, which receives a generic configuration, again using strings, and we have two implementations that parse those configurations, make them typed, and construct the respective provider, depending on L1 or Optimism. If we then move one column further to the left, we have something called a context. This is maintained from the start until the shutdown of your application and contains a registry of provider factories, which is a mapping from the string identifier to the instance of the provider factory. If we look at a usage example: you start your application, register any of the requested providers based on a configuration, and store them in the registry. Then, when a Hardhat user actually requests to create a provider, let's say Optimism, we do a lookup in the registry, find the Optimism provider factory, pass on the config, and ask it to create an instance of the Optimism provider, which is returned to the user. This is the way that we have designed extensibility. The Hardhat beta we'll be releasing comes with OP stack and L1 support; in the future we'll also be releasing all of these APIs, which would allow you to extend it for your own L2. Now let's have a look at what this actually looks like in Hardhat 3. It got a little bit complicated on a technical level, but from the user's side it's actually quite straightforward to use. Here we see an example of a Hardhat 3 user config. At line 7 we have two networks defined, EDR L1 Sepolia and EDR OP Sepolia. They both have the EDR type for the backend simulation. The first one has a chain type for L1, whereas the second has a chain type for Optimism. This is communicating to EDR which typing system to use. And when we switch to a Hardhat script, we see on line 3 that we're requesting the Hardhat network manager to connect to the previously configured OP Sepolia network, and we specify that for its types it needs to use Optimism. This is informing the TypeScript type system that the estimateL1Gas function needs to be available, which only exists for Optimism and not for L1. We make a call to it through the public client, then we send an L2 transaction to transfer one wei, and we wait for the transaction receipt to be included in a block. So when we execute this, we're estimating L1 gas, we get 1600, and then we see that our transaction was sent and included in a block.
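A compact sketch of the registry and factory flow just described; the interfaces and names are illustrative, not EDR's published API.

```typescript
// Sketch of the provider/factory/registry design (illustrative names only).
interface Provider {
  handle(jsonRpcRequest: string): Promise<string>; // generic request in, generic response out
}

interface ProviderFactory {
  create(config: string): Provider; // generic string config, parsed per chain
}

class OptimismProvider implements Provider {
  async handle(req: string): Promise<string> {
    const typed = JSON.parse(req); // parse into a typed request for the backend
    return JSON.stringify({ id: typed.id, result: "0x..." }); // placeholder result
  }
}

// The "context": a registry that lives for the application's lifetime.
const registry = new Map<string, ProviderFactory>();

// At startup: register the factories the user's config asks for.
registry.set("optimism", { create: () => new OptimismProvider() });

// Later: a chain-type string selects the factory, which builds the provider.
const provider = registry.get("optimism")!.create("{}");
provider.handle('{"jsonrpc":"2.0","id":1,"method":"eth_chainId"}').then(console.log);
```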
If we then go back and switch the network we are connecting to to EDR L1 Sepolia and use L1 typing, we see that we get a type error for estimateL1Gas, as it is not available for L1 networks, and the viem object knows this. So this is a sneak peek at some of the features that will be available with multi-chain support, and the kind of simple user experience that you get as a Hardhat user. If you want to learn more, we've had several other talks already at Devcon. We had a talk about the future vision of the Nomic Foundation and the products that we're developing. We also had a talk which was a preview of Hardhat 3. And tomorrow we'll have a talk by the Slang team. Slang is a compiler as an API for developer tools, and it's basically useful for writing your own linters, formatters, etc. So if you're curious about that, go have a look at them tomorrow. Thank you. Alright, so we do have some extra time for Q&A. Feel free to scan the QR code if you have more questions for Wodann. In the meantime, let's take this one. Arbitrum has pre-deployed contracts that do not exist as EVM code on chain; instead, it's Go code embedded into Geth. Any efforts to handle this? So, the way that we could theoretically handle that is, instead of having it as Go code, we could include it in the genesis state that is deployed when you're using an Arbitrum L2 chain. All right, so let's wait for a few more seconds. We have another one here. When Hardhat 3? I think we don't have an announcement date yet, so stay tuned. We have an internal alpha going at the moment, and the beta will be sometime at the start of next year, but there's no definitive date yet. How does EDR compare to Foundry's Anvil? The Hardhat simulation, or the simulation component of it, and the RPC that's exposed are comparable, like identical in that sense, to Anvil. The performance is also very comparable. We have a slightly different way of maintaining our state, and as a result of that, if you're very often switching between different states, going and doing eth calls and jumping back and forth between a previous state, making some modifications and jumping back, in those use cases we're often faster. But from a use-case perspective, they're the same. The only difference we might have is that we haven't released our crates yet, which I think Foundry, or Anvil, has. But in the future, we'll be releasing a first version of EDR as crates as well. Let's remain on the stage for a little while to see if we have more questions. All right, if you have any questions, you can also meet me afterwards. And our CTO is also here, who can answer any questions about Hardhat 3. Alright, that would be great.
Thank you guys so much and let's give a round of applause to Ruwanan.", "eventId": "devcon-7", - "slot_start": 1731650400000, - "slot_end": 1731654000000, + "slot_start": 1731582000000, + "slot_end": 1731583800000, "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1-iuDsB5_A6OL9P-2eTEkEzeWPAc_YK9UNsVLwT5Pf6A", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/19L7dj6AAC2bhxtksWRYlrJuOv3Xc6aF5iQmk5DGFVbA", + "resources_slides": "https://drive.google.com/file/d/17Al2kaNEfmbsqahJzMayjt1eMDg3CWR5/view", "speakers": [ - "damaris-njambi-njoroge", - "david-nandwa", - "yele-bademosi", - "yoseph-ayele" + "wodann" ] }, "vector": [ 0, 0, 0, + 6, 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -396402,14 +395299,11 @@ 0, 0, 0, - 6, - 6, - 6, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -396789,6 +395683,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -396797,6 +395692,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -396832,6 +395728,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -396846,7 +395743,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -396893,7 +395789,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -397025,6 +395920,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -397069,7 +395965,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -397133,7 +396028,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -397329,6 +396223,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -397339,8 +396234,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -397352,53 +396245,52 @@ }, { "session": { - "id": "how-hardhat-3-will-ensure-precise-simulation-for-l2s-using-edr", - "sourceId": "G7AHS9", - "title": "How Hardhat 3 will ensure precise simulation for L2s using EDR", - "description": "As the Ethereum ecosystem shifts towards L2 solutions, developers encounter new challenges in maintaining consistency and efficiency across different chains.\r\n\r\nHardhat is powered by EDR, a reusable Ethereum Development Runtime implementation to build developer tools. This talk will show how EDR's support for L2s in Hardhat 3 will streamline the development process, improve testing accuracy, and enhance the overall developer experience.", - "track": "Developer Experience", - "type": "Talk", - "expertise": "Intermediate", + "id": "how-i-audit", + "sourceId": "3NRXP9", + "title": "How I Audit", + "description": "Dom, a former security researcher at Trail of Bits, is going to give a peek of what it's like to be an auditor in 2024. 
Some of the techniques and tools discussed:\r\n\r\n* How to prepare for an audit?\r\n* How to hand over the resources?\r\n* What is the first thing auditors do?\r\n* How to communicate with auditors?\r\n* How I use the following tools, and their evaluation:\r\n * Codebase visualization with Wake and Neovim\r\n * Static analysis tools\r\n * Fuzzing (and debugging)\r\n * Manual review", + "track": "Security", + "type": "Workshop", + "expertise": "Expert", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", "Tooling", - "DevEx", - "optimism", - "DevEx", - "Layer 2s", + "Security", + "Auditing", + "analysis", + "static", + "Auditing", + "Security", "Tooling" ], "keywords": [ - "EVM", - "Hardhat", - "Optimism" + "Solidity", + "Frameworks", + "Program analysis", + "Static Analysis" ], - "duration": 1313, + "duration": 5302, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "4427212cb7b20f8a44407a4b5401b5dbe9d985867ab725d409048614bac75dfd", + "sources_youtubeId": "8uC1QvsYeu0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735e4c69dbb7a90e193d498", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735e4c69dbb7a90e193d498.vtt", - "transcript_text": " Welcome everyone and thank you for staying until the end of the day. I am Wodan, a developer at the Nomic Foundation, and today I'll be talking about how Hardhat 3 will ensure precise simulation for L2s using EDR. There's a lot to unpack there, but first I wanted to comment on the Nomic Foundation. Maybe not everyone's familiar with it, but you're probably familiar with our work. We are a non-profit dedicated to Ethereum developers, you guys, and our most well-known product is likely Hardhat and the Hardhat VS Code extension. That's us. So an overview of what I'll be discussing today is I'll start off with a quick introduction to what EDR is. Then I'll go into variability between L2s. I'll look at some problems that currently exist when developers are developing for L2s using L1 tooling. Then I'll do a technical deep dive into how EDR actually simulates L2s accurately. This will also be interesting for any of you L2 developers out there, as I'll talk about the extensibility points that EDR will expose in the future. And finally, I'll touch upon a demo that shows how L2s work in Hardhat 3. This will be interesting for all of you Hardhat users, as it will show how the technical complexity of all of this variability is boiled down to a simple and straightforward user experience. So what is EDR? EDR or Ethereum Development Runtime in full is an reusable EVM development runtime library for tooling. It is a set of building blocks for a blockchain simulation and in particular it allows you to observe EVM and Solidity execution. So as such we are targeting smart contract development, the simulation, testing, and debugging thereof, and we're not targeting to be an execution layer node. If you're curious to learn more about EDR, earlier this year we had an EDR launch announcement when we integrated into Hardhat 2, and there's more information about other features, performance improvements that brought to Hardhat, and future roadmap for EDR as well. So what variability exists between L2s and L1? For the sake of this presentation and also for the implementation of EDR, we're assuming L2s that are EVM equivalent. 
This means that they have to comply with the EVM specification, or the Ethereum Yellow Paper, if you will. When we look at rollups, any of the L2 transactions can have their own custom types. This means that when we're dealing with custom L2 transactions, there is different logic that's executed within the EVM. The way that we're dealing with rewards and the way we're dealing with fees can be different, and even the output that's returned can be different, for example, halt reasons when an exceptional halt occurs. When we go into a transaction and we look at the bytecode, the opcodes might also be different. It could be that an L2 doesn't support all of the opcodes of L1 or vice versa, but it can even be that the same opcode has a different type of behavior on the L2. For example, the block number opcode: how would we simulate this on an L2? Do we give a prediction of the L1 block that we expect to be included in, or do we return an L2 block number? Within the EVM, another thing that's different is precompiles. The set that's available in different L2s differs, as well as being different from L1. Another thing that is different is hard forks. Every L2 will have a different set of breaking changes for which they'll have their own hard forks, and something that we need to track as well is so-called hard fork activations. Those are the block numbers or timestamps at which a particular hard fork becomes activated, and these will, for example, be needed when you do an eth call at a particular block number. If we're forking a blockchain and we want to run a historic block, we need to know what hard fork should be activated at this point. When we then roll up everything into an L2 block, we also need to take fee calculation into consideration at the protocol level, and we need to incorporate custom transaction receipts. When we are deploying our own chain for specific L2s, we need to consider their own pre-deployed contracts. These are incorporated in the genesis state and mean that you can access these contracts at a predefined address. These differ per L2. And then finally, if we go up one more layer, you have the RPC, which might have additional fields for methods. It could be that a method returns fields, but with a different behavior. And it can even be that one of those methods has entirely different logic altogether. All of these types of variability need to be incorporated, and keeping track of them is a huge pain. So a big shout out to EVM Diff and the L2 documentation that was very instrumental when we were implementing the OP Stack for EDR. So we have an idea now of what variability exists, but what problems might occur? Here are some examples. When we start off at the execution layer, when we're dealing with unknown transaction types from an L1 perspective, we're not sure how to actually execute them. It could be that they throw an error, or it could be that they do execute, but because the opcodes have different behavior, the result is different, and as such the L2 execution will be incorrect within your L1 tooling. When we're then trying to mine a full block, we also run into issues. The RLP encoding for these unknown transactions is unknown, which would result in an incorrect trie root. And it could be that the header has different header fields, which would also result in an incorrect block hash. Then when we look at the gas calculation, these end up being incorrect as well. Let's have a look at the way that L2 transaction costs are structured. 
So on the left side you'll see something that's very familiar. We have our execution gas cost, consisting of a gas price multiplied by gas used. This is the same as on L1, except that L2 gas prices will be lower. But we have something new, which is the L1 data fee. This is the cost we have to pay for the rollup, or rather for the part of that rollup that our transaction takes up. This is the L1 gas price multiplied by the L1 gas used, multiplied by the number of bytes of transaction data. Usually, this is compressed to reduce the amount of memory usage. But we somehow need to convey this cost to the user, and L2s do this differently. It could be that they try to convert this factor of gas price multiplied by gas used to an L2 gas usage, or it could be that they somehow try to fold this into the L2 gas price. Each L2 will have its own strategy for doing this. Then when we're looking at debugging, for example using debug_traceTransaction, what we're trying to do is replay some block on the chain until we reach the transaction that we want to debug. And it could be that up until that point, we find some unknown transaction, and we could treat it as an EIP-155 transaction or a legacy transaction and try to execute it on a best-effort basis. But this might result in errors. So instead, we could choose to skip it. But this might have the negative side effect that, if that transaction affected state that our contract is also accessing, we get a different result than we would on a testnet or mainnet. All of these examples that I gave have something in common: we're trying to build L2 smart contracts using L1 tools and hoping that it just works. It could be that the tests are passing, but there are still subtle execution differences that give us a false sense of security. And this leaves room for security vulnerabilities once we deploy. So how does EDR circumvent this and accurately simulate L2s? Here we have an overview of the different building blocks that I mentioned, outlined in black. In orange, we have entry points from and into Hardhat, and in purple we have REVM, which is the EVM dependency that we use. Everything outlined in green is parts that we previously supported for L1 Ethereum but now need to convert to be multi-chain and also support different L2s. I've numbered it in two sections, as they both have different requirements, and we'll delve into those respectively now. So the part outlined in one was all Rust code, so we need to look at extensibility from a Rust perspective. For this we had several requirements. We wanted compile-time polymorphism. This would allow users of our crates, or packages if you will, to be able to use these interfaces or traits at compile time to generalize their types and functions. We also wanted to generate type errors at compile time. This would force L2 developers to resolve any issues with their typing, as opposed to the error bubbling up to Hardhat users at runtime. Finally, we also wanted to ensure that their type definitions were reusable from a base chain to an L2 chain. For example, if we have an EIP-2930 transaction, this is used in the OP Stack as well. So being able to reuse those types lightens the burden for L2 developers. The solution we used is Rust traits and generics. Traits are a form of interface that can be used both at runtime and at compile time to constrain generics. And generics are just a way to generalize function definitions and type definitions across types. 
For each of these traits or interfaces, we associated types with them that we consider to be a chain specification. Think of a transaction type, a block type, etc. And there are some constants which are used within the protocol. We distribute individual chains using Rust crates for reusability. So if we look at the overview again here, and we start at the top right, we have a remote network client which does RPC calls to a provider like Infura or Alchemy. Here we introduced an RPC spec trait that would define the RPC transaction, an RPC receipt, etc. Then when we go to REVM, here we introduced something called EVM wiring. We proposed changes to REVM, and with the graceful help of Dragan Rakita, the maintainer, we were able to incorporate these large changes into REVM, which means that now, within REVM, you can also run and extend different chain types. Here you would define a runnable transaction, a block within which it's executed, the hard fork, halt reasons, etc. Then we go up one level to something we call the executor. The executor is a wrapper around the EVM which receives a signed transaction, passes it in, and while it's executing, we gather additional data which we use for runtime observability. So things like traces for a stack trace, etc., that we expose to the end user. Here we introduce a runtime spec type. And then, when we're incorporating all of these transactions into a block within our block builder, we need to consider parameterizations at the protocol level, such as the base fee calculation. This has specific constants that need to be incorporated, which differ between L1 and L2, and here we introduced an ETH header constants interface to define those. Then all of this logic is tied together within the node, or provider. And here we introduced a provider spec with things such as a pool transaction. For example, for blob transactions we also keep track of additional blob data, etc., and these are stored within the mempool. Then finally, we reach the RPC interface that's exposed to Hardhat. Here what we do is use a tool called NAPI, which can be used for generating TypeScript bindings from Rust code. And this basically allows us to call our Rust functions from Hardhat, which is TypeScript. And here we introduced a sync NAPI spec. Sync basically means we're trying to call our Rust code from a threaded environment, and this ensures that the access to that data is correct. And we also do a conversion from compile-time types to runtime polymorphic types, which we'll have a look at next. All in all, this encompasses six different traits or interfaces, and these are all the things that an L2 developer would have to implement in order for all of the building blocks that we have within EDR to be supported for their chain, and in addition for their chain to be usable within Hardhat 3 in the future. So when we look at the part that was outlined on the left, we're dealing with TypeScript, so the requirements for extensibility are slightly different. We're dealing with runtime polymorphism. We're trying to minimize memory usage and load times. The goal here is that we shouldn't have to load all the possible L2 chains, only the ones that the user of Hardhat in that particular configuration wants to use. And we want to avoid centralization of chain types. We don't want a repository where you have to add your specific chain type to an enum. 
Instead, we want you to just be able to plug and play your own chain types independently. For this, we created a NAPI wrapper around dynamic trait objects. Dynamic trait objects are basically virtual objects where you can access a specific instance of an object through an interface. And to distinguish chain types, we use a string identifier. And then finally, for distribution, we're using NPM packages. Here we have an overview of what that looks like on the TypeScript side. If we go from right to left, we have a provider interface that receives a generic JSON-RPC request and returns a generic JSON-RPC response. We have two different implementations, one for L1, one for Optimism. These would basically parse the input, make it a typed request for their respective backends, handle it, and then again convert the response to a generic string, which is returned to the user. In order to be able to construct these, we're just using a factory pattern. So if we move one column to the left, we have a provider factory, which would receive a generic configuration, again using strings, and we have two implementations that would parse those configurations, make them typed, and construct the respective provider, depending on L1 or Optimism. If we then move one column further to the left, we have something called a context. This is maintained from the start until the shutdown of your application and contains a registry of provider factories, which is a mapping of the string identifier to the instance of the provider factory. If we look at a usage example: you start your application, register any of the requested providers based on a configuration, and store them in the registry. Then when a Hardhat user actually requests to create a provider, let's say Optimism, we do a lookup in the registry, find the Optimism provider factory, pass on the config, and ask it to create an instance of the Optimism provider, which is returned to the user. This is the way that we have designed extensibility. For the Hardhat beta, we'll be releasing with OP Stack and L1 support. In the future, we'll also be releasing all of these APIs, and that would allow you to extend it for your own L2. Now let's have a look at what this actually looks like in Hardhat 3. That got a little bit complicated on a technical level, but from a user side it's actually quite straightforward to use. So here we see an example of a Hardhat 3 user config. At line 7 we have two networks defined, EDR L1 Sepolia and EDR OP Sepolia. They both have the EDR type for the backend simulation. The first one has a chain type for L1, whereas the second one has a chain type for Optimism. This is communicating to EDR which typing system to use. And when we switch to a Hardhat script, we see on line 3 that we're requesting the Hardhat network manager to connect to the previously configured OP Sepolia, and we specified that for its types it needs to use Optimism. This is informing the TypeScript type system that the estimate L1 gas function needs to be available, which would only exist for Optimism and not for L1. We make a call to it through the public client, and then we send an L2 transaction to transfer one wei, and we wait for the transaction receipt to confirm that it was included in a block. So when we execute this, we're estimating L1 gas, get 1600, and then we see that our transaction that was sent is included in a block. 
If we then go back and we switch the network we are connecting to to EDR L1 Sepolia and use L1 typing, we see that we're getting a type error for estimate L1 gas, as this is not available for L1 networks and the viem object knows this. So this is a sneak peek at some of the features that will be available with multichain, and the kind of simple user experience that you get as a Hardhat user. If you want to learn more, we've had several other talks already at Devcon. We had a talk about the future vision of the Nomic Foundation and the products that we're developing. We also had a talk which was a preview for Hardhat 3. And tomorrow we'll have a talk by the Slang team. Slang is a compiler as an API for developer tools. And this is basically useful for writing your own linters, doing formatters, etc. So if you're curious about that, go have a look at them tomorrow. Thank you. Alright, so we do have some extra time for QA. Feel free to scan the QR code if you have more questions for Wodann. In the meantime, let's take this one. Arbitrum has pre-deployed contracts that do not exist as EVM code on chain; instead it is Go code embedded into Geth. Any efforts to handle this? So the way that we could theoretically handle that is, instead of having it as Go code, we could include this in the genesis state that is deployed when you're using an Arbitrum L2 chain. All right. So let's wait for a few more seconds. We have another one here. When Hardhat 3? I think we don't have an announcement date yet, so stay tuned. We have an internal alpha going at the moment, and the beta will be sometime at the start of next year, but there's no definitive date yet. How does EDR compare to Foundry's Anvil? The Hardhat simulation, or the simulation component of it, and the RPC that's exposed are comparable, like identical in that sense, to Anvil. The performance is also very comparable. We have a slightly different way of maintaining our state, and as a result of that, if you're very often switching between different states, so doing eth calls and jumping back and forth to a previous state, making some modifications and jumping back, then in those use cases we're often faster. But from a use case perspective, they're the same. The only difference that we might have is we haven't released our crates yet, which I think Foundry or Anvil has. But in the future, we'll be releasing a first version of the EDR crates as well. Let's remain on the stage for a little while to see if we have more questions. All right, if you have any questions, you can also meet me afterwards. And our CTO is also here, who can answer any questions about Hardhat 3. Alright, that would be great. 
Thank you guys so much and let's give a round of applause to Ruwanan.", + "sources_streamethId": "673863e020d1f9ac48bd215e", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731582000000, - "slot_end": 1731583800000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/19L7dj6AAC2bhxtksWRYlrJuOv3Xc6aF5iQmk5DGFVbA", - "resources_slides": null, + "slot_start": 1731646800000, + "slot_end": 1731652200000, + "slot_roomId": "classroom-b", + "resources_presentation": "https://docs.google.com/presentation/d/1cJm-toCXN2UU4rbGe04A5r8Ki0Mu2kurnbC7eBJWsbQ", + "resources_slides": "", "speakers": [ - "wodann" + "dominik-teiml" ] }, "vector": [ - 0, - 0, - 0, 6, 0, 0, @@ -397786,20 +396678,10 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, + 6, 0, 0, 0, @@ -398157,9 +397039,11 @@ 0, 0, 0, + 6, 0, 0, 0, + 6, 0, 0, 0, @@ -398168,7 +397052,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -398213,28 +397096,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -398330,6 +397191,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -398405,7 +397267,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -398544,6 +397405,22 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -398710,10 +397587,23 @@ 0, 0, 0, - 2, 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, 2, 0, 0, @@ -398732,57 +397622,52 @@ }, { "session": { - "id": "how-i-audit", - "sourceId": "3NRXP9", - "title": "How I Audit", - "description": "Dom, a former security researcher at Trail of Bits, is going to give a peek of what it's like to be an auditor in 2024. Some of the techniques and tools discussed:\r\n\r\n* How to prepare for an audit?\r\n* How to hand over the resources?\r\n* What is the first thing auditors do?\r\n* How to communicate with auditors?\r\n* How I use the following tools, and their evaluation:\r\n * Codebase visualization with Wake and Neovim\r\n * Static analysis tools\r\n * Fuzzing (and debugging)\r\n * Manual review", - "track": "Security", - "type": "Workshop", - "expertise": "Expert", + "id": "how-long-non-finality-could-kill-ethereum", + "sourceId": "U9E7PD", + "title": "How long non-finality could kill Ethereum", + "description": "After the merge, Ethereum has a finality gadget to provide an economic assurance that transactions will never be reverted. When 2/3 of the validator set are online and agree, we finalize. Otherwise, we enter a period of non-finality which can be very long, up to a few weeks. Long non-finality has never happened in Ethereum's history and could trigger a cascade of failures that will kill liveness. How can we harden the network against this? 
How high are the stakes?", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Tooling", - "Security", - "Auditing", - "analysis", - "static", - "Auditing", + "Consensus", + "Decentralization", "Security", - "Tooling" + "Consensus", + "Decentralization", + "Security" ], "keywords": [ - "Solidity", - "Frameworks", - "Program analysis", - "Static Analysis" + "-" ], - "duration": 5302, + "duration": 961, "language": "en", - "sources_swarmHash": "4427212cb7b20f8a44407a4b5401b5dbe9d985867ab725d409048614bac75dfd", - "sources_youtubeId": "8uC1QvsYeu0", + "sources_swarmHash": "9e0bf1af55bb735c4733ddce1734ff6b26ea4a77f944d0d92806e80333fb04b7", + "sources_youtubeId": "z2jafwPFLaQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673863e020d1f9ac48bd215e", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6736c6b89dbb7a90e1cd35b3", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736c6b89dbb7a90e1cd35b3.vtt", + "transcript_text": " Hello, hello, thank you. Let me drop the water. So hello everyone, I'm Dap Lyon, Ethereum Cordep at Consensus at Sigma Prime. And today I'm going to talk about how long a fanatic could kill Ethereum. Or some healthy dose of fear mongering to get all the core devs here to care a little bit more about this issue. So just to start, let's get everyone on the same page. This talk is not labeled as an expert, so we'll start with what the hell is finality. So Ethereum has a strong crypto-economical guarantee where if a block is finalized, it will not get reverted unless two-thirds of the stake, so an insane amount of money gets destroyed. For the purpose of today, what you have to know is when we finalize something, everything that descends from it is a possible block or state. Everything else we don't care about. So when the network fails to reach finality, in other words, two-thirds of the chain agree on something, then this section of blocks and states just keeps growing. Because the finalised checkpoint doesn't get updated and it just keeps going in the past. And in the past. So today we're going to run through an experiment. I'm going to show a hypothetical failure mode of the Bitcoin chain and we'll talk on all these possible concepts and how they tie together into something that could actually kill Ethereum. So let's start with some finalized checkpoint and here are some descendant block A. But this is a longer chain. I'm just representing here shortly for succinctness. Next, oops, some client has a bug. With more than one-third of the state, say like a consensus issue where they reject the block that they shouldn't. And this causes a chain split. So the faulty client cannot produce on top of A, so it just goes on its own fork and mines B prime, X prime, so on and so on. Everyone else stays on the majority fork, so B and X, and with these three dots, I'm representing a long chain. If we're talking about one day of non-finality, this could be 7,200 blocks, so quite a lot. The first thing that happens is most clients spend not significantly, but a decent amount more of this space during non-finality as the amount of blocks and states that they have to keep for fast access is higher. Then, now it's when things get interesting. 
Let's assume that one of the faulty clients, for some reason, be it another bug or some network problem, falls out of sync or crashes. And they think that B prime is the head. They will act on it and produce this block, X prime prime, which forks from B prime. This block is very expensive to process, and we'll show now why. And everyone else must process it: everyone on the fork X prime and everyone on the fork X. So these blocks are expensive because in Ethereum, states are not only dependent on the parent but also on time. When you want to process block X prime prime, you have to load the state at B prime and perform something that's called a state advance. You have to run this code, which is relatively expensive and increases linearly with state size, a lot of times. So in this case, if it's one day of non-finality, you have to do this 225 times, plus hashing. So I don't know the numbers, but we are talking a few minutes, potentially. And we can have many of these blocks. And these blocks are very expensive, and what they can cause is that now some nodes get overwhelmed. They trigger resource exhaustion. So let's consider now one of the non-faulty nodes at X. They have to ingest these expensive blocks, and now they also fall out of sync. So they start to produce these annoying blocks that, again, are expensive to process. And here you can start to see how things spiral out of control. Now that we have a decent amount of forking, disk space really shoots up. Because, again, we are extending the space of possible blocks and states not only in the dimension of time, but also in the dimension of forking. And now clients can start, again, depending on the client and how they run, to run out of space. If a client has no space, it stalls. It cannot progress. It needs manual intervention. So because of that, let's consider one of these forks. Now it doesn't have enough participation, because clients are losing disk space and going out of sync. Then you have reorgs, which are expensive. And another interesting failure mode is that some ELs cannot handle reorgs past a certain depth. If that happens, they stall because they don't have enough state to process the chain. So more clients going offline. Yada, yada, yada, yada. Also, other things to consider: clients could crash with OOM, depending on how their state cache in memory works, and again, triggering sync issues. So in these accidental, again, accidental failure modes, any sort of bug can spiral out of control, as it causes resource exhaustion, which causes faulty blocks, which causes resource exhaustion, and so on. And this behavior is quadratic with time to finality: the longer non-finality goes on, the more expensive these operations can become. But that can be triggered by an attacker. All of this so far has been accidental. And the accidental failure mode is problematic because it has a lot of stake behind it. But if an attacker wants to exploit this and has significantly less stake, potentially like some hundred keys, it can truly wreak havoc. So the problem, as I was saying, is these very expensive blocks that do a lot of skips. And you can create a lot of objects that exploit this. So for blocks, you need stake, you need to have a valid proposal signature, but you can brute force valid proposal slots. 
If you have maybe like 100 keys, so like a low-cap crusader, you could easily attack the chain in a non-finality of like one day and create about some hundreds of blocks, which, again, considering each one takes one or two minutes to process, means you can DoS the network like that. Aggregated attestations also require stake; you have to brute force the aggregator duty. And for unaggregated attestations, you don't require stake, as you can just produce an invalid attestation and it will still force nodes to compute the shuffling. The good news is that we are fixing this last attack vector in Electra, and it will also require stake. So let's look at some history. We have had some issues with finality in the past. The most relevant one happened in 2023, where we lost finality for about four epochs. What happened is something very close to this failure case, where some nodes considered themselves to be synced, and they proposed some very expensive attestations (not as expensive as the ones we're talking about, but attestations that take some time to process). Unfortunately, Prysm had a caching issue where it did this work over and over. And it didn't have a well-structured queueing system to protect itself against clogging. So any relevant work to progress the chain, like blocks and attestations, just didn't happen, and that resulted in the loss of finality. Also, Medalla is the quintessential event of non-finality. I'm not going to talk about it in detail here, but we can talk about it later. Also, Goerli suffered a very brutal death with a bunch of cascading effects. And funny enough, the PeerDAS devnet that we ran recently had a massive reorg of 130,000 blocks that triggered this edge case where none of the ELs could continue because they stalled without state. So yeah. What's worrying about failure modes regarding non-finality is that they're very spirally. Things cause things, and it gets pretty out of hand. So far we have talked about accidental issues that relate to client bugs, network partitions, these sorts of things. But the worst case that we should consider is: what's the longest possible non-finality that we could see on the beacon chain? And that's related to this failure case where, say, we have a supermajority of Geth, and they have a consensus issue where they mint infinite ETH. So it's not a fork that we can canonicalize. In that case, if they finalize the wrong fork, they will be stuck there. So on the correct fork, in this case chain B, we will have to wait for them to leak out. That's going to take a long time. So if Geth is like 70%, we are looking at 32 days. So that's, in my opinion, the worst case that we should aim for. And this is realistically something that could happen if for some reason Geth gets a significant amount of market share. So what can we do? What can we do? Easy, just don't do bugs. Don't release clients with bugs, then we don't have non-finality, and easy, not a problem. Now for real, there are no strong mitigations; we just need to harden the clients. So don't run out of disk space, don't run out of memory, don't get exhausted. And third, we're going to look at and explore whether we can actually reject some of these useless network objects, to protect against the most blatant cases of those. All of these solutions, they work on this. 
It's not actually a trilemma, but it's a trade-off space. In Ethereum, we work really, really hard to preserve liveness. That's why we have this hybrid consensus mechanism, where the chain goes into this non-finality mode just because we want to preserve liveness. We don't want to be in a situation where operator intervention is mandatory to recover the chain. At the same time, we don't want to die, and we want to process everything in a timely fashion. So the first point, and the most dangerous, is running out of disk space. Here, there are a bunch of easy rules that I think most clients follow. I think Lighthouse is one of the ones that don't follow all of them, but we are working on that. And in Lighthouse, we are now introducing this very interesting optimization, which is storing everything as diffs. That's commonly known in consensus folklore as tree-states, and that's going to be rolled out in the next version, but it only affects the freezer database. What we want to do next is apply the same principles, but for unfinalized states. And that's going to be massive, because now, every time we want to store a state, instead of spending 200 megs, we can just spend half. And we have really good compute and apply times for a bunch of different diffs. Here is how this is going to look: we'll have this hierarchical diff structure where, if you want to recover, say, the state in red, you load a snapshot and then iteratively load a diff and apply it, load and apply. This is very, very efficient in terms of disk space, and as you saw from the numbers here, it's actually pretty fast, and even faster than replay. The complication with the unfinalized section of the chain is that we have moving finality, which complicates the design a bit. But if we just extend the diffs into some finalized range, keeping at least one per layer, the design works. And pruning is not that complicated. For memory, there is not much you can do. You just have to make sure that your state cache doesn't blow up in these circumstances. And again, tree-states in memory helps a lot, and this is already in production in Lighthouse. And the last one, and I think this is the most important: you need to have some strong queuing system where you don't let yourself get exhausted by the garbage that can come from the network. Looking at the mainnet incident that we talked about: if, instead of having this single queue, you split it into sections, one that has higher priority for things that are useful to you, in this case, let's say, the descendants of head, and then everything else gets processed with lower priority, we should be safe in this case. Lighthouse at the moment is working on revamping our whole queuing system, and we're going to do something like this, also with fairness, so P0 doesn't exhaust P1. And the last point is: okay, we know that these blocks are really bad. Can we just ignore them? That would be really nice. And unfortunately, no. If we want to preserve liveness, there are a lot of edge cases where, if we start to ignore blocks, the chain could stall. So I think this is the only one I've seen that I like, and it's in Prysm in production. This is by Potuz. If you get things that extend something prior to the justified checkpoint (it's more complex than this, but just to simplify), you can ignore them. And this would have protected Prysm from the issue that we saw before. 
Unfortunately, this is just one of the many failure cases that the chain can have. So it's not like a bulletproof. So what are the next steps? Well, testing, testing, testing, and testing. The issue with non-finality is that we have to do Pektra, we have to do Pirdas, we have to do so many things that unfortunately dealing with non-finality is that we have to do Pektra, we have to do so many things that unfortunately dealing with non-finality just goes like priority number three or four. I talk with the Ethereum DevOps team, the PandaOps, and what we should do is have some cyclic tests, either continuously or maybe like quarterly, where we test non-finality. We want to uncover these bugs ahead of time and not when finality dies either in an important DevNet or main net. Also in Sigma Prime, we have a tool that we have been using with great success in this source of attack nets, and it's insanely good at killing networks. So we are with Michael in the process of revamping this tool, and we'll definitely use it in these tests. So yeah. But, yeah, I mean, just to not be too fear-mongering here, the beacon chain has been exceptionally stable through its lifetime. And the only time it lost finality was for epochs, which is nothing. We have a robust set of operators who are very diligent, hands on, the same for core devs. We have triaged and fixed issues really quickly. So again, we have to work on this. It's my mission as a core dev to make sure that Ethereum never loses liveness. But yeah, we've been doing great so far and we should be proud of it.", "eventId": "devcon-7", - "slot_start": 1731646800000, - "slot_end": 1731652200000, - "slot_roomId": "classroom-b", - "resources_presentation": "https://docs.google.com/presentation/d/1cJm-toCXN2UU4rbGe04A5r8Ki0Mu2kurnbC7eBJWsbQ", - "resources_slides": null, + "slot_start": 1731568200000, + "slot_end": 1731570000000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1ALLMSUfx7xTKyChAX-LGEzcu_42YB7z9HKrLLPQ0-cc", + "resources_slides": "https://drive.google.com/file/d/1-JNjLzVUFDVVLsX76XF50vjI2FzLjUbC/view", "speakers": [ - "dominik-teiml" + "dapplion" ] }, "vector": [ - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -399526,15 +398411,12 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -399550,7 +398432,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -399632,6 +398513,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -399681,7 +398563,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -399896,7 +398777,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -400092,11 +398972,11 @@ 0, 0, 0, + 2, 0, 0, 0, 2, - 2, 0, 0, 0, @@ -400114,44 +398994,48 @@ }, { "session": { - "id": "how-long-non-finality-could-kill-ethereum", - "sourceId": "U9E7PD", - "title": "How long non-finality could kill Ethereum", - "description": "After the merge, Ethereum has a finality gadget to provide an economic assurance that transactions will never be reverted. When 2/3 of the validator set are online and agree, we finalize. Otherwise, we enter a period of non-finality which can be very long, up to a few weeks. Long non-finality has never happened in Ethereum's history and could trigger a cascade of failures that will kill liveness. How can we harden the network against this? 
How high are the stakes?", + "id": "how-model-checking-can-help-build-trust-in-the-design-of-distributed-protocols-like-single-slot-finality", + "sourceId": "89M7ME", + "title": "How model checking can help build trust in the design of distributed protocols like Single Slot Finality", + "description": "Ethereum is a lively place for developing distributed protocols. Getting a distributed protocol right is a notoriously difficult task. When it comes to developing the Ethereum CL, the community follows two pragmatic approaches: Writing pen & paper proofs and writing executable specs in Python. We show how model checking can confirm our intuition about the behavior of consensus protocols or disprove it. We do so by applying our method to one of the recently proposed Single Slot Finality protocols", "track": "Core Protocol", - "type": "Talk", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ "Consensus", - "Decentralization", - "Security", + "Protocol Design", + "Formal Verification", + "apalache", "Consensus", - "Decentralization", - "Security" + "Formal Verification", + "Protocol Design" ], "keywords": [ - "-" + "model checking", + "TLA+", + "Apalache" ], - "duration": 961, + "duration": 379, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "1bddf56010a39f73da7d59c09b393729d459d8abbbfad177e9a41ce45d6fc3fd", + "sources_youtubeId": "9IqwdXnVnsE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736c6b89dbb7a90e1cd35b3", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736c6b89dbb7a90e1cd35b3.vtt", - "transcript_text": " Hello, hello, thank you. Let me drop the water. So hello everyone, I'm Dap Lyon, Ethereum Cordep at Consensus at Sigma Prime. And today I'm going to talk about how long a fanatic could kill Ethereum. Or some healthy dose of fear mongering to get all the core devs here to care a little bit more about this issue. So just to start, let's get everyone on the same page. This talk is not labeled as an expert, so we'll start with what the hell is finality. So Ethereum has a strong crypto-economical guarantee where if a block is finalized, it will not get reverted unless two-thirds of the stake, so an insane amount of money gets destroyed. For the purpose of today, what you have to know is when we finalize something, everything that descends from it is a possible block or state. Everything else we don't care about. So when the network fails to reach finality, in other words, two-thirds of the chain agree on something, then this section of blocks and states just keeps growing. Because the finalised checkpoint doesn't get updated and it just keeps going in the past. And in the past. So today we're going to run through an experiment. I'm going to show a hypothetical failure mode of the Bitcoin chain and we'll talk on all these possible concepts and how they tie together into something that could actually kill Ethereum. So let's start with some finalized checkpoint and here are some descendant block A. But this is a longer chain. I'm just representing here shortly for succinctness. Next, oops, some client has a bug. With more than one-third of the state, say like a consensus issue where they reject the block that they shouldn't. And this causes a chain split. 
So the faulty client cannot produce on top of A, so it just goes on its own fork and mines B prime, X prime, and so on and so on. Everyone else stays on the majority fork, so B and X, and with these three dots, I'm representing a long chain. If we're talking about one day of non-finality, this could be 7,200 blocks, so quite a lot. The first thing that happens is that most clients use, not significantly, but a decent amount more disk space during non-finality, as the amount of blocks and states that they have to keep for fast access is higher. Then, now is when things get interesting. Let's assume that one of the faulty clients, for some reason, be it another bug or some network problem, falls out of sync or crashes. And they think that B prime is the head. They will act on it and produce this block, X prime prime, which forks from B prime. This block is very expensive to process, and we'll show now why. And everyone else must process it: everyone on the fork X prime and everyone on the fork X. So these blocks are expensive because in Ethereum, states are not only dependent on the parent but also on time. When you want to process block X prime prime, you have to load the state at B prime and perform something that's called a state advance. You have to run this code, which is relatively expensive and increases linearly with state size, a lot of times. So in this case, if it's one day of non-finality, you have to do this 225 times, plus hashing. So I don't know the numbers, but we are talking a few minutes, potentially. And we can have many of these blocks. And these blocks are very expensive, and what they can cause is that now some nodes get overwhelmed. They trigger resource exhaustion. So let's consider now one of the non-faulty nodes at X. They have to ingest these expensive blocks, and now they also fall out of sync. So they start to produce these annoying blocks that, again, are expensive to process. And here you can start to see how things spiral out of control. Now that we have a decent amount of forking, disk space really shoots up. Because, again, we are extending the space of possible blocks and states not only in the dimension of time, but also in the dimension of forking. And now clients can start, again, depending on the client and how they run, to run out of space. If a client has no space, it stalls. It cannot progress. It needs manual intervention. So because of that, let's consider one of these forks. Now it doesn't have enough participation, because clients are losing disk space and going out of sync. Then you have reorgs, which are expensive. And another interesting failure mode is that some ELs cannot handle reorgs past a certain depth. If that happens, they stall because they don't have enough state to process the chain. So more clients going offline. Yada, yada, yada, yada. Also, other things to consider: clients could crash with OOM, depending on how their state cache in memory works, and again, triggering sync issues. So in these accidental, again, accidental failure modes, any sort of bug can spiral out of control, as it causes resource exhaustion, which causes faulty blocks, which causes resource exhaustion, and so on. And this behavior is quadratic with time to finality: the longer non-finality goes on, the more expensive these operations can become. But that can be triggered by an attacker. All of this so far has been accidental. And the accidental failure mode is problematic because it has a lot of stake behind it. 
But if an attacker wants to exploit this and has significantly less stake, potentially like some hundred keys, it can truly wreak havoc. So the problem, as I was saying, is these very expensive blocks that do a lot of skips. And you can create a lot of objects that exploit this. So for blocks, you need stake, you need to have a valid proposal signature, but you can brute force valid proposal slots. If you have maybe like 100 keys, so like a low-cap crusader, you could easily attack the chain in a non-finality of like one day and create about some hundreds of blocks, which, again, considering each one takes one or two minutes to process, means you can DoS the network like that. Aggregated attestations also require stake; you have to brute force the aggregator duty. And for unaggregated attestations, you don't require stake, as you can just produce an invalid attestation and it will still force nodes to compute the shuffling. The good news is that we are fixing this last attack vector in Electra, and it will also require stake. So let's look at some history. We have had some issues with finality in the past. The most relevant one happened in 2023, where we lost finality for about four epochs. What happened is something very close to this failure case, where some nodes considered themselves to be synced, and they proposed some very expensive attestations (not as expensive as the ones we're talking about, but attestations that take some time to process). Unfortunately, Prysm had a caching issue where it did this work over and over. And it didn't have a well-structured queueing system to protect itself against clogging. So any relevant work to progress the chain, like blocks and attestations, just didn't happen, and that resulted in the loss of finality. Also, Medalla is the quintessential event of non-finality. I'm not going to talk about it in detail here, but we can talk about it later. Also, Goerli suffered a very brutal death with a bunch of cascading effects. And funny enough, the PeerDAS devnet that we ran recently had a massive reorg of 130,000 blocks that triggered this edge case where none of the ELs could continue because they stalled without state. So yeah. What's worrying about failure modes regarding non-finality is that they're very spirally. Things cause things, and it gets pretty out of hand. So far we have talked about accidental issues that relate to client bugs, network partitions, these sorts of things. But the worst case that we should consider is: what's the longest possible non-finality that we could see on the beacon chain? And that's related to this failure case where, say, we have a supermajority of Geth, and they have a consensus issue where they mint infinite ETH. So it's not a fork that we can canonicalize. In that case, if they finalize the wrong fork, they will be stuck there. So on the correct fork, in this case chain B, we will have to wait for them to leak out. That's going to take a long time. So if Geth is like 70%, we are looking at 32 days. So that's, in my opinion, the worst case that we should aim for. And this is realistically something that could happen if for some reason Geth gets a significant amount of market share. So what can we do? What can we do? Easy, just don't do bugs. Don't release clients with bugs, then we don't have non-finality, and easy, not a problem. 
Now for real, there are no strong mitigations; we just need to harden the clients. So don't run out of disk space, don't run out of memory, don't get exhausted. And third, we're going to look at and explore whether we can actually reject some of these useless network objects, to protect against the most blatant cases of those. All of these solutions, they work on this. It's not actually a trilemma, but it's a trade-off space. In Ethereum, we work really, really hard to preserve liveness. That's why we have this hybrid consensus mechanism, where the chain goes into this non-finality mode just because we want to preserve liveness. We don't want to be in a situation where operator intervention is mandatory to recover the chain. At the same time, we don't want to die, and we want to process everything in a timely fashion. So the first point, and the most dangerous, is running out of disk space. Here, there are a bunch of easy rules that I think most clients follow. I think Lighthouse is one of the ones that don't follow all of them, but we are working on that. And in Lighthouse, we are now introducing this very interesting optimization, which is storing everything as diffs. That's commonly known in consensus folklore as tree-states, and that's going to be rolled out in the next version, but it only affects the freezer database. What we want to do next is apply the same principles, but for unfinalized states. And that's going to be massive, because now, every time we want to store a state, instead of spending 200 megs, we can just spend half. And we have really good compute and apply times for a bunch of different diffs. Here is how this is going to look: we'll have this hierarchical diff structure where, if you want to recover, say, the state in red, you load a snapshot and then iteratively load a diff and apply it, load and apply. This is very, very efficient in terms of disk space, and as you saw from the numbers here, it's actually pretty fast, and even faster than replay. The complication with the unfinalized section of the chain is that we have moving finality, which complicates the design a bit. But if we just extend the diffs into some finalized range, keeping at least one per layer, the design works. And pruning is not that complicated. For memory, there is not much you can do. You just have to make sure that your state cache doesn't blow up in these circumstances. And again, tree-states in memory helps a lot, and this is already in production in Lighthouse. And the last one, and I think this is the most important: you need to have some strong queuing system where you don't let yourself get exhausted by the garbage that can come from the network. Looking at the mainnet incident that we talked about: if, instead of having this single queue, you split it into sections, one that has higher priority for things that are useful to you, in this case, let's say, the descendants of head, and then everything else gets processed with lower priority, we should be safe in this case. Lighthouse at the moment is working on revamping our whole queuing system, and we're going to do something like this, also with fairness, so P0 doesn't exhaust P1. And the last point is: okay, we know that these blocks are really bad. Can we just ignore them? That would be really nice. And unfortunately, no. 
If we want to preserve liveness, there are a lot of edge cases where if we start to ignore blocks, then the chain could stall. So I think this is the only one I've seen that I like, and it's on Prism in production. This is by POTUS. If you get things that extend something previous to justified, it's more complex than this, but just to simplify, you can ignore them. And this would have protected Prism from the issue that we saw before. Unfortunately, this is just one of the many failure cases that the chain can have. So it's not like a bulletproof. So what are the next steps? Well, testing, testing, testing, and testing. The issue with non-finality is that we have to do Pektra, we have to do Pirdas, we have to do so many things that unfortunately dealing with non-finality is that we have to do Pektra, we have to do so many things that unfortunately dealing with non-finality just goes like priority number three or four. I talk with the Ethereum DevOps team, the PandaOps, and what we should do is have some cyclic tests, either continuously or maybe like quarterly, where we test non-finality. We want to uncover these bugs ahead of time and not when finality dies either in an important DevNet or main net. Also in Sigma Prime, we have a tool that we have been using with great success in this source of attack nets, and it's insanely good at killing networks. So we are with Michael in the process of revamping this tool, and we'll definitely use it in these tests. So yeah. But, yeah, I mean, just to not be too fear-mongering here, the beacon chain has been exceptionally stable through its lifetime. And the only time it lost finality was for epochs, which is nothing. We have a robust set of operators who are very diligent, hands on, the same for core devs. We have triaged and fixed issues really quickly. So again, we have to work on this. It's my mission as a core dev to make sure that Ethereum never loses liveness. But yeah, we've been doing great so far and we should be proud of it.", + "sources_streamethId": "6736d63474749a4b892f230d", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d63474749a4b892f230d.vtt", + "transcript_text": " Hello, thank you. So my name is Igor Konov. Here are the slides. Happy to be here. Actually, this is work done by myself, Tan Haidran, who is also here in the room, Jure Kukovic, Thomas Pani, who is also here, and Roberto Saltini. And we are happy to have done this work supported by Ethereum Foundation. So if you look at consensus, Ethereum consensus, you probably see a lot of algorithms starting as Casper, Gasper. Now we have single-slot finality. And recently on the block, we have three slot finality. You can check the recent report by Francesco, Roberto, Tanhai, and Luca. And if you look into these things, you'll see that there are a lot of definitions. I cannot explain all of you, all these definitions to you. You'll see that there are chain blocks slots checkpoints justified checkpoints finalized checkpoints votes by validators ffg votes that connect these checkpoints and so on so forth so these are not simple algorithms so in our work we don't uh we don't have uh basically time budget to to do everything so we focus on accountable safety which means roughly speaking that if you have a fork in consensus you should be able to identify at least one-third of the validators that are basically that produce this fork and they should be slashed. So we focus on accountable safety here. 
How would we be able to check that these protocols, namely three-slot finality, satisfy accountable safety? Well, in the science-fiction solution, when it's all good, we would take the code in Python, the executable spec in Python, produce some examples maybe to convince ourselves that it kind of works, but we would also produce an automatic proof of accountable safety. Unfortunately, that's a bit of science fiction nowadays; there are no off-the-shelf solutions that would take an executable Python spec and reason about such complex algorithms as consensus. So what we have been doing in this project: we actually were writing specifications in the Temporal Logic of Actions. That's a language invented by Leslie Lamport some time ago for reasoning about concurrent and distributed systems. And we did produce specifications by hand, because there are no tools that would be able to do that, although we have been thinking about how we could automate that. So basically, the first specification we wrote was just too complex for the model checkers. We gradually produced abstractions using this specification and essentially produced like four levels of abstractions here, so the model checker could handle the complexity of the algorithm at the end. We used the model checker Apalache, which is offloading the verification task to the SMT solver Z3. And in addition to that, as things were a bit slow, we also wrote a specification in Alloy, which is also a well-known model checker that is backed by a SAT solver. And in addition to that, we wrote SMT constraints in CVC5 using the theory of finite sets and cardinalities. So we kind of did a lot of experiments here to check accountable safety using different tools. So, as I told you, model checking could help you. How can it help you? The first thing it can help you with is that you can actually query for interesting states. If you have a large protocol, it's not easy to produce examples. And that's what model checking is good at. For instance, here, I'm just writing an invariant saying there are no two conflicting blocks (basically, there is no fork) and challenging the model checker with this false invariant. Then the model checker comes back in several minutes and shows me an example. So actually, these tools work as a good communication tool for protocol designers. Here's an example of such an execution. You don't have to read it, it's just long, but it's machine readable and it's an actual execution in this specification. So the second thing where these tools can help you is to show some properties, not for all kinds of values, but for small scopes, for small parameters. For instance, here we have experiments for five blocks, seven checkpoints, 24 votes. And as you can see, when we increase the parameter space, the tools slow down dramatically. However, we have some evidence that these properties hold true, at least for the small parameters. And that's, again, fast feedback that you can get without proving things in heavy tools. So to come to the summary of our work, we believe that model checking actually helps in ensuring correctness of protocols. We still need humans in the loop, unfortunately. We still need us, basically, to construct these abstractions and specifications. Tune in for the upcoming technical report. We are going to publish all of it and you'll see it.
And thank you Ethereum Foundation for giving us a grant. Thanks a lot. Thank you, Igor. Does anybody have any questions? Okay. Okay. I think it was very clear. Nobody has anything. You have two quick and come back again at 10. .", "eventId": "devcon-7", - "slot_start": 1731568200000, - "slot_end": 1731570000000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1ALLMSUfx7xTKyChAX-LGEzcu_42YB7z9HKrLLPQ0-cc", - "resources_slides": null, + "slot_start": 1731641400000, + "slot_end": 1731642000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1Xd-R_4o4lETYbwbQd-AVQI0TPre950m6puMNTO8psWk", + "resources_slides": "https://drive.google.com/file/d/1ioS-bm94TRo0-wW3To7f9ZE4aKNmDRzH/view", "speakers": [ - "dapplion" + "igor-konnov", + "thanh-hai-tran" ] }, "vector": [ @@ -400313,6 +399197,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -400512,6 +399397,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -400547,10 +399433,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -400906,7 +399788,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -400952,6 +399833,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -401008,11 +399890,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -401107,6 +399984,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -401276,6 +400154,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -401473,8 +400352,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -401491,53 +400370,50 @@ }, { "session": { - "id": "how-model-checking-can-help-build-trust-in-the-design-of-distributed-protocols-like-single-slot-finality", - "sourceId": "89M7ME", - "title": "How model checking can help build trust in the design of distributed protocols like Single Slot Finality", - "description": "Ethereum is a lively place for developing distributed protocols. Getting a distributed protocol right is a notoriously difficult task. When it comes to developing the Ethereum CL, the community follows two pragmatic approaches: Writing pen & paper proofs and writing executable specs in Python. We show how model checking can confirm our intuition about the behavior of consensus protocols or disprove it. We do so by applying our method to one of the recently proposed Single Slot Finality protocols", - "track": "Core Protocol", - "type": "Lightning Talk", - "expertise": "Intermediate", + "id": "how-much-security-does-your-restaking-protocol-really-need", + "sourceId": "QDDV9C", + "title": "How much security does your restaking protocol really need?", + "description": "Restaking protocols have aggregated millions of ETH with the hope of securing new infrastructure on Ethereum. These services, such as ZK provers and oracles, require restaking ETH to enforce custom slashing rules. But how much ETH do these services need? And how much risk do these services place on Ethereum L1? 
We will formulate a mathematical model for answering these questions and present an empirical analysis of cascading risks from restaking services to Ethereum, with a positive outlook!", + "track": "Cryptoeconomics", + "type": "Talk", + "expertise": "Expert", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Consensus", - "Protocol Design", - "Formal Verification", - "apalache", - "Consensus", - "Formal Verification", - "Protocol Design" + "Staking", + "Censorship Resistance", + "Economics", + "Restaking", + "proof-of", + "Censorship Resistance", + "Economics", + "Restaking" ], "keywords": [ - "model checking", - "TLA+", - "Apalache" + "Matching Markets", + "Proof of Stake" ], - "duration": 379, + "duration": 1521, "language": "en", - "sources_swarmHash": "1bddf56010a39f73da7d59c09b393729d459d8abbbfad177e9a41ce45d6fc3fd", - "sources_youtubeId": "9IqwdXnVnsE", + "sources_swarmHash": "5fe34799b6933fd32c8f52dc76f488c7257cafc29d4fe0ab5a8a4564e3294d0d", + "sources_youtubeId": "ikYZ2dMUTyw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736d63474749a4b892f230d", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d63474749a4b892f230d.vtt", - "transcript_text": " Hello, thank you. So my name is Igor Konov. Here are the slides. Happy to be here. Actually, this is work done by myself, Tan Haidran, who is also here in the room, Jure Kukovic, Thomas Pani, who is also here, and Roberto Saltini. And we are happy to have done this work supported by Ethereum Foundation. So if you look at consensus, Ethereum consensus, you probably see a lot of algorithms starting as Casper, Gasper. Now we have single-slot finality. And recently on the block, we have three slot finality. You can check the recent report by Francesco, Roberto, Tanhai, and Luca. And if you look into these things, you'll see that there are a lot of definitions. I cannot explain all of you, all these definitions to you. You'll see that there are chain blocks slots checkpoints justified checkpoints finalized checkpoints votes by validators ffg votes that connect these checkpoints and so on so forth so these are not simple algorithms so in our work we don't uh we don't have uh basically time budget to to do everything so we focus on accountable safety which means roughly speaking that if you have a fork in consensus you should be able to identify at least one-third of the validators that are basically that produce this fork and they should be slashed. So we focus on accountable safety here. How would we be able to check that these protocols, namely three-slot finality, satisfy accountable safety? Well, in science fiction solution, when it's all good, we will take the code in Python, we will take the executable spec in Python, produce some examples maybe to convince ourselves that it kind of works, but also we would also produce an automatic proof of accountable safety. Unfortunately that's a bit of science fiction nowadays, there are no over-the-shelf solutions that would take executable Python spec and reason about such complex algorithms as consensus. So what we have been doing in this project, we actually were writing specifications in temporal logic of actions. That's a language invented by Les Lampert some time ago for reason about concurrent and distributed systems. And we did produce specifications by hand because there are no tools that would be able to do that, although we have been thinking how we could automate that. 
So basically, the first specification we wrote was just too complex for the model checkers. We gradually produced abstractions using this specification and essentially produced like four levels of abstractions here, so the model checker could handle the complexity of the algorithm at the end. We used the model checker Apalache, which is offloading the verification task to the SMT solver Z3. And in addition to that, as things were a bit slow, we also wrote a specification in Alloy, which is also a well-known model checker that is backed by a SAT solver. And in addition to that, we wrote SMT constraints in CVC5 using the theory of finite sets and cardinalities. So we kind of did a lot of experiments here to check accountable safety using different tools. So, as I told you, model checking could help you. How can it help you? The first thing it can help you with is that you can actually query for interesting states. If you have a large protocol, it's not easy to produce examples. And that's what model checking is good at. For instance, here, I'm just writing an invariant saying there are no two conflicting blocks (basically, there is no fork) and challenging the model checker with this false invariant. Then the model checker comes back in several minutes and shows me an example. So actually, these tools work as a good communication tool for protocol designers. Here's an example of such an execution. You don't have to read it, it's just long, but it's machine readable and it's an actual execution in this specification. So the second thing where these tools can help you is to show some properties, not for all kinds of values, but for small scopes, for small parameters. For instance, here we have experiments for five blocks, seven checkpoints, 24 votes. And as you can see, when we increase the parameter space, the tools slow down dramatically. However, we have some evidence that these properties hold true, at least for the small parameters. And that's, again, fast feedback that you can get without proving things in heavy tools. So to come to the summary of our work, we believe that model checking actually helps in ensuring correctness of protocols. We still need humans in the loop, unfortunately. We still need us, basically, to construct these abstractions and specifications. Tune in for the upcoming technical report. We are going to publish all of it and you'll see it. And thank you, Ethereum Foundation, for giving us a grant. Thanks a lot. Thank you, Igor. Does anybody have any questions? Okay. Okay. I think it was very clear. Nobody has anything. You have two minutes; be quick and come back again at 10.", + "sources_streamethId": "6735812c9dbb7a90e14536da", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736ea1d1b0f83434d40f7be.vtt", + "transcript_text": " Thank you, Miss Purple. Yeah, so the title of the talk is Protocol Guild: Funding the Ethereum Commons. My name is Trent Van Epps. I'm a member and one of the organizers of this tool. And just a shout out to the Devcon organizers. I've presented at a number of events in the Ethereum community and it never gets old. So thank you for putting on an incredible event. And hi to any family or people that are watching on the live stream. So let's start off with some crowd engagement. Raise your hand if you think that there are 25 to 100 Ethereum core contributors. Show of hands.
Okay, and by core contributors, these are people who are working on client development, research, people that help with testnets, people that do support for testing and security, specifically related to Ethereum mainnet. That's what I mean by core protocol contributor. All right. So not too many hands. Raise your hand if you think it's 100 to 300, roughly. Okay. Okay. And then raise your hand if you think it's the last option, 300 to 1,000. All right. Most of you were right, so good job. I think roughly, in my estimation, there's probably 100 to 300 people who are involved, mostly in a full-time and probably some in a part-time capacity, working on the software that underpins Ethereum. Another question. Raise your hand if you wanted to buy some of them coffees. How many of them could you name today? So, 1 to 10. How many of you could name 1 to 10, you think? Okay, put your hand down if you thought of Vitalik or Justin. Okay. Alright, how about 10 to 30? You think you could name 10 to 30 core Ethereum contributors? And then finally, 30 to 100. All right, if your hand is still up, come talk to me. I want to meet you after this, if I don't already know you. Why does this matter? This matters because Ethereum is a software commons. And the term I like to use, if you're not familiar with it, is commons. And the definition I use is, this is a system of peer production which manages shared resources. And this is really important for how Ethereum is stewarded over time, to think of it in this commons frame. And when we know a system's peer contributors, we can do specific things with it. For one, we can get more certainty around the nature of the shared resources that it produces, and specifically the production process. Secondly, we can fund their work and help to avoid capture from malicious entities coming and engaging with the stewardship process. And finally, we can stabilize this contributor set with particular incentives. And maybe if you are familiar with the commons term, you've heard of it in the context of a fishery or a forest or a pasture: natural resources that people share together, that people figure out how to maintain a standard of the resource, and then they all benefit collectively. But this can also be applied to digital goods or intangible goods. The internet is a common infrastructure that we all use today; we're using it right now for the live stream, and I would be surprised if there's many people who don't use the internet every day, but they don't necessarily think about it as this shared infrastructure. Similarly, for something like Wikipedia or Linux, this is software infrastructure. Software is oftentimes produced within this commons context. And when we think about Ethereum, we might think about different parts of it, but we don't necessarily think about it as this holistic commons kind of mechanism. So the three resource types, from my perspective, I think there are three main ones. The first is the network and derivative forms of it. So the chain state, the chain history, different forks of mainnet. And layer twos would, I think, fit into this resource type of a distributed system. Secondly, the asset in the bottom right side of the diagram. Ether, we all use it for paying for transactions. And then it can be put into different forms, whether wrapped or restaked or bridged to different networks. And then finally, one that maybe you don't think about as being a resource type is media.
And this can be the specifications which define bits of the protocol or research about what Ethereum can look like in the future, software generally. But it also includes things like the transcripts of governance calls, AllCoreDevs calls, or the EIPs which describe future changes. I bucket these all within the term media. Hopefully these three resource types make sense to you as well. And all three of them are interwoven in a very unique way. They can't necessarily exist without the others, and so there's this interlocking where each of them refers to and is informed by the other three. Each of these resources is also produced through protocols, and by protocols here I mean like rule sets or guidelines for how the resource comes to be. So for the network, there's specific rule sets, technical rule sets for the fork choice rule, or blobs if layer twos are using EIP-4844, or P2P gossip for nodes coming to consensus and passing data to each other. The asset, you know, it's minted through proof-of-stake issuance, and then it's destroyed as part of the 1559 burn. These are rule sets which describe how the asset, this resource, operates. And then finally, the media is produced through open-source norms, permissive licensing, public-by-default governance, and rough consensus. And these rule sets are really important for defining how the actors engaging in the production of this work really operate. It defines what they can do and what they can't, the constraints which guide their behavior. So this brings us to the question of who and in what context is the software media actually produced? Some of you, I know, had your hand raised until the very end, but sometimes this isn't necessarily something we think about. Sometimes, because Ethereum is so dependable, we don't necessarily think about something which just works by default. But it really is an interesting question to start to consider: who's actually making this stuff? And this is a map. I'm not going to go too deep into it, but this is my perspective of who's actually working on mainnet these days. A range of different project sizes, different project types, there's commercial entities, there's non-profits like the Ethereum Foundation. It's a really broad array. Today, probably around 10 to 12 different independent institutions, and I would say maybe 30 different projects within that. And maybe you knew some of these, but maybe there's others that you don't. And in the middle, there's like this whole range of other org affiliations. Hopefully, if you're not listed here, you don't get offended. Consider yourself part of the middle there. And this is what comprises the mainnet stewardship, this set of projects that make up the software that then powers the network that we all use in some way or are aware of, right? So what Protocol Guild does is it takes the individuals that are working within these team contexts, within these projects, and it surfaces them to a different level of legibility. So it makes them visible from a longer distance, let's say. So the Protocol Guild mechanism binds these individuals together in a collective mechanism that then you can do interesting things with. You can fund it, you can set common standards. We'll get into that. So Protocol Guild today is a list of 187 of these mainnet contributors. And collectively, they have more than 600 years of cumulative contributions, which is, when you put it that way, it's pretty incredible, right?
You have people who've been here a long time, people who have just shown up. But together, it's a lot of brainpower that's working on this software. And generally, you can also think of it as just a collective funding mechanism, like I mentioned. There are a couple of things which make Protocol Guild very, very unique in the landscape of the Ethereum space, specifically regarding funding protocols. And I'm going to go through them here. The first is that it has a very narrow mandate. It's only concerned with maintaining the mechanism and driving funding to it. It doesn't engage in protocol governance. It doesn't direct the day-to-day efforts of these individuals. It has a very narrow mandate for what it's supposed to do, and that way we can make sure that we're focused on that mission and we don't have this scope which expands over time until the mechanism loses its focus. It also focuses on a very narrow domain, specifically the core protocol set of contributors. It doesn't think of anything much beyond that and tries to approximate where the edge, or where the boundary condition of the edge, of the mainnet protocol actually is. Whereas things like Gitcoin or Optimism Retrofunding, they have a much broader domain that they try to put funding to. Protocol Guild specifically focuses on the individuals doing this specific work. The second thing which is unique is it has an open membership of individuals. So to compare it to like a non-profit or a for-profit entity, which, you know, they hire and fire people depending on their budgets, we've committed to having an open membership. So if you meet the eligibility requirements, you show up and, you know, you've been doing this work for six months and you expect to continue doing it, the membership is obligated to add you. And this is how we build up legitimacy over time, by not necessarily gating the membership, which could be dangerous long term. And we focus on individuals instead of what many mechanisms do for their funding approach, which is focusing on the team or the project itself. We think it's really important to return as much agency to the people themselves that are working on this software and make sure that they have as much flexibility for how they engage in this commons production process. Third, it has a comprehensive quarterly curation. So we're regularly updating the mechanism. The set isn't static. Every quarter, we go around and try to find anyone that we've missed that now meets the eligibility, and we add them to the membership if they want to. Ultimately, it's opt-in. We do have core contributors who decide not to be part of this. It's totally fine. So it's an opt-in mechanism, scoped into this particular domain, and we try not to miss somebody who's doing valuable work that's maybe lower profile or harder to make legible at a certain scale. The fourth unique aspect for this mechanism, this tool, is that all of the funding goes through a four-year on-chain vesting contract. And this is different. It's an opinionated frame for how funding should be distributed. We use time-weighting to allocate funding, so that the earlier you show up and the longer you stick around, the more funding you'll get through the mechanism. And this creates assurances in a number of ways for both the funders, the members, and the community that's observing this mechanism and engaging with it. Funders know for sure that anything that they allocate will be there in the future.
It's going to sit in this immutable contract on-chain, and they know for sure that it's going to vest linearly each block. I can't run off with it; they can't take it back. So there's a nice characteristic that we get by having it in this immutable contract on chain. And then for members, they know for sure that there's going to be funding available to them if they show up and stick around. And all of these touch in some way, again, the commons frame. Software doesn't spring from the earth like these natural resources, a fishery or a forest. It doesn't come out of the ground naturally. Of course, you can enhance the development. You can make sure it has good conditions. But software requires humans to create it and steward it long term. And these really recognize the commons nature. It's going to take time to do things. Ethereum is where we are today, but there's still a lot to do over the next five to ten years. And the way we've structured this mechanism deeply considers the reality that this stuff takes time and you need people there to steward this common software over time. And so to that point, we can look at all this on-chain stuff through a Dune dashboard. If you're familiar, I recommend you go check out our Dune dashboard. We try to surface as much information as we can there. So we've been building Protocol Guild, myself and a number of others and the rest of the membership, since 2021. And today we have $57,000 per year that goes to the median member. I know we can project out, you can see in the bottom left, there's a big lump sum of about $40 million that vests over four years. However, markets are volatile. I can't really guarantee that that level of funding will necessarily be available at that US dollar value. So yeah, this is a great accomplishment, but I think we probably need to 10x this, and I'll get into a little bit more around the incentives and why we think this needs to be higher. But definitely go check out the Dune dashboard. It's a great way that we can lean into this radical transparency, because all of the funding coming in, all of the funding going out, the membership changes, and the number of individuals, it's all tracked in this Dune dashboard. So please do check it out. And now that this thing exists, we've been able to fundraise to it. And we've been really honored to have many large projects and small projects throughout the Ethereum community recognize the value of this commons-scale mechanism. And I just want to give a shout out to them really quickly. So massive, massive thank you to EtherFi, Taiko, Layer Zero, the Arbitrum community, Lido, Optimism, Uniswap, ENS, ZK-Sync, and MolochDAO for really seeing the value in this kind of thing and not just making a future commitment or saying, you know, this is a good idea, but actually committing significant funding to it. So yeah, thank you. And other things we can do with the mechanism now that it's on chain and leaning into this transparency is we can start to say, okay, as funding is coming in, how can we ensure that the distribution of funding is separated out, or we have a good diversity of assets that are funding these individuals doing the work. So we have this diversity score that compares it relative to all past funding. So another cool thing that we can do when the funding lives on chain. And one thing we introduced earlier this year is the 1% pledge. I recommend you go and read the full post at tim.mirror.xyz. It's a great post.
But one of the things that's really great is this diagram on the right, which visually displays the incentive imbalance that you have to work on the core protocol. So in the top right, you have a crypto project founder, right? They have a lot of risk going into something like this, but there's potential for significant reward. And when you're working on the L1 work, it's stable. You'll get a salary. You might get equity in some company, but you don't have this exposure to the broader ecosystem success, and there's not going to be a new token for the Ethereum blockchain. Some people have the misconception that people who are core developers, they've been around since the ICO, and they just have a huge stack of ETH. I'm here to tell you this is not the case. Many people, for whatever reason, they didn't have it. Or if they did, they've sold it since then to pay for living expenses. And there's always new people showing up. So just to set that straight, core devs are relatively undercompensated compared to the broader industry. And one thing we can do through the mechanism is shift the incentive imbalance. We're not going to ever be able to match what you could get at, let's say, a VC startup or a layer two that has a new token. Some of these newer projects that you join early, it's clear that you can have significant incentives. You'll never really be able to match that fully, but we believe it's important to at least slide slightly up on the reward curve and give people more consistency while maintaining the same level of risk. Because ultimately, at the end of the day, this work is really undervalued and less visible than a lot of the more high-profile stuff. So another shout-out. There's been a couple of projects which have really leapt into this idea of the 1% pledge. And again, shout-out to EtherFi, Pondau, and Taiko Labs for, early on in this year, saying, you know, we recognize the importance of this thing. We're going to donate 1% of our tokens to the mechanism, and that's been the bulk of the funding this year. So really thankful that they have taken this early thing and been some of the first people to actually engage with the mechanism in this way. And like I said, we do need significant funding to come into this mechanism to rebalance the incentives. There's risks around the commons being captured long term. This is something you always have to be vigilant about and pay attention to, because as I said, the software doesn't just come from the ground, fully formed. It's a human process. It's a political and social process to maintain and steward this over time. And like I said, there's still so much to do in Ethereum that we need to start putting ourselves in a posture of recognizing that over time, the commons needs to be maintained and stewarded and funded. And while there are risks, there's also significant successes in the natural commons space. Fishing practices, communal rice terraces, different farming systems have lasted for thousands of years. And then in the digital context, the internet, Linux, Wikipedia, they've had early successes, but we can start to see the challenges that they're experiencing now, as larger pools of capital show up on the edges or the margins of the commons production system. And so we should really start to think of this. Next year, Ethereum turns 10, which is incredible to think about. There's still so much to do.
But what can we do to start to take on this posture of funding the commons at the scale where it's best recognized and suited to fund these individuals who are doing this important work? So I'll end with this. Is it a core dev UBI? Is it a software standards org? Is it a union? Is it a compensation package? Probably to some degree, it has little bits of these woven together. But above all, it's a call to action. It's an invitation for the broader community to take on the responsibility to recognize their role as participants in this ecosystem. You know, the EF is not going to be the sole funder. It never was, but it should not be looked to as, you know, just "the EF will take care of it" or "these large organizations will take care of it." All of you, if you're part of a project or even as individuals, you have an obligation to think deeply about the ways that we steward and we participate in these commons production processes. So my DMs are open if you think this applies to you. Thank you. Thank you. Thank you so much for that. And on your statement, actually, you mentioned that core devs are not paid enough compared to industry. So how much is enough? That's a good question. I think I showed $57,000 per year for the median member. They do get salaries in addition to this as part of their hosting org, whether non-profit or for-profit. So this is in addition to that. It depends on what part of the market cycle we're in, but we're aiming to 10x this and provide at least $500,000 to the median members. Some people will get a lot more, and some people will get a lot less, but this is kind of what we're aiming for for now. And yeah, keep in mind that any funding that goes into it is distributed over four years. So it's not, you might see this really large number on the Dune dashboard, but keep in mind that a lot of these tokens are very volatile and it'll shift over time. And who decides who's eligible for the Protocol Guild? So the members have an eligibility framework, which we try to make as explicit as possible, with the awareness that, you know, things change over time, right? I keep referencing how the commons is stewarded over time. What we think is the edge today may not be the edge of Ethereum core protocol in 10 years, right? We need to have the humility to recognize, okay, we think we have a good picture now, but it's always going to change. It's always going to shift a little bit. So the members themselves are deeply engaged in this decision-making process around where the edge of eligibility is. Yeah. What benefits do donors have for giving away 1% of their token supply for free? What's the pledge used for? So "for free" is maybe a bit of a bad way to think about it, because what you get is the continued operation of Ethereum, right? The maintenance of this software, which you're building a business on, you're building a project on. And again, like I mentioned, we often take Ethereum mainnet for granted because it's so stable and because it really doesn't have issues. And in addition to that, we're getting much better over time. We've gotten much, much better at doing network upgrades, improving the scalability, improving usability and security. So people often take all of this work, which happens maybe behind the scenes or out of the public visibility, they take it for granted. So I wouldn't say you're getting it for free, like you just contribute it and you don't really get anything in return. You're getting something in return already. So I'd first say that.
But the money, so what is the pledge used for? It goes directly to the individuals that are listed. There's no discretionary budget where we decide, okay, we're going to fund this particular project or this software initiative. If you meet the eligibility, you get a weight, and then the funding goes directly to you. And we don't make any obligation. You could start to get into the ideas of credible neutrality. You don't want funders coming up and saying, I'll give you this if you do this. We don't make any guarantees to people that contribute funding. We only have time for one more question. How do you make sure contributions are fairly rewarded, especially when individual impact can be so varied? Fair is subjective at the end of the day, right? So we take an opinionated stance that ultimately it's the collective body of software developers that ultimately steward Ethereum in the long term. Of course there are people that are objectively more impactful or more valuable to the commons, but what we can do is pull up everybody together. The rising tide lifts all boats. And that's really what we're focused on. Yeah. All right. Thank you once again. Thank you.", "eventId": "devcon-7", - "slot_start": 1731641400000, - "slot_end": 1731642000000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Xd-R_4o4lETYbwbQd-AVQI0TPre950m6puMNTO8psWk", - "resources_slides": null, + "slot_start": 1731556800000, + "slot_end": 1731558600000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1pXSBtge-cUH6xweP8_EkxdNV7HFwwguB4oabzfh2UJ4", + "resources_slides": "https://drive.google.com/file/d/1k3AfN23Q4M-YJRYtzJqKfmAqQIY2BM_p/view", "speakers": [ - "igor-konnov", - "thanh-hai-tran" + "tarun-chitra" ] }, "vector": [ - 0, - 0, 0, 0, 6, @@ -401694,16 +400570,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -401895,8 +400761,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -401942,6 +400806,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -402292,7 +401157,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -402427,6 +401291,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -402445,6 +401310,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -402484,27 +401350,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -402655,7 +401500,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -402686,6 +401530,34 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -402850,13 +401722,14 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 2, 0, + 2, + 0, 0, 0, 0, @@ -402872,52 +401745,49 @@ }, { "session": { - "id": "how-much-security-does-your-restaking-protocol-really-need", - "sourceId": "QDDV9C", - "title": "How much security does your restaking protocol really need?", - "description": "Restaking protocols have aggregated millions of ETH with the hope of securing new infrastructure on Ethereum. These services, such as ZK provers and oracles, require restaking ETH to enforce custom slashing rules. But how much ETH do these services need? And how much risk do these services place on Ethereum L1? 
We will formulate a mathematical model for answering these questions and present an empirical analysis of cascading risks from restaking services to Ethereum, with a positive outlook!", - "track": "Cryptoeconomics", - "type": "Talk", - "expertise": "Expert", + "id": "how-to-audit-smart-contract-languages-brief-intro", + "sourceId": "HMYRTU", + "title": "How to Audit Smart Contract Languages: Brief Intro", + "description": "In this talk, we’ll dive into the unique challenges and considerations when auditing a smart contract language, as opposed to auditing individual smart contracts. We’ll cover:\r\n\r\n- Things to Look For: Key aspects of a smart contract language that need review.\r\n- Mindset Difference: Shifting from a contract-centric to a language-centric perspective, focusing on broader systemic issues rather than isolated contract logic.", + "track": "Security", + "type": "Lightning Talk", + "expertise": "Intermediate", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Staking", - "Censorship Resistance", - "Economics", - "Restaking", - "proof-of", - "Censorship Resistance", - "Economics", - "Restaking" + "Languages", + "Security", + "Auditing", + "language", + "Auditing", + "Languages", + "Security" ], "keywords": [ - "Matching Markets", - "Proof of Stake" + "Language", + "Security" ], - "duration": 1521, + "duration": 513, "language": "en", - "sources_swarmHash": "5fe34799b6933fd32c8f52dc76f488c7257cafc29d4fe0ab5a8a4564e3294d0d", - "sources_youtubeId": "ikYZ2dMUTyw", + "sources_swarmHash": "ea65855daab21ae72bb48591cb29b363cb11fb4dec22797ab7fa4ce4d568891a", + "sources_youtubeId": "rVFTbSDb8NQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735812c9dbb7a90e14536da", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736ea1d1b0f83434d40f7be.vtt", - "transcript_text": " Thank you, Miss Purple. Yeah, so the title of the talk is Protocol Guild Funding the Ethereum Commons. My name is Trent Van Epps. I'm a member and one of the organizers of this tool. And just a shout out to the DEF CON organizers. I've presented at a number of events in the Ethereum community and it never gets old. So thank you for putting on an incredible event. And hi to any family or people that are watching on the live stream. So let's start off with some crowd engagement. Raise your hand if you think that there are 25 to 100 Ethereum core contributors. Show of hands. Okay, and by core contributors, these are people who are working on client development, research, people that help with testnets, people that do support for testing and security, specifically related to Ethereum mainnet. That's what I mean by core protocol contributor. All right. So not too many hands. Raise your hand if you think it's 100 to 300, roughly. Okay. Okay. And then raise your hand if you think it's the last option, 300 to 1,000. All right. Most of you were right, so good job. I think roughly, in my estimation, there's probably 100 to 300 people who are involved, mostly in a full-time and probably some in a part-time capacity, working on the software that underpins Ethereum. Another question. Raise your hand if you wanted to buy some of them coffees. How many of them could you name today? So, 1 to 10. How many of you could name 1 to 10, you think? Okay, put your hand down if you thought of Vitalik or Justin. Okay. Alright, how about 10 to 30? You think you could name 10 to 30 core Ethereum contributors? And then finally, 30 to 100. 
All right, if your hand is still up, come talk to me. I want to meet you after this, if I don't already know you. Why does this matter? This matters because Ethereum is a software commons. And the term I like to use, if you're not familiar with it, is commons. And the definition I use is, this is a system of peer production which manages shared resources. And this is really important for how Ethereum is stewarded over time, to think of it in this commons frame. And when we know a system's peer contributors, we can do specific things with it. For one, we can get more certainty around the nature of the shared resources that it produces, and specifically the production process. Secondly, we can fund their work and help to avoid capture from malicious entities coming and engaging with the stewardship process. And finally, we can stabilize this contributor set with particular incentives. And maybe if you are familiar with the commons term, you've heard of it in the context of a fishery or a forest or a pasture: natural resources that people share together, that people figure out how to maintain a standard of the resource, and then they all benefit collectively. But this can also be applied to digital goods or intangible goods. The internet is a common infrastructure that we all use today; we're using it right now for the live stream, and I would be surprised if there's many people who don't use the internet every day, but they don't necessarily think about it as this shared infrastructure. Similarly, for something like Wikipedia or Linux, this is software infrastructure. Software is oftentimes produced within this commons context. And when we think about Ethereum, we might think about different parts of it, but we don't necessarily think about it as this holistic commons kind of mechanism. So the three resource types, from my perspective, I think there are three main ones. The first is the network and derivative forms of it. So the chain state, the chain history, different forks of mainnet. And layer twos would, I think, fit into this resource type of a distributed system. Secondly, the asset in the bottom right side of the diagram. Ether, we all use it for paying for transactions. And then it can be put into different forms, whether wrapped or restaked or bridged to different networks. And then finally, one that maybe you don't think about as being a resource type is media. And this can be the specifications which define bits of the protocol or research about what Ethereum can look like in the future, software generally. But it also includes things like the transcripts of governance calls, AllCoreDevs calls, or the EIPs which describe future changes. I bucket these all within the term media. Hopefully these three resource types make sense to you as well. And all three of them are interwoven in a very unique way. They can't necessarily exist without the others, and so there's this interlocking where each of them refers to and is informed by the other three. Each of these resources is also produced through protocols, and by protocols here I mean like rule sets or guidelines for how the resource comes to be. So for the network, there's specific rule sets, technical rule sets for the fork choice rule, or blobs if layer twos are using EIP-4844, or P2P gossip for nodes coming to consensus and passing data to each other. The asset, you know, it's minted through proof-of-stake issuance, and then it's destroyed as part of the 1559 burn.
These are rule sets which describe how the asset, this resource, operates. And then finally, the media is produced through open-source norms, permissive licensing, public-by-default governance, and rough consensus. And these rule sets are really important for defining how the actors engaging in the production of this work really operate. It defines what they can do and what they can't, the constraints which guide their behavior. So this brings us to the question of who and in what context is the software media actually produced? Some of you, I know, had your hand raised until the very end, but sometimes this isn't necessarily something we think about. Sometimes, because Ethereum is so dependable, we don't necessarily think about something which just works by default. But it really is an interesting question to start to consider: who's actually making this stuff? And this is a map. I'm not going to go too deep into it, but this is my perspective of who's actually working on mainnet these days. A range of different project sizes, different project types, there's commercial entities, there's non-profits like the Ethereum Foundation. It's a really broad array. Today, probably around 10 to 12 different independent institutions, and I would say maybe 30 different projects within that. And maybe you knew some of these, but maybe there's others that you don't. And in the middle, there's like this whole range of other org affiliations. Hopefully, if you're not listed here, you don't get offended. Consider yourself part of the middle there. And this is what comprises the mainnet stewardship, this set of projects that make up the software that then powers the network that we all use in some way or are aware of, right? So what Protocol Guild does is it takes the individuals that are working within these team contexts, within these projects, and it surfaces them to a different level of legibility. So it makes them visible from a longer distance, let's say. So the Protocol Guild mechanism binds these individuals together in a collective mechanism that then you can do interesting things with. You can fund it, you can set common standards. We'll get into that. So Protocol Guild today is a list of 187 of these mainnet contributors. And collectively, they have more than 600 years of cumulative contributions, which is, when you put it that way, it's pretty incredible, right? You have people who've been here a long time, people who have just shown up. But together, it's a lot of brainpower that's working on this software. And generally, you can also think of it as just a collective funding mechanism, like I mentioned. There are a couple of things which make Protocol Guild very, very unique in the landscape of the Ethereum space, specifically regarding funding protocols. And I'm going to go through them here. The first is that it has a very narrow mandate. It's only concerned with maintaining the mechanism and driving funding to it. It doesn't engage in protocol governance. It doesn't direct the day-to-day efforts of these individuals. It has a very narrow mandate for what it's supposed to do, and that way we can make sure that we're focused on that mission and we don't have this scope which expands over time until the mechanism loses its focus. It also focuses on a very narrow domain, specifically the core protocol set of contributors.
It doesn't think of anything much beyond that and tries to approximate where the edge, or where the boundary condition of the edge, of the mainnet protocol actually is. Whereas things like Gitcoin or Optimism Retrofunding, they have a much broader domain that they try to put funding to. Protocol Guild specifically focuses on the individuals doing this specific work. The second thing which is unique is it has an open membership of individuals. So to compare it to like a non-profit or a for-profit entity, which, you know, they hire and fire people depending on their budgets, we've committed to having an open membership. So if you meet the eligibility requirements, you show up and, you know, you've been doing this work for six months and you expect to continue doing it, the membership is obligated to add you. And this is how we build up legitimacy over time, by not necessarily gating the membership, which could be dangerous long term. And we focus on individuals instead of what many mechanisms do for their funding approach, which is focusing on the team or the project itself. We think it's really important to return as much agency to the people themselves that are working on this software and make sure that they have as much flexibility for how they engage in this commons production process. Third, it has a comprehensive quarterly curation. So we're regularly updating the mechanism. The set isn't static. Every quarter, we go around and try to find anyone that we've missed that now meets the eligibility, and we add them to the membership if they want to. Ultimately, it's opt-in. We do have core contributors who decide not to be part of this. It's totally fine. So it's an opt-in mechanism, scoped into this particular domain, and we try not to miss somebody who's doing valuable work that's maybe lower profile or harder to make legible at a certain scale. The fourth unique aspect for this mechanism, this tool, is that all of the funding goes through a four-year on-chain vesting contract. And this is different. It's an opinionated frame for how funding should be distributed. We use time-weighting to allocate funding, so that the earlier you show up and the longer you stick around, the more funding you'll get through the mechanism. And this creates assurances in a number of ways for both the funders, the members, and the community that's observing this mechanism and engaging with it. Funders know for sure that anything that they allocate will be there in the future. It's going to sit in this immutable contract on-chain, and they know for sure that it's going to vest linearly each block. I can't run off with it; they can't take it back. So there's a nice characteristic that we get by having it in this immutable contract on chain. And then for members, they know for sure that there's going to be funding available to them if they show up and stick around. And all of these touch in some way, again, the commons frame. Software doesn't spring from the earth like these natural resources, a fishery or a forest. It doesn't come out of the ground naturally. Of course, you can enhance the development. You can make sure it has good conditions. But software requires humans to create it and steward it long term. And these really recognize the commons nature. It's going to take time to do things. Ethereum is where we are today, but there's still a lot to do over the next five to ten years.
And the way we've structured this mechanism deeply considers the reality that this stuff takes time and you need people there to steward this common software over time. And so to that point, we can look at all this on-chain stuff through a Dune dashboard. If you're familiar, I recommend you go check out our Dune dashboard. We try to surface as much information as we can there. So we've been building Protocol Guild, myself and a number of others and the rest of the membership, since 2021. And today we have $57,000 per year that goes to the median member. I know we can project out, you can see in the bottom left, there's a big lump sum of about $40 million that vests over four years. However, markets are volatile. I can't really guarantee that that level of funding will necessarily be available at that US dollar value. So yeah, this is a great accomplishment, but I think we probably need to 10x this, and I'll get into a little bit more around the incentives and why we think this needs to be higher. But definitely go check out the Dune dashboard. It's a great way that we can lean into this radical transparency, because all of the funding coming in, all of the funding going out, the membership changes, and the number of individuals, it's all tracked in this Dune dashboard. So please do check it out. And now that this thing exists, we've been able to fundraise to it. And we've been really honored to have many large projects and small projects throughout the Ethereum community recognize the value of this commons-scale mechanism. And I just want to give a shout out to them really quickly. So massive, massive thank you to EtherFi, Taiko, Layer Zero, the Arbitrum community, Lido, Optimism, Uniswap, ENS, ZK-Sync, and MolochDAO for really seeing the value in this kind of thing and not just making a future commitment or saying, you know, this is a good idea, but actually committing significant funding to it. So yeah, thank you. And other things we can do with the mechanism now that it's on chain and leaning into this transparency is we can start to say, okay, as funding is coming in, how can we ensure that the distribution of funding is separated out, or we have a good diversity of assets that are funding these individuals doing the work. So we have this diversity score that compares it relative to all past funding. So another cool thing that we can do when the funding lives on chain. And one thing we introduced earlier this year is the 1% pledge. I recommend you go and read the full post at tim.mirror.xyz. It's a great post. But one of the things that's really great is this diagram on the right, which visually displays the incentive imbalance that you have to work on the core protocol. So in the top right, you have a crypto project founder, right? They have a lot of risk going into something like this, but there's potential for significant reward. And when you're working on the L1 work, it's stable. You'll get a salary. You might get equity in some company, but you don't have this exposure to the broader ecosystem success, and there's not going to be a new token for the Ethereum blockchain. Some people have the misconception that people who are core developers, they've been around since the ICO, and they just have a huge stack of ETH. I'm here to tell you this is not the case. Many people, for whatever reason, they didn't have it. Or if they did, they've sold it since then to pay for living expenses. And there's always new people showing up.
So just to set that straight, core devs are relatively undercompensated compared to the broader industry. And one thing we can do through the mechanism is shift the incentive imbalance. We're not going to ever be able to match what you could get at, let's say, a VC startup or a layer two that has a new token. Some of these newer projects that you join early, it's clear that you can have significant incentives. You'll never really be able to match that fully, but we believe it's important to at least slide slightly up on the reward curve and give people more consistency while maintaining the same level of risk. Because ultimately, at the end of the day, this work is really undervalued and less visible than a lot of the more high-profile stuff. So another shout-out. There's been a couple of projects which have really leapt into this idea of the 1% pledge. And again, shout-out to EtherFi, Pondau, and Taiko Labs for, early on in this year, saying, you know, we recognize the importance of this thing. We're going to donate 1% of our tokens to the mechanism, and that's been the bulk of the funding this year. So really thankful that they have taken this early thing and been some of the first people to actually engage with the mechanism in this way. And like I said, we do need significant funding to come into this mechanism to rebalance the incentives. There's risks around the commons being captured long term. This is something you always have to be vigilant about and pay attention to, because as I said, the software doesn't just come from the ground, fully formed. It's a human process. It's a political and social process to maintain and steward this over time. And like I said, there's still so much to do in Ethereum that we need to start putting ourselves in a posture of recognizing that over time, the commons needs to be maintained and stewarded and funded. And while there are risks, there's also significant successes in the natural commons space. Fishing practices, communal rice terraces, different farming systems have lasted for thousands of years. And then in the digital context, the internet, Linux, Wikipedia, they've had early successes, but we can start to see the challenges that they're experiencing now, as larger pools of capital show up on the edges or the margins of the commons production system. And so we should really start to think of this. Next year, Ethereum turns 10, which is incredible to think about. There's still so much to do. But what can we do to start to take on this posture of funding the commons at the scale where it's best recognized and suited to fund these individuals who are doing this important work? So I'll end with this. Is it a core dev UBI? Is it a software standards org? Is it a union? Is it a compensation package? Probably to some degree, it has little bits of these woven together. But above all, it's a call to action. It's an invitation for the broader community to take on the responsibility to recognize their role as participants in this ecosystem. You know, the EF is not going to be the sole funder. It never was, but it should not be looked to as, you know, just "the EF will take care of it" or "these large organizations will take care of it." All of you, if you're part of a project or even as individuals, you have an obligation to think deeply about the ways that we steward and we participate in these commons production processes. So my DMs are open if you think this applies to you. Thank you. Thank you.
Thank you so much for that. And on your statement, actually, you mentioned that core devs are not paid enough compared to industry. So how much is enough? That's a good question. I think I showed $57,000 per year to the median member. They do get salaries in addition to this as part of their hosting org, whether non-profit or for-profit. So this is in addition to that. It depends on what part of the market cycle we're in, but we're aiming to 10x this and provide at least $500,000 to the median member. Some people will get a lot more, and some people will get a lot less, but this is kind of what we're aiming for for now. And yeah, keep in mind that any funding that goes into it is distributed over four years. So you might see this really large number on the Dune dashboard, but keep in mind that a lot of these tokens are very volatile and it'll shift over time. And who decides who's eligible for the Protocol Guild? So the members have an eligibility framework, which we try to make as explicit as possible, with the awareness that, you know, things change over time, right? I keep referencing how the commons is stewarded over time. What we think is the edge today may not be the edge of the Ethereum core protocol in 10 years, right? We need to have the humility to recognize, okay, we think we have a good picture now, but it's always going to change. It's always going to shift a little bit. So the members themselves are deeply engaged in this decision-making process around where the edge of eligibility is. Yeah. What benefits do donors get for giving away 1% of their token supply for free? And what's the pledge used for? So "for free" is maybe a bit of a bad way to think about it, because what you get is the continued operation of Ethereum, right? The maintenance of this software, which you're building a business on, you're building a project on. And again, like I mentioned, we often take Ethereum mainnet for granted because it's so stable and because it really doesn't have issues. And in addition to that, we're getting much, much better over time at doing network upgrades and at improving scalability, usability, and security. So people often take all of this work, which happens maybe behind the scenes or out of the public visibility, for granted. So I wouldn't say you're getting it for free, like you just contribute and you don't really get anything in return. You're getting something in return already. So I'd first say that. But the money, so what is the pledge used for? It goes directly to the individuals that are listed. There's no discretionary budget where we decide, okay, we're going to fund this particular project or this software initiative. If you meet the eligibility, you get a weight, and then the funding goes directly to you. And we don't take on any obligations. You could start to get into the ideas of credible neutrality here. You don't want funders coming up and saying, I'll give you this if you do this. We don't make any guarantees to people that contribute funding. We only have time for one more question. How do you make sure contributions are fairly rewarded, especially when individual impact can be so varied? Fair is subjective at the end of the day, right? So we take an opinionated stance that ultimately it's the collective body of software developers that stewards Ethereum in the long term. Of course there are people that are objectively more impactful or more valuable to the commons, but what we can do is pull everybody up together.
The rising tide lifts all boats. And that's really what we're focused on. Yeah. All right. Thank you once again. Thank you.", + "sources_streamethId": "6737481f1b0f83434d55627c", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6737481f1b0f83434d55627c.vtt", + "transcript_text": " Can you guys hear me okay? Okay, so thank you for that introduction. That was great. I want to start this presentation with a quick poll. So please raise your hand if you're an auditor. Okay, nice. Now, raise your hand if you ever audited a smart contract language, like Solidity, Stylus, Vyper, or any other. Oh, nice, I see a few hands. But probably not as many as the ones who raised their hands first. And the reason is that it's not as common as auditing the smart contracts, for sure. And to be honest, I haven't found good resources on how to do it. Fortunately, I had the opportunity to audit these types of codebases a few times. And I learned many things, and that's basically the main motivation for today's talk: to share with you some of the things that I learned. So one second, we already did it all, fantastic. Now let's start by asking ourselves: why do we need to audit the languages? Because of this. Basically, here you can see a bunch of headlines and one meme that highlight some of the incidents that happened because of issues at the language level, right? I'm pretty sure you guys remember from last year what happened with Vyper, the reentrancy guard issue, where attackers were able to bypass the guard even though the guards were properly implemented within the contracts. The issue was at the compiler level, right? And that caused many losses in Curve Finance. What I want to convey here with this is that those are not issues that are rare; those are actually more common than you might think. Okay, so now I want to get into the differences between auditing a smart contract and auditing the languages behind them, right? So I have four key differences that I want to share with you guys. The first one, the focus areas. When we're doing an audit of a smart contract, we tend to look for business logic flaws, right? It's normally what we look for when we are doing the manual audit. But when we're doing the review of the language, we tend to look for things like undefined behaviors, features that could be misused, and overall we try to validate that the design choices of the language are correct. Okay, second, the complexity of the scope. Of course, when you're doing an audit of a language, you need to be aware of the whole ecosystem: compiler, libraries, tooling, whereas in smart contracts, it's mostly the contracts and the dependencies. Okay, third, the severity and the propagation and mitigation of risk. So if you ever find an issue at the language level, it's probably going to impact the whole ecosystem. So it's quite important for you to mitigate it. And that is actually a task that is way more difficult, right? Because you need to reach out to many stakeholders, which is complicated. Whereas when you find issues in a contract, it's mostly isolated within the contract, right? Okay, maintenance aspects. Normally, contracts are immutable. Of course, if they're upgradable, that's another case, but the majority are immutable.
And on the other hand, we have languages. They basically evolve constantly. So you need to be aware of the fact that you need to keep track of both the past and the future of the language. So the mindset, of course, is way different. So what are the things that we can look for when we're doing this type of audit? One example is function identifier management. What I mean by this is that if you're checking the dispatcher of the language, you need to make sure that if for some reason within your contract you have two functions that share the same selector or identifier, the compiler throws an error. If that's not the case, you will basically end up with a function that is going to be unreachable, right? Also, try to check if for some reason the language has something like custom selectors. This feature is actually, at least for me, not a good feature to have, because it makes it quite easy for scammers to create hard-to-detect scams, and to be honest, the benefits of having this type of feature are not clear to me. Okay. Another thing, the storage layout implementation. This must be consistent, and I want to mention one particular issue that we found in Stylus, which is that they manage the inheritance fields differently from Solidity, right? But the thing is that they were promoting that you could take an upgradable contract with an implementation, right, and switch that implementation from Solidity to Stylus. So they were pushing this type of thing, but as I just mentioned, they manage the inheritance fields differently, so this is going to mess up the layout of your contracts. So there are many things to look for. Of course, we cannot go into details because this is a lightning talk, but what I did is create this repository, which is basically a checklist with everything that I know about auditing languages, right? So I'm pretty sure that it will be valuable for you guys if you ever need to face this type of audit. So please give it a look. I'm pretty sure that there is still a lot of room for improvement. So please contribute to it, and hopefully you find value in it. So that's it. Thank you so much for your time, for your attention. Big round of applause for our speaker. If you have questions, I'd love to hear them. Check the repository, please. And thank you for doing the hard work. Questions? Any hand? Hand? Okay. Thank you. Yeah, I'm from Runtime Verification, and I'm very curious about the language auditing. Is that equivalent to compiler verification? Not exactly. In our case, we had to take many approaches, because it's not only languages that were written from scratch, but also EDSL languages. So there are many types, right? So in our case, we didn't audit the compiler, for example, for Stylus, and neither for Ink, because it's Rust. But we validated that, for example, the macros were properly implemented, and that the WASM was correctly generated by the code that they've written. So we do not do formal verification first. We actually first do the manual review and then we jump into formal verification. That's basically the framework that we normally follow. Thank you. Any other question? There is one guy over there. Let's be very quick or no, we are out of time. Okay. Sorry. So please catch up with the speaker. Reach out. Thank you again. Bye-bye.
Another round of applause.", "eventId": "devcon-7", - "slot_start": 1731556800000, - "slot_end": 1731558600000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1pXSBtge-cUH6xweP8_EkxdNV7HFwwguB4oabzfh2UJ4", - "resources_slides": null, + "slot_start": 1731655800000, + "slot_end": 1731656400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1r4V8Ln3v53MiKcUcMCQ8Cs-LG2p8VboqrQ6RHXvL-DY", + "resources_slides": "https://drive.google.com/file/d/1A--SERxJQQIFGFSauBczqDVQSiTT7b2M/view", "speakers": [ - "tarun-chitra" + "nicolas-garcia" ] }, "vector": [ - 0, - 0, 6, 0, 0, @@ -403309,9 +402179,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -403666,6 +402536,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -403702,7 +402573,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -403763,6 +402633,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -403796,7 +402667,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -403815,10 +402685,10 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, 0, 0, 0, @@ -404037,9 +402907,6 @@ 0, 0, 2, - 2, - 0, - 0, 0, 0, 0, @@ -404230,10 +403097,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 2, 0, @@ -404252,50 +403119,51 @@ }, { "session": { - "id": "how-to-audit-smart-contract-languages-brief-intro", - "sourceId": "HMYRTU", - "title": "How to Audit Smart Contract Languages: Brief Intro", - "description": "In this talk, we’ll dive into the unique challenges and considerations when auditing a smart contract language, as opposed to auditing individual smart contracts. We’ll cover:\r\n\r\n- Things to Look For: Key aspects of a smart contract language that need review.\r\n- Mindset Difference: Shifting from a contract-centric to a language-centric perspective, focusing on broader systemic issues rather than isolated contract logic.", - "track": "Security", - "type": "Lightning Talk", - "expertise": "Intermediate", + "id": "how-to-coordinate-an-epistemic-revolution", + "sourceId": "DNJMER", + "title": "How to coordinate an epistemic revolution", + "description": "Amid widespread misinformation, division, and fractured consensus, we face an epistemic crisis. This talk unifies learning and governance theory, organizational design, social consensus tools, AI, and prediction markets. We will explore how DAOs and Ethereum can serve as decentralized platforms for collective intelligence and planetary-scale problem-solving, guiding us toward an epistemic revolution at this critical time.", + "track": "Coordination", + "type": "Talk", + "expertise": "Beginner", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Languages", - "Security", - "Auditing", - "language", - "Auditing", - "Languages", - "Security" + "Consensus", + "Quadratic Voting", + "Collective Intelligence", + "citizens", + "assembly", + "Collective Intelligence", + "Consensus", + "Quadratic Voting" ], "keywords": [ - "Language", - "Security" + "Semantic Governance", + "Identity", + "Citizens Assembly" ], - "duration": 513, + "duration": 1524, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "8af47220dd27fe4b3dd7d5ceaf2f7a995e25ae4ead83ba92484fdd4f428d8f94", + "sources_youtubeId": "7PjZyQsscQE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6737481f1b0f83434d55627c", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6737481f1b0f83434d55627c.vtt", - "transcript_text": " Can you guys hear me okay? Okay, so thank you for that introduction. That was great. 
I want to start this presentation with a quick poll. So please raise your hand if you're an auditor. Okay, nice. Now, raise your hand if you ever audited a smart contract language, like Solidity, Stylus, Vyper, or any other. Oh, nice, I see a few hands. But probably not as many as the ones who raised their hands first. And the reason is that it's not as common as auditing the smart contracts, for sure. And to be honest, I haven't found good resources on how to do it. Fortunately, I had the opportunity to audit these types of codebases a few times. And I learned many things, and that's basically the main motivation for today's talk: to share with you some of the things that I learned. So one second, we already did it all, fantastic. Now let's start by asking ourselves: why do we need to audit the languages? Because of this. Basically, here you can see a bunch of headlines and one meme that highlight some of the incidents that happened because of issues at the language level, right? I'm pretty sure you guys remember from last year what happened with Vyper, the reentrancy guard issue, where attackers were able to bypass the guard even though the guards were properly implemented within the contracts. The issue was at the compiler level, right? And that caused many losses in Curve Finance. What I want to convey here with this is that those are not issues that are rare; those are actually more common than you might think. Okay, so now I want to get into the differences between auditing a smart contract and auditing the languages behind them, right? So I have four key differences that I want to share with you guys. The first one, the focus areas. When we're doing an audit of a smart contract, we tend to look for business logic flaws, right? It's normally what we look for when we are doing the manual audit. But when we're doing the review of the language, we tend to look for things like undefined behaviors, features that could be misused, and overall we try to validate that the design choices of the language are correct. Okay, second, the complexity of the scope. Of course, when you're doing an audit of a language, you need to be aware of the whole ecosystem: compiler, libraries, tooling, whereas in smart contracts, it's mostly the contracts and the dependencies. Okay, third, the severity and the propagation and mitigation of risk. So if you ever find an issue at the language level, it's probably going to impact the whole ecosystem. So it's quite important for you to mitigate it. And that is actually a task that is way more difficult, right? Because you need to reach out to many stakeholders, which is complicated. Whereas when you find issues in a contract, it's mostly isolated within the contract, right? Okay, maintenance aspects. Normally, contracts are immutable. Of course, if they're upgradable, that's another case, but the majority are immutable. And on the other hand, we have languages. They basically evolve constantly. So you need to be aware of the fact that you need to keep track of both the past and the future of the language. So the mindset, of course, is way different. So what are the things that we can look for when we're doing this type of audit? One example is function identifier management.
What I mean by this is that if you're checking the dispatcher of the language, you need to make sure that if for some reason within your contract you have two functions that share the same selector or identifier, the compiler throws an error. If that's not the case, you will basically end up with a function that is going to be unreachable, right? Also, try to check if for some reason the language has something like custom selectors. This feature is actually, at least for me, not a good feature to have, because it makes it quite easy for scammers to create hard-to-detect scams, and to be honest, the benefits of having this type of feature are not clear to me. Okay. Another thing, the storage layout implementation. This must be consistent, and I want to mention one particular issue that we found in Stylus, which is that they manage the inheritance fields differently from Solidity, right? But the thing is that they were promoting that you could take an upgradable contract with an implementation, right, and switch that implementation from Solidity to Stylus. So they were pushing this type of thing, but as I just mentioned, they manage the inheritance fields differently, so this is going to mess up the layout of your contracts. So there are many things to look for. Of course, we cannot go into details because this is a lightning talk, but what I did is create this repository, which is basically a checklist with everything that I know about auditing languages, right? So I'm pretty sure that it will be valuable for you guys if you ever need to face this type of audit. So please give it a look. I'm pretty sure that there is still a lot of room for improvement. So please contribute to it, and hopefully you find value in it. So that's it. Thank you so much for your time, for your attention. Big round of applause for our speaker. If you have questions, I'd love to hear them. Check the repository, please. And thank you for doing the hard work. Questions? Any hand? Hand? Okay. Thank you. Yeah, I'm from Runtime Verification, and I'm very curious about the language auditing. Is that equivalent to compiler verification? Not exactly. In our case, we had to take many approaches, because it's not only languages that were written from scratch, but also EDSL languages. So there are many types, right? So in our case, we didn't audit the compiler, for example, for Stylus, and neither for Ink, because it's Rust. But we validated that, for example, the macros were properly implemented, and that the WASM was correctly generated by the code that they've written. So we do not do formal verification first. We actually first do the manual review and then we jump into formal verification. That's basically the framework that we normally follow. Thank you. Any other question? There is one guy over there. Let's be very quick or no, we are out of time. Okay. Sorry. So please catch up with the speaker. Reach out. Thank you again. Bye-bye.
Another round of applause.", + "sources_streamethId": "6736f77574749a4b8998ffb0", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731655800000, - "slot_end": 1731656400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1r4V8Ln3v53MiKcUcMCQ8Cs-LG2p8VboqrQ6RHXvL-DY", - "resources_slides": null, + "slot_start": 1731643800000, + "slot_end": 1731645600000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1sq5KPHZmGsWxfhQtVwIBL6Wm8XGy-QAB5wFPQck9lO4", + "resources_slides": "https://drive.google.com/file/d/1S9B7KM8koAEtRLuu7YlVtbMqWu9rHjKN/view", "speakers": [ - "nicolas-garcia" + "nick-almond" ] }, "vector": [ - 6, 0, 0, 0, @@ -404307,6 +403175,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -405046,12 +403915,9 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -405080,6 +403946,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -405143,7 +404010,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -405198,7 +404064,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -405256,6 +404121,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -405418,7 +404284,7 @@ 0, 0, 2, - 0, + 2, 0, 0, 0, @@ -405612,8 +404478,6 @@ 2, 0, 0, - 0, - 0, 2, 0, 0, @@ -405631,57 +404495,48 @@ }, { "session": { - "id": "how-to-coordinate-an-epistemic-revolution", - "sourceId": "DNJMER", - "title": "How to coordinate an epistemic revolution", - "description": "Amid widespread misinformation, division, and fractured consensus, we face an epistemic crisis. This talk unifies learning and governance theory, organizational design, social consensus tools, AI, and prediction markets. We will explore how DAOs and Ethereum can serve as decentralized platforms for collective intelligence and planetary-scale problem-solving, guiding us toward an epistemic revolution at this critical time.", - "track": "Coordination", + "id": "how-to-destroy-a-network-offboarding-the-mainstream", + "sourceId": "XNCFRL", + "title": "How To Destroy A Network: Offboarding The Mainstream", + "description": "Crafting Ethereum into a setting (both technically and reputationally) where The Institutions feel comfortable participating in it at scale has been the life work of hundreds of people over the last nine years. And yet, for our success, many feel that the victory has come at a cost too heavy to bear: our losing focus as to why we built the global computer in the first place. 
If you feel the same way, join me for a brief exploration of what would need to happen for us to cut the cord.", + "track": "Cypherpunk & Privacy", "type": "Talk", - "expertise": "Beginner", - "audience": "Research", + "expertise": "Intermediate", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Consensus", - "Quadratic Voting", - "Collective Intelligence", - "citizens", - "assembly", - "Collective Intelligence", - "Consensus", - "Quadratic Voting" + "Network State", + "Privacy", + "Anonymity", + "Digital Sovereignty", + "value", + "Anonymity", + "Digital Sovereignty", + "Network State", + "Privacy" ], "keywords": [ - "Semantic Governance", - "Identity", - "Citizens Assembly" + "Values" ], - "duration": 1524, + "duration": 1441, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "5562c301da48c32f85eb28983b8b07f898e24606054da60482c5cd878fcf3584", + "sources_youtubeId": "axoRHSVU9KU", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736f77574749a4b8998ffb0", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "673469329dbb7a90e18ab8ef", "eventId": "devcon-7", - "slot_start": 1731643800000, - "slot_end": 1731645600000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1sq5KPHZmGsWxfhQtVwIBL6Wm8XGy-QAB5wFPQck9lO4", - "resources_slides": null, + "slot_start": 1731484200000, + "slot_end": 1731486000000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1mVbPl6HPZouYDklCGe84dKqjwtSkE7VTKOYNdWU6URc", + "resources_slides": "https://drive.google.com/file/d/1suVsjiBN2otsA7oL_GggfpGKLOGBIHsJ/view", "speakers": [ - "nick-almond" + "laurence-day" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -406071,17 +404926,13 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -406432,7 +405283,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -406482,8 +405332,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -406549,6 +405401,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -406636,27 +405489,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -406799,8 +405631,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -406828,6 +405658,28 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -406992,7 +405844,6 @@ 0, 0, 0, - 2, 0, 0, 2, @@ -407004,6 +405855,11 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -407012,45 +405868,50 @@ }, { "session": { - "id": "how-to-destroy-a-network-offboarding-the-mainstream", - "sourceId": "XNCFRL", - "title": "How To Destroy A Network: Offboarding The Mainstream", - "description": "Crafting Ethereum into a setting (both technically and reputationally) where The Institutions feel comfortable participating in it at scale has been the life work of hundreds of people over the last nine years. And yet, for our success, many feel that the victory has come at a cost too heavy to bear: our losing focus as to why we built the global computer in the first place. 
If you feel the same way, join me for a brief exploration of what would need to happen for us to cut the cord.", - "track": "Cypherpunk & Privacy", - "type": "Talk", + "id": "how-to-do-something-to-some-state-in-some-contract", + "sourceId": "HECBJV", + "title": "How to do something to some state in some contract", + "description": "Smart contracts are changing. \r\n\r\nSo far, they needed every transaction to be public in order for nodes to agree. Zero-Knowledge came in to change things a bit: you can actually make your transaction client-side and just broadcast a proof.\r\n\r\nIn this workshop, we will use Noir and write a simple Aztec and/or Ethereum contract that allows for most of the execution and state to remain private.", + "track": "Applied Cryptography", + "type": "Workshop", "expertise": "Intermediate", - "audience": "Community", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Network State", + "DevEx", "Privacy", - "Anonymity", - "Digital Sovereignty", - "value", - "Anonymity", - "Digital Sovereignty", - "Network State", + "Decentralization", + "Cryptography", + "Mobile", + "proving", + "Cryptography", + "Decentralization", + "DevEx", "Privacy" ], "keywords": [ - "Values" + "ZKDSL", + "DevOps", + "Mobile Proving" ], - "duration": 1441, + "duration": 4706, "language": "en", - "sources_swarmHash": "5562c301da48c32f85eb28983b8b07f898e24606054da60482c5cd878fcf3584", - "sources_youtubeId": "axoRHSVU9KU", + "sources_swarmHash": "bb0f866e09f66552c94ef7529b2d57baa5498b11f4c4838e64ea3795d3696642", + "sources_youtubeId": "QD9ijtKRsWM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673469329dbb7a90e18ab8ef", + "sources_streamethId": "6738674520d1f9ac48c64e89", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731484200000, - "slot_end": 1731486000000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1mVbPl6HPZouYDklCGe84dKqjwtSkE7VTKOYNdWU6URc", - "resources_slides": null, + "slot_start": 1731638700000, + "slot_end": 1731644100000, + "slot_roomId": "classroom-d", + "resources_presentation": "https://docs.google.com/presentation/d/1V-PhhZNdNFgdu0_mbGXOQJjINihO5JLwJV7DDAJh4nc", + "resources_slides": "https://drive.google.com/file/d/1PP9P8J0c_MVSkIu5cBKpRMXpobIHJQdt/view", "speakers": [ - "laurence-day" + "jose-pedro-sousa-or-zpedro" ] }, "vector": [ @@ -407059,12 +405920,12 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -407815,6 +406676,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -407824,7 +406686,7 @@ 0, 0, 0, - 0, + 2, 0, 0, 0, @@ -407852,13 +406714,6 @@ 0, 0, 0, - 2, - 0, - 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -407910,6 +406765,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -407923,6 +406779,7 @@ 0, 2, 0, + 2, 0, 0, 0, @@ -408179,7 +407036,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -408372,12 +407228,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -408390,50 +407246,39 @@ }, { "session": { - "id": "how-to-do-something-to-some-state-in-some-contract", - "sourceId": "HECBJV", - "title": "How to do something to some state in some contract", - "description": "Smart contracts are changing. \r\n\r\nSo far, they needed every transaction to be public in order for nodes to agree. 
Zero-Knowledge came in to change things a bit: you can actually make your transaction client-side and just broadcast a proof.\r\n\r\nIn this workshop, we will use Noir and write a simple Aztec and/or Ethereum contract that allows for most of the execution and state to remain private.", + "id": "how-to-hallucinate-a-server", + "sourceId": "QNFTYG", + "title": "How To Hallucinate A Server", + "description": "A Hallucinated Server is a virtual server whose execution is cryptographically simulated by users, using \"multiplayer\" privacy technologies like multi-party computation or fully homomorphic encryption. Today, thanks to recent advancements in MPC and FHE, we have the technology to build the first fully Turing-complete hallucinated servers. We discuss the underlying technologies, and how we've used them to build several proof-of-concept applications.", "track": "Applied Cryptography", - "type": "Workshop", + "type": "Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "DevEx", - "Privacy", - "Decentralization", - "Cryptography", - "Mobile", - "proving", - "Cryptography", - "Decentralization", - "DevEx", - "Privacy" + "Homomorphic Encryption", + "MPC" ], "keywords": [ - "ZKDSL", - "DevOps", - "Mobile Proving" + "MPFHE", + "Hallucinated Server" ], - "duration": 4706, + "duration": 1366, "language": "en", - "sources_swarmHash": "bb0f866e09f66552c94ef7529b2d57baa5498b11f4c4838e64ea3795d3696642", - "sources_youtubeId": "QD9ijtKRsWM", + "sources_swarmHash": "3a887806e3d602aeb13383ce6beea87c101495b2a54f22bfc5f8a9bf48c4e0b5", + "sources_youtubeId": "0f9IIYvmb6M", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6738674520d1f9ac48c64e89", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "673572269dbb7a90e19a37f9", "eventId": "devcon-7", - "slot_start": 1731638700000, - "slot_end": 1731644100000, - "slot_roomId": "classroom-d", - "resources_presentation": "https://docs.google.com/presentation/d/1V-PhhZNdNFgdu0_mbGXOQJjINihO5JLwJV7DDAJh4nc", - "resources_slides": null, + "slot_start": 1731552300000, + "slot_end": 1731554100000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1wOtuuxn-pV_UdYT74yaBeuoxLyXyxkk_KW0-5GBqLJk", + "resources_slides": "https://drive.google.com/file/d/1VNKkS-0x_dF9c7qLCNhHe2Rt51k3Kwo9/view", "speakers": [ - "jose-pedro-sousa-or-zpedro" + "gubsheep" ] }, "vector": [ @@ -408620,6 +407465,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -408834,7 +407680,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -409201,8 +408046,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -409211,14 +408054,12 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -409278,6 +408119,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -409290,7 +408133,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -409302,9 +408144,7 @@ 0, 0, 0, - 2, 0, - 2, 0, 0, 0, @@ -409773,39 +408613,45 @@ }, { "session": { - "id": "how-to-hallucinate-a-server", - "sourceId": "QNFTYG", - "title": "How To Hallucinate A Server", - "description": "A Hallucinated Server is a virtual server whose execution is cryptographically simulated by users, using \"multiplayer\" privacy technologies like multi-party computation or fully homomorphic encryption. Today, thanks to recent advancements in MPC and FHE, we have the technology to build the first fully Turing-complete hallucinated servers. 
We discuss the underlying technologies, and how we've used them to build several proof-of-concept applications.", - "track": "Applied Cryptography", - "type": "Talk", + "id": "how-to-onboard-22-million-users-overnight-using-non-conventional-cryptography", + "sourceId": "SDPVVF", + "title": "How to onboard 22 million users overnight using non-conventional cryptography", + "description": "Since 2004, the Mexican tax administration started to issue digital identity certificates that linked government IDs to sovereign private keys. These has facilitated the electronic invoicing system that is designed around a public key infrastructure maintained by the central bank.\r\n\r\nThis infrastructure has provided with private keys to over 22 million people. We're onboarding all of those using Account Abstraction in a friendly-manner.", + "track": "Real World Ethereum", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Homomorphic Encryption", - "MPC" + "Identity", + "Cryptography", + "Account Abstraction", + "pki", + "Account Abstraction", + "Cryptography", + "Identity" ], "keywords": [ - "MPFHE", - "Hallucinated Server" + "ERC-4337", + "RSA", + "PKI" ], - "duration": 1366, + "duration": 495, "language": "en", - "sources_swarmHash": "3a887806e3d602aeb13383ce6beea87c101495b2a54f22bfc5f8a9bf48c4e0b5", - "sources_youtubeId": "0f9IIYvmb6M", + "sources_swarmHash": "27f0ff51b8cf2bd6235c4f7c336e2d44d2515212562c231633e54fcb571a19f5", + "sources_youtubeId": "DKJYpdXsOwQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673572269dbb7a90e19a37f9", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731552300000, - "slot_end": 1731554100000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1wOtuuxn-pV_UdYT74yaBeuoxLyXyxkk_KW0-5GBqLJk", - "resources_slides": null, + "slot_start": 1731479400000, + "slot_end": 1731480000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/131bdLWEGmE-yZLMUwmeE98y-D2mP5uniqwKdaak6J1c", + "resources_slides": "https://drive.google.com/file/d/1Arr2Kme_G-7vSGBTo2fcaraKJ-x-zs4k/view", "speakers": [ - "gubsheep" + "ernesto-garcia" ] }, "vector": [ @@ -409815,10 +408661,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -409924,6 +408766,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -409990,47 +408833,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -410614,6 +409416,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -410650,17 +409453,8 @@ 0, 0, 2, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, + 2, 0, 0, 0, @@ -410983,6 +409777,50 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -411123,7 +409961,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -411136,6 +409973,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -411145,54 +409986,54 @@ }, { "session": { - "id": "how-to-onboard-22-million-users-overnight-using-non-conventional-cryptography", - "sourceId": "SDPVVF", - "title": "How to onboard 22 million users overnight using non-conventional cryptography", - "description": "Since 2004, the Mexican tax administration 
started to issue digital identity certificates that linked government IDs to sovereign private keys. These has facilitated the electronic invoicing system that is designed around a public key infrastructure maintained by the central bank.\r\n\r\nThis infrastructure has provided with private keys to over 22 million people. We're onboarding all of those using Account Abstraction in a friendly-manner.", - "track": "Real World Ethereum", + "id": "how-to-raise-the-gas-limit-use-ultra-high-resolution-data", + "sourceId": "UASADN", + "title": "How to Raise the Gas Limit: Use Ultra High Resolution Data", + "description": "Recent advances in EVM data processing enable a more rigorous approach for understanding and enacting Ethereum’s scaling roadmap. In the past, discussions around whether to raise Ethereum’s gas limit have been held back by imprecise terminology and a lack of detailed quantitative evidence. The debate is often “vibes-based”. Leveraging ultra high resolution datasets enables a more scientific understanding of the gas limit, including issues like state growth, hardware bottlenecks, and gas pricing.", + "track": "Core Protocol", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Community", + "audience": "Research", "featured": false, "doNotRecord": false, - "tags": [ - "Identity", - "Cryptography", - "Account Abstraction", - "pki", - "Account Abstraction", - "Cryptography", - "Identity" - ], "keywords": [ - "ERC-4337", - "RSA", - "PKI" + "Gas limit", + "State growth", + "History growth", + "Bandwidth" + ], + "tags": [ + "Layer 1", + "Gas", + "Scalability", + "bandwidth", + "Gas", + "Layer 1", + "Scalability" ], - "duration": 495, "language": "en", - "sources_swarmHash": "27f0ff51b8cf2bd6235c4f7c336e2d44d2515212562c231633e54fcb571a19f5", - "sources_youtubeId": "DKJYpdXsOwQ", + "sources_swarmHash": "f6ba379fd50f4248137d7ab71d565254d9a3693763205e40b77494654d3d3d96", + "sources_youtubeId": "v8Po5KOutow", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731479400000, - "slot_end": 1731480000000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/131bdLWEGmE-yZLMUwmeE98y-D2mP5uniqwKdaak6J1c", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "ernesto-garcia" - ] + "storm-slivkoff" + ], + "eventId": "devcon-7", + "slot_start": 1731569400000, + "slot_end": 1731570000000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1EM_PJu06t3IYa4m6iVVoVQ2AnVXrc2iJ4B-8uWHtzAE", + "resources_slides": "https://drive.google.com/file/d/1L1fZ1zoN0AySCbNzxuigHMSkXcN2f25e/view" }, "vector": [ 0, 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -411298,8 +410139,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -411588,6 +410427,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -411951,9 +410791,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -411987,9 +410827,7 @@ 0, 0, 0, - 2, 0, - 2, 0, 0, 0, @@ -412080,6 +410918,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -412166,6 +411005,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -412499,18 +411339,16 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, 0, + 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -412523,47 +411361,55 @@ }, { "session": { - "id": "how-to-raise-the-gas-limit-use-ultra-high-resolution-data", - "sourceId": "UASADN", - "title": "How to Raise the Gas Limit: Use Ultra High Resolution Data", - "description": "Recent advances in EVM data processing enable a more rigorous 
approach for understanding and enacting Ethereum’s scaling roadmap. In the past, discussions around whether to raise Ethereum’s gas limit have been held back by imprecise terminology and a lack of detailed quantitative evidence. The debate is often “vibes-based”. Leveraging ultra high resolution datasets enables a more scientific understanding of the gas limit, including issues like state growth, hardware bottlenecks, and gas pricing.", - "track": "Core Protocol", + "id": "how-to-steal-dollar11m-from-lending-market-in-15-minutes", + "sourceId": "TJ833L", + "title": "How to steal $1.1M from lending market in 15 minutes", + "description": "In may 2024 I found multiple bugs in lending market which allowed to steal $1.1 mln. The exploit itself was very complicated and required multiple steps, including exploitation of liquidation process of unhealthy loan which worked very similar to flash loan. \r\nI'll tell the story of how I decided to check this project source code to finding an issue, contacting with owners of platform and fixing it. I'll also share the best tips how to avoid and prevent such issues in other projects.", + "track": "Security", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Gas limit", - "State growth", - "History growth", - "Bandwidth" - ], "tags": [ - "Layer 1", - "Gas", - "Scalability", - "bandwidth", - "Gas", - "Layer 1", - "Scalability" + "Security", + "Auditing", + "Bug", + "exploits", + "Auditing", + "Bug", + "Security" ], - "language": "en", - "speakers": [ - "storm-slivkoff" + "keywords": [ + "defi", + "lending protocols", + "exploit" ], + "duration": 567, + "language": "en", + "sources_swarmHash": "6cde5ac1ecaca00d4450ae8c2ce10c9bbd1fe846da5fd23f8f6b0827c9a3ce4e", + "sources_youtubeId": "JNPu1km8Pxg", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673749fc1b0f83434d8b3f52", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673749fc1b0f83434d8b3f52.vtt", + "transcript_text": " We have... One, one. Oh, hey, everyone. Now it's working. I'm going to tell you about one of my favorite issues, and I will show you how I was theoretically able to steal $1.1 million in 15 minutes. So how the story started? A few months ago, I was browsing the interesting applications on the blockchain, and I found a very interesting lending market. They were offering 82% returns in USDC, in stables. So a crazy good deal. So there's something wrong with this? Or is the best deal ever? Or is it scam? I decided to check it. So I did what I do the best, and I started to checking the source code. Fortunately, it was open source, so I checked everything, how the platforms works, if they're really having the loans or not, and I decided in the end to verify all the loans, collaterals, debt in the system. So in the lending market, the important thing is healthy and unhealthy loan. So basically, when you take a loan, you need to give some collateral. So let's say you want to borrow $50, you need to give $100 of collateral. And whenever the debt becomes too high, let's say it grows by 20% because value of token went up, the platform needs to liquidate the loan. Because if it didn't, then there's just not enough collateral to pay for the loan. Most of the platforms usually require pretty high collateral. In this case, let's assume it was a 2 to 1 ratio. 
So if I want to borrow $100, I need to give $200 in collateral. Okay, so I created a simple script to verify all the loans, assets, and collaterals. Everything was fine with the platform, but I decided to go even deeper. So what I noticed is that the platform was very complex. They had very complex logic: the liquidations, the flash loans, support for very untypical loans, support for multiple assets. And from my experience as an auditor, I quickly noticed it's very easy to make a mistake, especially with the liquidation process. So I was already able to see some low severity issues, and I decided to dig deeper to see if I could achieve something more with them. And the most interesting part was the liquidation of an unhealthy loan, because it worked like a flash loan. If you want to liquidate the loan, first you get the collateral, and then you repay the debt at the end of the transaction. So it works like a flash loan: you get token X, but you are forced to repay token Y by the end of the transaction. So this kind of logic is very complex. It's very easy to make a mistake. So I started to search, okay, how can I abuse it to get some critical issue? And there was another very complex thing, which was linked loans. Basically, you are able to have multiple loans for a single collateral. The reason was, you know, if you want to have a loan for different things, you can have it. So you just create one account with all the collateral and then create multiple loans linked to it. And the important part was how to create a linked loan. So you could do it by creating a new empty loan with no collateral, and then you linked it to the other loan which already had collateral. But I noticed they only check if the loan has no collateral. They didn't check if the loan has any debt. So that may be an issue. But the question is, is it even possible to create a loan with only debt and no collateral? And actually, there is a way. You know this liquidation process from before? I noticed that this is the only place which is taking the collateral from the loan. And then, for a moment, the collateral is lower than it should be. Of course, normally, the collateral is pretty high, so you cannot take everything, but here's where they made a mistake. They allowed starting multiple liquidations of the same loan in the same transaction. So I was able to start liquidation twice and create a loan which has, for a moment, no collateral. On its own, this transaction would fail, because when I would then like to repay $100 of debt, there wouldn't be enough debt in the smart contract to actually repay it. But fortunately, I mean, unfortunately for them, I found a way to fix this issue. So the idea is, okay, I have too much debt, which I need to repay. So to fix it, after linking this loan to another one, I simply need to take more debt. And then by the end of the transaction, when I try to repay $100 of debt, it's possible. So I managed to make this transaction work. And what does it accomplish? So first of all, I am able to link a loan which has only debt to another one. And the problem with this is they assume that this loan, which I'm linking, has no debt. So when I was linking the loan, the debt wasn't added to the master loan. However, when I was repaying the debt, it was being removed from the master loan.
So with every transaction, I was able to steal some amount of the money. So now I knew I had an exploit. I decided to code it, spend the next 30 hours, basically during the two days, to write the exploit. It was hard and unexpected because they had a lot of running issues, so it wasn't so simple to remove all collateral to be exactly zero, but they managed to do it. So when I ran the exploit, it was, of course, on a testnet, which I created by copying the main database. In 15 minutes, by doing 1,000 transactions, I was able to extract everything. And yeah, it was $1.1 million. So 15 minutes. And of course, then I contacted the and then I contacted the project and we fixed the issue so it worked well for everyone and then I put a lot of money there because the returns were crazy high so last thing my recommendations to how to avoid this, first of all don't go into such complex logic if you don't need to keep your projects as simple as possible especially if you can't test something just don't write it have a developer with experience in cyber security of course they can audit have a bounty programmed they didn't but i contacted them on Telegram and we found out the way how to cooperate on that. Write as many tests and use platforms which can help you to stop attacks if they can be detected. If you would like to talk more about this issue, I have LabHotTop with me. We have talked an hour about this because it was quite more complex than I'm showing here, then feel free to catch me. I'm easy to find. So thank you. Any questions? So we are short on time. We're going to take one question. It's the flying mic. If a user is sending ERC20 tokens to a contract which is not designed to receive them, then these tokens are lost. This cannot happen with an ERC223 token, NFT, or native currency. There are $115 million lost because of the trust problem now. Do you think about it? I don't know what to think about it. I mean, I would need more time to see this in details. I'll show you the details. Yeah, sure, sure. We can talk about it in a second. Okay, so another round of applause for Bartosz. Time is up.", "eventId": "devcon-7", - "slot_start": 1731569400000, - "slot_end": 1731570000000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1EM_PJu06t3IYa4m6iVVoVQ2AnVXrc2iJ4B-8uWHtzAE" + "slot_start": 1731657600000, + "slot_end": 1731658200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1_JwwqcHhRqpyNIOuusmiEAr-roI7bAxIOH-9iiMKSaM", + "resources_slides": "https://drive.google.com/file/d/1TX4KoKY-4zWQwhSTrUlVhu1GXmbz6LrI/view", + "speakers": [ + "bartosz-barwikowski" + ] }, "vector": [ + 6, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -413307,6 +412153,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -413325,10 +412172,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -413450,7 +412293,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -413463,6 +412305,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -413537,7 +412380,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -413588,6 +412430,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -413871,13 +412714,10 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, - 0, 2, 0, 0, @@ -413890,62 +412730,58 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "how-to-steal-dollar11m-from-lending-market-in-15-minutes", - "sourceId": "TJ833L", - "title": "How to steal $1.1M from lending market in 15 minutes", - "description": "In may 2024 I found multiple bugs in lending market which allowed to steal $1.1 mln. 
The exploit itself was very complicated and required multiple steps, including exploitation of liquidation process of unhealthy loan which worked very similar to flash loan. \r\nI'll tell the story of how I decided to check this project source code to finding an issue, contacting with owners of platform and fixing it. I'll also share the best tips how to avoid and prevent such issues in other projects.", - "track": "Security", - "type": "Lightning Talk", + "id": "how-web3-and-rwas-unlock-exponential-wealth-via-a-computable-economy", + "sourceId": "GFAA97", + "title": "How Web3 and RWAs Unlock Exponential Wealth via a Computable Economy.", + "description": "Keynote based on Justin Banon And Prof. Jason Potts academic paper: How Web3 enables the transition to a new computable economy and exponential growth in economic complexity, wealth, and prosperity by extending the reliability and programmability of on-chain transactions to the entire economy via RWA tokenization. Web3 is not just a new information technology, it is a new institutional technology on the scale of language, writing and code.", + "track": "Real World Ethereum", + "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Business", "featured": false, "doNotRecord": false, "tags": [ - "Security", - "Auditing", - "Bug", - "exploits", - "Auditing", - "Bug", - "Security" + "RWA", + "Economics", + "web3", + "Economics", + "RWA" ], "keywords": [ - "defi", - "lending protocols", - "exploit" + "Web3" ], - "duration": 567, + "duration": 1461, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "7973ee90af2d2e28084a7ccfb9e884a2054adff1ea6d872dfcad14f5d7a07916", + "sources_youtubeId": "Mf8KUbNrO58", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673749fc1b0f83434d8b3f52", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673749fc1b0f83434d8b3f52.vtt", - "transcript_text": " We have... One, one. Oh, hey, everyone. Now it's working. I'm going to tell you about one of my favorite issues, and I will show you how I was theoretically able to steal $1.1 million in 15 minutes. So how the story started? A few months ago, I was browsing the interesting applications on the blockchain, and I found a very interesting lending market. They were offering 82% returns in USDC, in stables. So a crazy good deal. So there's something wrong with this? Or is the best deal ever? Or is it scam? I decided to check it. So I did what I do the best, and I started to checking the source code. Fortunately, it was open source, so I checked everything, how the platforms works, if they're really having the loans or not, and I decided in the end to verify all the loans, collaterals, debt in the system. So in the lending market, the important thing is healthy and unhealthy loan. So basically, when you take a loan, you need to give some collateral. So let's say you want to borrow $50, you need to give $100 of collateral. And whenever the debt becomes too high, let's say it grows by 20% because value of token went up, the platform needs to liquidate the loan. Because if it didn't, then there's just not enough collateral to pay for the loan. Most of the platforms usually require pretty high collateral. In this case, let's assume it was a 2 to 1 ratio. So if I want to borrow $100, I need to give $200 in collateral. Okay, so I created a simple script to verify all the loans, assets, and collaterals. 
Everything was fine with the platform, but I decided to go even deeper. So what I noticed is that the platform was very complex. They have very complex logic, the liquidation, the flash loans, support for very untypical loans, support for multiple assets. And from my experience as an auditor, I quickly noticed it's very easy to make a mistake, especially with liquidation process. So I already was able to see some low severity issues and I decided to dig deeper if I can achieve something more with them. And the most interesting part was liquidation of unhealthy loan because it worked like a flash loan. If you want to liquidate the loan, first you get a collateral, and then you repay the debt in the end of the transaction. So it works like the flash loan, you get token X, but you are forced to repay the token Y by the end of the transaction. So this kind of logic is very complex. It's very easy to make a mistake. So I started to search, okay, how can I abuse it to get some critical issue? And there was another very complex thing, which was linked loans. Basically, you are able to have multiple loans for a single collateral. The reason was, you know, if you want to have a loan for different things, you can have it. So you just create one account with all the collateral and then create multiple loans linked to it. And the important part was how to create a linked loan. So you could do it by creating a new empty loan with no collateral, and then you linked it to the other loan which already had collateral. But I noticed they only check if the loan has no collateral. They didn't check if the loan has any debt. So that may be an issue. But the question is, is it even possible to create a loan with only debt and no collateral? And actually, there is. You know, this liquidation process from before. I noticed that this is the only place which is taking the collateral from the loan. And then, for a moment, the collateral is lower than it should be. Of course, normally, the collateral is pretty high, so you cannot take everything, but here's where they made a mistake. They allowed to start multiple liquidations of the same loan in the same transaction. So I was able to start liquidation twice and create a loan which has, for a moment, no collateral. This transaction would fail, because then when I would like to repay $100 of debt, there wouldn't be enough debt in a smart contract to actually repay it. So the transaction would fail. But fortunately, I mean, unfortunately for them, I found a way to fix this issue. So the idea is, OK, I have too much debt, which I need to repay. So to fix it, I simply need to take, after linking this loan to another one, I simply need to take more debt. And then by the end of transaction, when I try to repay $100 of debt, it's possible. So I managed to make this transaction work. And what does it accomplish? So first of all, I am able to link a loan which has only a debt to another one. And the problem with this is they assume that this loan, which I'm linking, has no debt. So whenever I was repaying, linking the loan, the debt wasn't added to the master loan. However, when I was repaying this, repaying the debt, it was being removed from the master loan. So in the end, by doing this complex transaction, I was able to remove some debt from the system, from the master loan. So with every transaction, I was able to steal some amount of the money. So now I knew I had an exploit. 
I decided to code it, spend the next 30 hours, basically during the two days, to write the exploit. It was hard and unexpected because they had a lot of running issues, so it wasn't so simple to remove all collateral to be exactly zero, but they managed to do it. So when I ran the exploit, it was, of course, on a testnet, which I created by copying the main database. In 15 minutes, by doing 1,000 transactions, I was able to extract everything. And yeah, it was $1.1 million. So 15 minutes. And of course, then I contacted the and then I contacted the project and we fixed the issue so it worked well for everyone and then I put a lot of money there because the returns were crazy high so last thing my recommendations to how to avoid this, first of all don't go into such complex logic if you don't need to keep your projects as simple as possible especially if you can't test something just don't write it have a developer with experience in cyber security of course they can audit have a bounty programmed they didn't but i contacted them on Telegram and we found out the way how to cooperate on that. Write as many tests and use platforms which can help you to stop attacks if they can be detected. If you would like to talk more about this issue, I have LabHotTop with me. We have talked an hour about this because it was quite more complex than I'm showing here, then feel free to catch me. I'm easy to find. So thank you. Any questions? So we are short on time. We're going to take one question. It's the flying mic. If a user is sending ERC20 tokens to a contract which is not designed to receive them, then these tokens are lost. This cannot happen with an ERC223 token, NFT, or native currency. There are $115 million lost because of the trust problem now. Do you think about it? I don't know what to think about it. I mean, I would need more time to see this in details. I'll show you the details. Yeah, sure, sure. We can talk about it in a second. Okay, so another round of applause for Bartosz. Time is up.", + "sources_streamethId": "6733353b3a168eb535cc568c", "eventId": "devcon-7", - "slot_start": 1731657600000, - "slot_end": 1731658200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1_JwwqcHhRqpyNIOuusmiEAr-roI7bAxIOH-9iiMKSaM", - "resources_slides": null, + "slot_start": 1731405600000, + "slot_end": 1731407400000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1rY0yIyNGkdtc2aIioukR3vUzIU0ERrllvWthuyIH1UU", + "resources_slides": "https://drive.google.com/file/d/1XPGaufR9IeaZorFFzBSiuM5zplJyZOVM/view", "speakers": [ - "bartosz-barwikowski" + "justin-banon", + "jason-potts" ] }, "vector": [ - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -414339,6 +413175,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -414690,7 +413527,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -414722,6 +413558,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -414840,9 +413677,17 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, - 2, 0, 0, 0, @@ -414968,18 +413813,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -415251,16 +414084,14 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, - 2, 0, 0, 0, + 2, 0, 0, 0, @@ -415275,43 +414106,47 @@ }, { "session": { - "id": "how-web3-and-rwas-unlock-exponential-wealth-via-a-computable-economy", - "sourceId": "GFAA97", - "title": "How Web3 and RWAs Unlock Exponential Wealth via a Computable Economy.", - "description": "Keynote based on Justin Banon And Prof. 
Jason Potts academic paper: How Web3 enables the transition to a new computable economy and exponential growth in economic complexity, wealth, and prosperity by extending the reliability and programmability of on-chain transactions to the entire economy via RWA tokenization. Web3 is not just a new information technology, it is a new institutional technology on the scale of language, writing and code.", + "id": "human-stories-of-real-world-ethereum-next-billion-fellows-ef", + "sourceId": "7SXGVX", + "title": "Human stories of real world Ethereum - Next Billion Fellows (EF)", + "description": "Next Billion Fellows work on projects that give a glimpse of what Ethereum means to everyday people. Through their lens, we can see what human coordination might look like someday. Come discuss the realworld, tangible impact of Ethereum on Fellows’ communities and explore the challenges they face along the way.", "track": "Real World Ethereum", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Business", + "type": "Workshop", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, - "tags": [ - "RWA", - "Economics", - "web3", - "Economics", - "RWA" - ], "keywords": [ - "Web3" + "real", + "world", + "usecases" + ], + "tags": [ + "Free Speech", + "Not financial", + "Public good", + "Quadratic Voting", + "Use Cases" ], - "duration": 1461, "language": "en", - "sources_swarmHash": "7973ee90af2d2e28084a7ccfb9e884a2054adff1ea6d872dfcad14f5d7a07916", - "sources_youtubeId": "Mf8KUbNrO58", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6733353b3a168eb535cc568c", - "eventId": "devcon-7", - "slot_start": 1731405600000, - "slot_end": 1731407400000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1rY0yIyNGkdtc2aIioukR3vUzIU0ERrllvWthuyIH1UU", - "resources_slides": null, "speakers": [ - "justin-banon", - "jason-potts" - ] + "team-next-billion-ef", + "david-uzochukwu", + "eddie-kago", + "guo-liu", + "mercedes-rodriguez-simon", + "valeriia-panina", + "karam-alhamad", + "tomislav-mamic", + "rebecca-mqamelo", + "lefteris-arapakis" + ], + "eventId": "devcon-7", + "slot_start": 1731486600000, + "slot_end": 1731497400000, + "slot_roomId": "breakout-2", + "resources_presentation": "https://docs.google.com/presentation/d/1cnh924lOiBxB_1BdOH0enegLlg7UzzZ8tJU5R7Qt-wI", + "resources_slides": "https://drive.google.com/file/d/1yboVz8dzE4QSNeurAq-wVHr0poDOuraH/view" }, "vector": [ 0, @@ -415394,6 +414229,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -415681,7 +414517,7 @@ 0, 0, 0, - 0, + 6, 0, 0, 0, @@ -415716,6 +414552,12 @@ 0, 6, 6, + 6, + 6, + 6, + 6, + 6, + 6, 0, 0, 0, @@ -416082,6 +414924,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -416100,19 +414943,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -416146,6 +414976,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -416175,6 +415006,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -416219,7 +415051,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -416275,6 +415106,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -416302,6 +415134,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -416443,7 +415276,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -416643,61 +415475,62 @@ 0, 0, 0, - 0, - 0, 0 ] }, { "session": { - "id": "human-stories-of-real-world-ethereum-next-billion-fellows-ef", - "sourceId": "7SXGVX", - "title": "Human stories of real world Ethereum - Next Billion Fellows (EF)", - "description": "Next Billion Fellows work on projects that give a glimpse of what Ethereum means to everyday people. 
Through their lens, we can see what human coordination might look like someday. Come discuss the realworld, tangible impact of Ethereum on Fellows’ communities and explore the challenges they face along the way.", - "track": "Real World Ethereum", + "id": "hunt-the-bug-save-the-chain-uncovering-bugs-in-eip-implementations", + "sourceId": "UQ8MWW", + "title": "Hunt the Bug, Save the Chain: Uncovering Bugs in EIP Implementations", + "description": "In this workshop you can find a bug in an EIP implementation on a test network!\r\n\r\nThe Ethereum Foundation Testing Team oversees cross-client execution specification testing, which is critical to avoid consensus issues at the smart-contract execution level.\r\n\r\nYou'll implement tests for a new EIP from scratch using the ethereum/execution-spec-tests framework and execute them on a local test network with a faulty client. Anyone attending has the chance to find the issue and break the network!", + "track": "Core Protocol", "type": "Workshop", - "expertise": "Beginner", - "audience": "Community", - "featured": false, + "expertise": "Intermediate", + "audience": "Engineering", + "featured": true, "doNotRecord": false, - "keywords": [ - "real", - "world", - "usecases" - ], "tags": [ - "Free Speech", - "Not financial", - "Public good", - "Quadratic Voting", - "Use Cases" + "Core Protocol", + "Security", + "Testing", + "python", + "pytest", + "specs", + "Core Protocol", + "Security", + "Testing" ], - "language": "en", - "speakers": [ - "team-next-billion-ef", - "david-uzochukwu", - "eddie-kago", - "guo-liu", - "mercedes-rodriguez-simon", - "valeriia-panina", - "karam-alhamad", - "tomislav-mamic", - "rebecca-mqamelo", - "lefteris-arapakis" + "keywords": [ + "Python", + "Pytest", + "Specs" ], + "duration": 6666, + "language": "en", + "sources_swarmHash": "d6bd3d078ed4fb9ae1b8c781897264b4738782f469f0d370dea340349c2b9587", + "sources_youtubeId": "K0pQ7bRuJOk", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731486600000, - "slot_end": 1731497400000, - "slot_roomId": "breakout-2", - "resources_presentation": "https://docs.google.com/presentation/d/1cnh924lOiBxB_1BdOH0enegLlg7UzzZ8tJU5R7Qt-wI" + "slot_start": 1731472200000, + "slot_end": 1731479400000, + "slot_roomId": "classroom-d", + "resources_presentation": "https://docs.google.com/presentation/d/117F-s4Jnf3r7cRIQqAwsYqwIGULHx4JTcdJjW64wZag", + "resources_slides": "https://drive.google.com/file/d/10lb4qgbLyZnPGyWgUhOLCvXsBWXLs8wl/view", + "speakers": [ + "mario-vega", + "danceratopz", + "dimitry-kh", + "spencer-taylor-brown" + ] }, "vector": [ 0, 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -416772,10 +415605,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -417061,67 +415890,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -417168,6 +415936,10 @@ 0, 0, 0, + 6, + 6, + 6, + 6, 0, 0, 0, @@ -417470,7 +416242,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -417504,6 +416275,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -417552,12 +416324,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -417652,8 +416418,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -417680,11 +416444,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -417760,6 +416519,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -417893,6 +416653,9 @@ 0, 0, 0, + 2, 
+ 2, + 2, 0, 0, 0, @@ -418008,89 +416771,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ] - }, - { - "session": { - "id": "hunt-the-bug-save-the-chain-uncovering-bugs-in-eip-implementations", - "sourceId": "UQ8MWW", - "title": "Hunt the Bug, Save the Chain: Uncovering Bugs in EIP Implementations", - "description": "In this workshop you can find a bug in an EIP implementation on a test network!\r\n\r\nThe Ethereum Foundation Testing Team oversees cross-client execution specification testing, which is critical to avoid consensus issues at the smart-contract execution level.\r\n\r\nYou'll implement tests for a new EIP from scratch using the ethereum/execution-spec-tests framework and execute them on a local test network with a faulty client. Anyone attending has the chance to find the issue and break the network!", - "track": "Core Protocol", - "type": "Workshop", - "expertise": "Intermediate", - "audience": "Engineering", - "featured": true, - "doNotRecord": false, - "tags": [ - "Core Protocol", - "Security", - "Testing", - "python", - "pytest", - "specs", - "Core Protocol", - "Security", - "Testing" - ], - "keywords": [ - "Python", - "Pytest", - "Specs" - ], - "duration": 6666, - "language": "en", - "sources_swarmHash": "d6bd3d078ed4fb9ae1b8c781897264b4738782f469f0d370dea340349c2b9587", - "sources_youtubeId": "K0pQ7bRuJOk", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731472200000, - "slot_end": 1731479400000, - "slot_roomId": "classroom-d", - "resources_presentation": "https://docs.google.com/presentation/d/117F-s4Jnf3r7cRIQqAwsYqwIGULHx4JTcdJjW64wZag", - "resources_slides": null, - "speakers": [ - "mario-vega", - "danceratopz", - "dimitry-kh", - "spencer-taylor-brown" - ] - }, - "vector": [ - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -418156,9 +416836,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -418171,11 +416853,59 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "i-read-every-single-1990s-cypherpunk-email-heres-what-you-should-know", + "sourceId": "V8FHZL", + "title": "I read every single 1990s Cypherpunk email. Here's what you should know.", + "description": "What would Hal Finney, Tim May, David Chaum, and other cypherpunks think about the current state of Ethereum, cryptography, privacy, and trusted hardware? I read every single 1990s cypherpunk email (thousands) to learn more the original movement. 
I gathered the most interesting and relevant cypherpunk emails, and put them together to make this best-of-the-best cypherpunk presentation.", + "track": "Cypherpunk & Privacy", + "type": "Talk", + "expertise": "Beginner", + "audience": "Community", + "featured": false, + "doNotRecord": false, + "tags": [ + "Permissionless", + "Free Speech", + "Censorship Resistance", + "cypherpunk", + "Censorship Resistance", + "Free Speech", + "Permissionless" + ], + "keywords": [ + "Cypherpunk" + ], + "duration": 1437, + "language": "en", + "sources_swarmHash": "fae41435b03f33e46404a9520b791f54ab7e3b8d4c4c283b8ed9535edcef438d", + "sources_youtubeId": "4DtB96PlAtQ", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67346f1e9dbb7a90e1e41835", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "eventId": "devcon-7", + "slot_start": 1731484800000, + "slot_end": 1731486600000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1GfxZnDdh1oYJ0Cmi0EqvJ6n5WY4Rvok97rq_GW9HmJA", + "resources_slides": "https://drive.google.com/file/d/1GeYZ6UhWREC-3HLhVaL7MfPH7Xul4fQP/view", + "speakers": [ + "porter-adams" + ] + }, + "vector": [ 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -418485,10 +417215,6 @@ 0, 0, 0, - 6, - 6, - 6, - 6, 0, 0, 0, @@ -418587,6 +417313,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -418826,7 +417553,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -418845,7 +417571,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -418950,6 +417675,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -419189,6 +417915,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -419205,17 +417932,6 @@ 0, 0, 0, - 2, - 2, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -419313,6 +418029,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -419389,11 +418106,9 @@ 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, @@ -419406,59 +418121,11 @@ 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "i-read-every-single-1990s-cypherpunk-email-heres-what-you-should-know", - "sourceId": "V8FHZL", - "title": "I read every single 1990s Cypherpunk email. Here's what you should know.", - "description": "What would Hal Finney, Tim May, David Chaum, and other cypherpunks think about the current state of Ethereum, cryptography, privacy, and trusted hardware? I read every single 1990s cypherpunk email (thousands) to learn more the original movement. 
I gathered the most interesting and relevant cypherpunk emails, and put them together to make this best-of-the-best cypherpunk presentation.", - "track": "Cypherpunk & Privacy", - "type": "Talk", - "expertise": "Beginner", - "audience": "Community", - "featured": false, - "doNotRecord": false, - "tags": [ - "Permissionless", - "Free Speech", - "Censorship Resistance", - "cypherpunk", - "Censorship Resistance", - "Free Speech", - "Permissionless" - ], - "keywords": [ - "Cypherpunk" - ], - "duration": 1437, - "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "67346f1e9dbb7a90e1e41835", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731484800000, - "slot_end": 1731486600000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1GfxZnDdh1oYJ0Cmi0EqvJ6n5WY4Rvok97rq_GW9HmJA", - "resources_slides": null, - "speakers": [ - "porter-adams" - ] - }, - "vector": [ 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -419544,12 +418211,14 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -419557,6 +418226,48 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "impossibility-within-dynamically-available-protocols", + "sourceId": "SUNDNH", + "title": "Impossibility within Dynamically Available Protocols", + "description": "This talk will be about dynamically available protocols and their properties. LMD-GHOST which is the fork choice rule for Ethereum consensus currently can face ex-ante and re-org attacks. GoldFish and other protocols aim to fix this but they themselves then face problems with asynchrony resilience and subcommittees. \r\nI also want to present possible solutions to these issues and establish some impossibility results that might be useful in consensus research for path towards single slot finality.", + "track": "[CLS] EPF Day", + "type": "Lightning Talk", + "expertise": "Expert", + "audience": "Academic", + "featured": false, + "doNotRecord": false, + "tags": [ + "Consensus Mechanisms", + "Finality", + "Single-slot Finality" + ], + "keywords": [ + "Dynamic", + "Availability" + ], + "duration": 847, + "language": "en", + "sources_swarmHash": "0e4939a7f956db573c7fdbe8e3f1c28adcd318a6659903b592bd01ac09bde4a5", + "sources_youtubeId": "ZZS5hKyxqOQ", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67347eef9dbb7a90e1ae0ce5", + "eventId": "devcon-7", + "slot_start": 1731488400000, + "slot_end": 1731489300000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1_2sjOdakXbWTFCsQUBSCgpvHSd9_OwHcKRN41aiBnJc", + "resources_slides": "https://drive.google.com/file/d/1BBizpHatfuYq__EhDqW4oVpuZHF84Qey/view", + "speakers": [ + "yash-saraswat" + ] + }, + "vector": [ 0, 0, 0, @@ -419572,6 +418283,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -419867,7 +418579,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -419971,6 +418682,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -420231,7 +418943,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -420352,7 +419063,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -420472,7 +419182,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -420501,6 +419210,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -420586,20 +419296,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -420702,6 +419398,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -420769,14 +419467,12 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -420784,48 +419480,6 @@ 0, 0, 0, - 0 - ] - }, - { - "session": 
{ - "id": "impossibility-within-dynamically-available-protocols", - "sourceId": "SUNDNH", - "title": "Impossibility within Dynamically Available Protocols", - "description": "This talk will be about dynamically available protocols and their properties. LMD-GHOST which is the fork choice rule for Ethereum consensus currently can face ex-ante and re-org attacks. GoldFish and other protocols aim to fix this but they themselves then face problems with asynchrony resilience and subcommittees. \r\nI also want to present possible solutions to these issues and establish some impossibility results that might be useful in consensus research for path towards single slot finality.", - "track": "[CLS] EPF Day", - "type": "Lightning Talk", - "expertise": "Expert", - "audience": "Academic", - "featured": false, - "doNotRecord": false, - "tags": [ - "Consensus Mechanisms", - "Finality", - "Single-slot Finality" - ], - "keywords": [ - "Dynamic", - "Availability" - ], - "duration": 847, - "language": "en", - "sources_swarmHash": "0e4939a7f956db573c7fdbe8e3f1c28adcd318a6659903b592bd01ac09bde4a5", - "sources_youtubeId": "ZZS5hKyxqOQ", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "67347eef9dbb7a90e1ae0ce5", - "eventId": "devcon-7", - "slot_start": 1731488400000, - "slot_end": 1731489300000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1_2sjOdakXbWTFCsQUBSCgpvHSd9_OwHcKRN41aiBnJc", - "resources_slides": null, - "speakers": [ - "yash-saraswat" - ] - }, - "vector": [ 0, 0, 0, @@ -420841,7 +419495,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -420927,6 +419580,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -420936,9 +419590,63 @@ 0, 0, 0, + 2, 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "improving-the-user-experience-by-user-research", + "sourceId": "ZVUFEY", + "title": "Improving the User Experience by User Research.", + "description": "This workshop will help you understand your users and their needs, motivations and problems because this is a critical stage in product development.\r\nThis will help reduce development risks and costs through improved user experience, decision validity, increased user loyalty, etc.\r\nWe will practice in-depth interviews at the workshop, analyze its results and create a Customer Journey Map.", + "track": "Usability", + "type": "Workshop", + "expertise": "Beginner", + "audience": "Product", + "featured": false, + "doNotRecord": false, + "tags": [ + "User Experience", + "Interface", + "Accessibility", + "User Research", + "adoption", + "blockchain", + "mass", + "Accessibility", + "Interface", + "User Experience", + "User Research" + ], + "keywords": [ + "Customer Journey Map", + "In-depth interviews", + "Blockchain Mass Adoption." 
+ ], + "duration": 4254, + "language": "en", + "sources_swarmHash": "e32e0f241560e407a1450d712a6db039fb681b3956e5c8e6798b4616c6a901b3", + "sources_youtubeId": "1PpaGPkCfpY", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673457ee9dbb7a90e1199e95", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "eventId": "devcon-7", + "slot_start": 1731389400000, + "slot_end": 1731394800000, + "slot_roomId": "classroom-c", + "resources_presentation": "https://docs.google.com/presentation/d/1FKJnGwx0Fa6M46QKoFqfn0W7-iZIbFqvnLkxjd-Pct0", + "resources_slides": "https://drive.google.com/file/d/1xTs3bDUS3LfpXJ3fcPXyM1_bZT38VR6D/view", + "speakers": [ + "andrii-bondar" + ] + }, + "vector": [ 0, 0, 0, @@ -420947,6 +419655,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -421241,7 +419950,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -421354,6 +420062,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -421702,6 +420411,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -421745,6 +420455,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -421826,6 +420537,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -421841,6 +420553,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -421880,6 +420594,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -421960,23 +420675,6 @@ 0, 0, 0, - 2, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -422143,7 +420841,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -422153,63 +420850,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0 - ] - }, - { - "session": { - "id": "improving-the-user-experience-by-user-research", - "sourceId": "ZVUFEY", - "title": "Improving the User Experience by User Research.", - "description": "This workshop will help you understand your users and their needs, motivations and problems because this is a critical stage in product development.\r\nThis will help reduce development risks and costs through improved user experience, decision validity, increased user loyalty, etc.\r\nWe will practice in-depth interviews at the workshop, analyze its results and create a Customer Journey Map.", - "track": "Usability", - "type": "Workshop", - "expertise": "Beginner", - "audience": "Product", - "featured": false, - "doNotRecord": false, - "tags": [ - "User Experience", - "Interface", - "Accessibility", - "User Research", - "adoption", - "blockchain", - "mass", - "Accessibility", - "Interface", - "User Experience", - "User Research" - ], - "keywords": [ - "Customer Journey Map", - "In-depth interviews", - "Blockchain Mass Adoption." - ], - "duration": 4254, - "language": "en", - "sources_swarmHash": "e32e0f241560e407a1450d712a6db039fb681b3956e5c8e6798b4616c6a901b3", - "sources_youtubeId": "1PpaGPkCfpY", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673457ee9dbb7a90e1199e95", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731389400000, - "slot_end": 1731394800000, - "slot_roomId": "classroom-c", - "resources_presentation": "https://docs.google.com/presentation/d/1FKJnGwx0Fa6M46QKoFqfn0W7-iZIbFqvnLkxjd-Pct0", - "resources_slides": null, - "speakers": [ - "andrii-bondar" - ] - }, - "vector": [ 0, 0, 0, @@ -422218,7 +420858,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -422319,11 +420958,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -422332,6 +420973,33 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "inch", + "sourceId": "AWQHPU", + "title": "INCH", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! 
Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", + "audience": "Engineering", + "featured": false, + "doNotRecord": false, + "keywords": [], + "tags": [], + "language": "en", + "speakers": [], + "eventId": "devcon-7", + "slot_start": 1731580200000, + "slot_end": 1731583800000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1XIExS1_AoQ1qy7x-JA9-WtnrbRg6bJqnZ5hnpK-w-Sw", + "resources_slides": "" + }, + "vector": [ 0, 0, 0, @@ -422341,6 +421009,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -422626,7 +421295,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -422977,7 +421645,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -423021,7 +421688,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -423048,7 +421714,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -423103,7 +421768,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -423119,8 +421783,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -423160,8 +421822,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -423526,13 +422186,11 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -423541,32 +422199,6 @@ 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "inch", - "sourceId": "AWQHPU", - "title": "INCH", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", - "audience": "Engineering", - "featured": false, - "doNotRecord": false, - "keywords": [], - "tags": [], - "language": "en", - "speakers": [], - "eventId": "devcon-7", - "slot_start": 1731580200000, - "slot_end": 1731583800000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1XIExS1_AoQ1qy7x-JA9-WtnrbRg6bJqnZ5hnpK-w-Sw" - }, - "vector": [ 0, 0, 0, @@ -423576,7 +422208,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -423679,8 +422310,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -423693,8 +422326,56 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "inclusion-list-inevitable-tradeoffs", + "sourceId": "XEE9EG", + "title": "Inclusion List Inevitable Tradeoffs", + "description": "Inclusion lists have been a popular topic over the years, with various versions emerging, such as EIP-7547 and FOCIL. All these inclusion lists are constrained by a common trade-off: the Ethereum slot time. 
This talk explores the details of this trade-off and examines whether there is a \"best\" solution given these constraints.", "track": "Cryptoeconomics", "type": "Lightning Talk", "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "Decentralization Improvements", "Censorship Resistance", "inclusivity", "lists", "Censorship Resistance", "Decentralization Improvements" ], "keywords": [ "inclusion", "list" ], "duration": 426, "language": "en", "sources_swarmHash": "", "sources_youtubeId": "GKBV62BamGo", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673473f09dbb7a90e128f649", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673473f09dbb7a90e128f649.vtt", "transcript_text": " So this one is again on inclusion lists, and it's more on the engineering side, as someone that has been focusing on inclusion lists over the last year, and I also started looking at Fossil more and more, and this is where my perspective in terms of engineering challenges with regard to inclusion lists is. So today, where are we, right? So today we have the Ethereum slot, and each slot is 12 seconds. And what are the constraints within the slot? So as a proposer, I want to propose a block. And I would hate to have my block get reorged; that's not nice. So I want to propose my block on the strongest head that's possible, right? And after I propose a block, everyone else on the network that's running a node will verify the block and compute what is the head. And as attesters, their job is to attest to the head of the block. So this is where the fork choice follows, if you use LMD-GHOST, and then as an aggregator, as optional, you aggregate attestations, and then the proposer essentially follows the votes for the next slot, and then it builds on top of the head. And then because of timing games, you can see there is a phenomenon that today, the attester cutoff is at four seconds. So basically everything is pushed towards the four-second mark. And that's kind of the equilibrium right now: everything happens within three to four seconds. And then between four and 12 seconds nothing typically happens, unless you are the next slot proposer: you are listening to transactions, and you are building blocks. So where does the inclusion list fit into this picture? So here I'm speaking in terms of Fossil. So Fossil is EIP-7805, I believe, but go search it if you don't know what Fossil is. So Fossil is probably the best inclusion list design that I have seen so far that is mostly bribery-proof, in my opinion. And then it has the same-slot property. And then, so what Fossil does essentially is that it allows a secondary set of proposers that are allowed to send their local lists, such that it can constrain the next slot proposer, essentially. So where does that leave us in the picture, right? So because of that, we have to essentially add this proposer in the middle of the 12-second slot. That means that as a proposer for the inclusion list, I have to essentially verify the block beforehand, such that I can propose the best inclusion list effort, right? And then as a constraint, as the next slot builder or the proposer, I have to essentially pack the inclusion list into the block. And then if I miss the inclusion list, then I may miss my block. 
And also as an attester, I want to make sure that the block satisfies the inclusion list. So there are three more constraints, as a builder, as a proposer, as an attester, which I cover here. So what are some parameters that we can play with in terms of trade-offs? So for example, how big is the size of an inclusion list? If the inclusion list size is too small, then it may not be useful, but if the inclusion list size is too big, then you open up the network to DoS concerns. What is the size of the inclusion list committee? Because we want the committee size to be reasonable, but then if the size is too big, again you open up the network to DoS concerns. And then, how much overlap is there within the inclusion lists? And then, what is the satisfaction rule, right? So for the proposer for the next slot, as an attester I'm verifying the block: basically, how much of the inclusion list does the proposer have to satisfy for the block to be valid? So what are the concerns? So the first concern, I think, is the increase of bandwidth and compute for a node, which depends on how big your inclusion list size is. And then, again, the second concern is, as the proposer, how much time do I have to build the block? And then, as an attester, how much time do I have to verify the block? So here are some open questions for us to study if we're interested in this inclusion list space. How compatible is it with the future roadmap, such as PeerDAS, such as ePBS? How does the inclusion list work with account abstraction? And then how can we add blob transactions into the inclusion list such that it doesn't open up those DoS concerns? And then how can we better utilize the local mempool for inclusion lists? Maybe we can just essentially send the transaction hash instead of sending the full transactions. Finally, will there be an out-of-protocol market for inclusion lists? And that's something that we need to study more. So yeah, if you're interested to contribute, hit up Julian and hit up me; I'm definitely very excited about this inclusion list design space. Thank you. Thank you. So now we have time for a few Q&A. Please raise your hand if you have any questions. OK. No questions? OK, it seems that there's a question here. Yeah, super interesting. I had a question. So I don't know if you can answer this, but in any capacity are you thinking about doing Fossil or inclusion lists for L2s like Arbitrum, and how is that different, versus the EIP and what could be on mainnet? Right, so on L2 today, most of the literature, or all of the literature today, they have just one sequencer, right? So the sequencer definitely has a lot of power. Say today, if you want to force your transactions in there, like if the sequencer ignores you, there's nothing you can do. But then there's a lot of people who say, well, you can force a transaction through layer 1, but that's also not nice, because you have to wait like 24 hours, right? But then I think decentralized sequencers kind of solve this, if you assume an honest majority. So I would say the space in terms of censorship resistance on L2 is definitely very different than on L1, because on L2, instead of essentially having like 1 million validators, you could just have like 10 sequencers and then trust an honest majority, and then as long as you assume some of them are honest, then basically they have to include your transactions. Any other question? Okay, well, thank you very much for your talk. 
Please give some applause to Terence.", + "eventId": "devcon-7", + "slot_start": 1731489600000, + "slot_end": 1731490200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/18aJAdqUOqTUSwaSiW85kTjIKaVx1BRU7lQDigrzc_wc", + "resources_slides": "https://drive.google.com/file/d/1uLT2CwpWgLHUSedTDr-OdnBBtuXhAQ9a/view", + "speakers": [ + "terence" + ] + }, + "vector": [ 0, 0, + 6, 0, 0, 0, @@ -424034,6 +422715,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -424445,6 +423127,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -424586,6 +423269,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -424821,6 +423505,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -424882,10 +423568,8 @@ 0, 0, 0, - 2, 0, 0, - 2, 0, 0, 0, @@ -424898,56 +423582,8 @@ 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "inclusion-list-inevitable-tradeoffs", - "sourceId": "XEE9EG", - "title": "Inclusion List Inevitable Tradeoffs", - "description": "Inclusion lists have been a popular topic over the years, with various versions emerging, such as EIP-7547 and FOCIL. All these inclusion lists are constrained by a common trade-off: the Ethereum slot time. This talk explores the details of this trade-off and examines whether there is a \"best\" solution given these constraints.", - "track": "Cryptoeconomics", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Engineering", - "featured": false, - "doNotRecord": false, - "tags": [ - "Decentralization Improvements", - "Censorship Resistance", - "inclusivity", - "lists", - "Censorship Resistance", - "Decentralization Improvements" - ], - "keywords": [ - "inclusion", - "list" - ], - "duration": 426, - "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673473f09dbb7a90e128f649", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673473f09dbb7a90e128f649.vtt", - "transcript_text": " So this one is more again on inclusion this and it's more on the engineering side that as someone that has been focusing on inclusion this over the last year and I also started looking at fossil more and more and this is where my perspective in terms of engineering challenge with regards to inclusion is. So today, where we are, right? So today we have Ethereum slot and each slot is 12 seconds. And what are the constraints within the slot? So as a proposer, I want to propose a block. And I would hate to have my block gets reorg and that's not nice So I want to propose My block on the strongest head that's possible right and after I propose a block everyone else on the network that's running a node will verify the block and compute what is the head and As a tester their job is to attest to the head of the block So this is where the for choice follows if you use lmdGhost, and then as an aggregator, as optional, you aggregate attestations, and then the proposer essentially follows the votes for the next slot, and then it builds on top of the head. And then because of timing game, you can see there is a phenomenon that today, the attester caught up is at four seconds. So basically everything is pushed towards a four second mark. And that's kind of the equilibrium right now that everything happens within three to four seconds. And then between four to 12 seconds of nothing typically happens unless you are the next law proposer, you are listening to transactions, and you are building blood. So where does inclusion list fit in into this picture? 
So here I'm speaking in terms of Fossil. So Fossil is a EIP 7805 I believe, but go search it if you don't know what Fossil is. So Fossil is probably the best inclusion list design that I have seen so far that is mostly bully-proof, in my opinion. And then it has the same slot property. And then, so what Fossil does essentially is that it essentially allows secondary runs of proposals that are allowed to send their local block and then such that it can train the next slot proposer, essentially. So where does that leave us in the picture, right? So because of that, we have to essentially add this proposer in the middle of the 12 slots. That means that as a proposer for the inclusion disk, I have to essentially verify the block beforehand, such that I want to essentially propose the block beforehand such that I want to essentially propose the best inclusion disk effort, right? And then as a constraint, the next slot builder or the proposer, I have to essentially pack the inclusion disk into the block. And then if I miss inclusion disk, then I may miss my block. And also as a tester, I want to make sure that the block satisfies the inclusion this. So there are three more constraints as a builder, as a proposer, as a tester, which I cover here. So where are some parameters that we can play in terms of trade-off? So for example, how big is the size of an inclusion disk? If the inclusion disk size is so small that it may not be useful, but the inclusion disk size is too big, then you open up network for DOS concerns. What is the size of the inclusion disk committee? Because we want committee size to be reasonable, but then if the size is too big again you open up network does concern and then how much overlapping are there within the inclusion this and then what is the satisfactory rule right so as a proposal for the next slot as a tester i'm verifying the block like what like basically like how much of the inclusion this the proposal has to satisfy for the block block to be valid. So what are the concerns? So first concern, I think, is the increase of bandwidth and compute for node. Like, depends on how big your inclusion disk size is. And then, again, the second concern is that proposer, like, how much time do I have to build the block? And then, as a tester, how much time do I have to verify the block? So here are some open questions for us to study if we're interested in this inclusion disk space. How compatible it is with the future roadmap, such as peer-dos, such as EPPS? How does inclusion disk work with account abstraction? And then how can we add block transactions into the inclusion disk such that it doesn't open up those concerns? And then how can we better utilize local mempool for inclusion disk? Maybe we can just essentially send the transaction hash instead of sending the full transactions. Finally, will there be other protocol market for inclusion disk? And it's something that we need to study more. So yeah, if you're interested to contribute, hit up julian and hit up me and then yeah definitely i'm definitely very excited about this inclusion this design space thank you thank you so now we have time for a few Q&A. Please raise your hand if you have any questions. OK. No questions? OK, it seems that there's a question here. Yeah, super interesting. I had a question. 
So I don't know if you can answer this but um in any capacity are you thinking about doing fossil or inclusion lists for l2s like arbitrum or yeah and how is that different from wasn't the eip and what could be on mainnet right so there too today is most of the literature or all of the literature today they have just one sequencer right so the sequencer definitely have a lot of power say today if you want to force your transactions in there like if sequencer ignores you there's nothing you can do but then there's a lot of people say well you can force transaction through layer 1 but that's also not nice because you have to wait like 24 hours right but then like I think like decentralized sequencer kind of self-set if you assume an honest majority so I would say the space in terms of censorship resistance on there too it's it's definitely very different on there one because on there too you can essentially having like 1 million validators you could just have like 10 sequencers and then trust like honest majority and then as long as you assume some of them are honest, and they were, like basically, basically they have to include your transactions. Any other question? Okay, well, thank you very much for your talk. Please give some applause to Terence.", - "eventId": "devcon-7", - "slot_start": 1731489600000, - "slot_end": 1731490200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/18aJAdqUOqTUSwaSiW85kTjIKaVx1BRU7lQDigrzc_wc", - "resources_slides": null, - "speakers": [ - "terence" - ] - }, - "vector": [ 0, 0, - 6, 0, 0, 0, @@ -425048,7 +423684,9 @@ 0, 0, 0, + 2, 0, + 2, 0, 0, 0, @@ -425061,9 +423699,57 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "indexing-entire-24-billion-transactions-on-ethereum-in-10-hours", + "sourceId": "QEDEUG", + "title": "Indexing Entire 2.4 Billion Transactions on Ethereum in 10 Hours", + "description": "This talk covers learnings from building a general-purpose indexer which index every single transaction since genesis. There is also technical decisions when we have to deal with 7 billions records of data and how to process all of those data in less than half a day. 
Additionally, we will discuss the difference between batch data processing and real-time data processing, sharing best practices and strategies for both approaches.", + "track": "Developer Experience", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Engineering", + "featured": false, + "doNotRecord": false, + "tags": [ + "Architecture", + "Scalability", + "Event monitoring", + "data", + "processor", + "Architecture", + "Event monitoring", + "Scalability" + ], + "keywords": [ + "Data", + "Processing" + ], + "duration": 509, + "language": "en", + "sources_swarmHash": "acedb4b51b4007f8a151c15a8ebd2d0e0ca17cb2dfe9172b2556b692d4a55d05", + "sources_youtubeId": "MQsj9MWBz1M", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6734879d9dbb7a90e1230047", + "eventId": "devcon-7", + "slot_start": 1731492600000, + "slot_end": 1731493200000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1e7StVYyUS6PD_m8Qka4g3W8mafU8txCAZgD9XA95sSI", + "resources_slides": "https://drive.google.com/file/d/13pfCL_lp6_NyfbqNJeY_vD-0n1l-4NfP/view", + "speakers": [ + "panjamapong-panj-sermsawatsri" + ] + }, + "vector": [ 0, 0, 0, + 6, 0, 0, 0, @@ -425288,7 +423974,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -425477,6 +424162,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -425702,7 +424388,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -425844,7 +424529,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -425872,6 +424556,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -425949,6 +424634,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -426063,6 +424749,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -426081,8 +424768,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -426122,6 +424807,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -426194,6 +424880,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -426261,9 +424948,7 @@ 0, 0, 0, - 2, 0, - 2, 0, 0, 0, @@ -426276,57 +424961,9 @@ 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "indexing-entire-24-billion-transactions-on-ethereum-in-10-hours", - "sourceId": "QEDEUG", - "title": "Indexing Entire 2.4 Billion Transactions on Ethereum in 10 Hours", - "description": "This talk covers learnings from building a general-purpose indexer which index every single transaction since genesis. There is also technical decisions when we have to deal with 7 billions records of data and how to process all of those data in less than half a day. 
Additionally, we will discuss the difference between batch data processing and real-time data processing, sharing best practices and strategies for both approaches.", - "track": "Developer Experience", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Engineering", - "featured": false, - "doNotRecord": false, - "tags": [ - "Architecture", - "Scalability", - "Event monitoring", - "data", - "processor", - "Architecture", - "Event monitoring", - "Scalability" - ], - "keywords": [ - "Data", - "Processing" - ], - "duration": 509, - "language": "en", - "sources_swarmHash": "acedb4b51b4007f8a151c15a8ebd2d0e0ca17cb2dfe9172b2556b692d4a55d05", - "sources_youtubeId": "MQsj9MWBz1M", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6734879d9dbb7a90e1230047", - "eventId": "devcon-7", - "slot_start": 1731492600000, - "slot_end": 1731493200000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1e7StVYyUS6PD_m8Qka4g3W8mafU8txCAZgD9XA95sSI", - "resources_slides": null, - "speakers": [ - "panjamapong-panj-sermsawatsri" - ] - }, - "vector": [ 0, 0, 0, - 6, 0, 0, 0, @@ -426418,9 +425055,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -426433,9 +425072,57 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "indexing-ethereum-when-and-how-to-build-an-indexer", + "sourceId": "BGGFDD", + "title": "Indexing Ethereum: When and How to Build an Indexer", + "description": "Open source Ethereum Indexers are great for quickly getting your project off the ground. However, there are limits to these tools and in some cases building your own Indexer is the right thing to do. This talk will explore why you might want to build your own and outline a technical approach for building simple, reliable Indexers.", + "track": "Developer Experience", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", + "featured": false, + "doNotRecord": false, + "tags": [ + "Architecture", + "Developer Infrastructure", + "Best Practices", + "infrastructure", + "Architecture", + "Best Practices", + "Developer Infrastructure" + ], + "keywords": [ + "database", + "indexing", + "infrastructure" + ], + "duration": 1567, + "language": "en", + "sources_swarmHash": "0a2be92ccf1e09bf590a829255287bef39cd443f27bd890025f50ed271c35286", + "sources_youtubeId": "WgBab6kamtg", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67345e239dbb7a90e1548599", + "eventId": "devcon-7", + "slot_start": 1731481200000, + "slot_end": 1731483000000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1UA3bcjbOHIUGe57PEX-2bhr64qsal8zYSkn0UedXY0E", + "resources_slides": "https://drive.google.com/file/d/1JJdphZ73AA4Ug1x25QL7NT7047cO_Pvf/view", + "speakers": [ + "ryan-smith" + ] + }, + "vector": [ 0, 0, 0, + 6, 0, 0, 0, @@ -426740,9 +425427,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -426852,6 +425536,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -427136,7 +425821,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -427237,6 +425921,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -427244,6 +425929,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -427329,7 +426015,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -427370,6 +426055,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -427388,7 +426074,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -427461,7 +426146,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -427637,11 +426321,9 @@ 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, @@ -427654,57 +426336,9 @@ 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "indexing-ethereum-when-and-how-to-build-an-indexer", - "sourceId": "BGGFDD", - 
"title": "Indexing Ethereum: When and How to Build an Indexer", - "description": "Open source Ethereum Indexers are great for quickly getting your project off the ground. However, there are limits to these tools and in some cases building your own Indexer is the right thing to do. This talk will explore why you might want to build your own and outline a technical approach for building simple, reliable Indexers.", - "track": "Developer Experience", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", - "featured": false, - "doNotRecord": false, - "tags": [ - "Architecture", - "Developer Infrastructure", - "Best Practices", - "infrastructure", - "Architecture", - "Best Practices", - "Developer Infrastructure" - ], - "keywords": [ - "database", - "indexing", - "infrastructure" - ], - "duration": 1567, - "language": "en", - "sources_swarmHash": "0a2be92ccf1e09bf590a829255287bef39cd443f27bd890025f50ed271c35286", - "sources_youtubeId": "WgBab6kamtg", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "67345e239dbb7a90e1548599", - "eventId": "devcon-7", - "slot_start": 1731481200000, - "slot_end": 1731483000000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1UA3bcjbOHIUGe57PEX-2bhr64qsal8zYSkn0UedXY0E", - "resources_slides": null, - "speakers": [ - "ryan-smith" - ] - }, - "vector": [ 0, 0, 0, - 6, 0, 0, 0, @@ -427794,9 +426428,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -427809,6 +426445,44 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "indistinguishability-obfuscation-io", + "sourceId": "KDUKFD", + "title": "Indistinguishability Obfuscation (iO)", + "description": "There has been a lot of recent progress and interest in iO (Indistinguishability Obfuscation). 
This session will cover topics from the basics to theory and attempts at practical implementations—plus ways of breaking these attempts.", + "track": "[CLS] Programmable / Frogrammable Cryptography, by 0xPARC", + "type": "Workshop", + "expertise": "Intermediate", + "audience": "", + "featured": false, + "doNotRecord": false, + "keywords": [ + "Programmable Cryptography", + "iO" + ], + "tags": [ + "Cryptography" + ], + "language": "en", + "speakers": [ + "barry", + "tianyao-gu", + "b-l", + "janmajaya-mall" + ], + "eventId": "devcon-7", + "sources_youtubeId": "Ckh3WX00wIw", + "slot_start": 1731654900000, + "slot_end": 1731660300000, + "slot_roomId": "breakout-2", + "resources_presentation": "https://docs.google.com/presentation/d/1ezCRXGstLPjkBZnbw-GuffthHA6ChZ3jbGvDnrUxbyk", + "resources_slides": "https://drive.google.com/file/d/15VNEBz0NOjQl6A56D8PDHmEBN_uzskX6/view" + }, + "vector": [ 0, 0, 0, @@ -427823,6 +426497,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -427999,6 +426674,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -428119,7 +426795,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -428188,6 +426863,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -428225,6 +426901,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -428483,7 +427161,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -428506,7 +427183,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -428514,7 +427190,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -428569,6 +427244,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -428640,7 +427316,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -429015,11 +427690,9 @@ 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, @@ -429032,42 +427705,6 @@ 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "indistinguishability-obfuscation-io", - "sourceId": "KDUKFD", - "title": "Indistinguishability Obfuscation (iO)", - "description": "There has been a lot of recent progress and interest in iO (Indistinguishability Obfuscation). This session will cover topics from the basics to theory and attempts at practical implementations—plus ways of breaking these attempts.", - "track": "[CLS] Programmable / Frogrammable Cryptography, by 0xPARC", - "type": "Workshop", - "expertise": "Intermediate", - "audience": "", - "featured": false, - "doNotRecord": false, - "keywords": [ - "Programmable Cryptography", - "iO" - ], - "tags": [ - "Cryptography" - ], - "language": "en", - "speakers": [ - "barry", - "tianyao-gu", - "b-l", - "janmajaya-mall" - ], - "eventId": "devcon-7", - "slot_start": 1731654900000, - "slot_end": 1731660300000, - "slot_roomId": "breakout-2", - "resources_presentation": "https://docs.google.com/presentation/d/1ezCRXGstLPjkBZnbw-GuffthHA6ChZ3jbGvDnrUxbyk" - }, - "vector": [ 0, 0, 0, @@ -429082,19 +427719,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -429168,6 +427792,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -429181,12 +427806,62 @@ 0, 0, 0, + 2, 0, 0, + 0 + ] + }, + { + "session": { + "id": "insights-from-block-propagation-in-the-ethereum-p2p-network", + "sourceId": "T8GXPY", + "title": "Insights from block propagation in the Ethereum P2P network", + "description": "Libp2p’s Gossipsub protocol is one of the most critical pieces of the Ethereum protocol stack, disseminating blocks between nodes on time and ensuring that misbehaving nodes are rejected from the network. ProbeLab has studied the performance of Gossipsub in Ethereum’s P2P network, building tooling to monitor block propagations and spot abnormalities.\r\nWe revealed ample space for optimisation in the protocol, which will help define the next steps in Ethereum's roadmap. 
Come and hear our findings!", + "track": "Core Protocol", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Research", + "featured": false, + "doNotRecord": false, + "keywords": [ + "Block Propagation", + "Networking Protocols" + ], + "tags": [ + "Core Protocol", + "Architecture", + "Scalability", + "network", + "protocol", + "Architecture", + "Core Protocol", + "Scalability" + ], + "language": "en", + "sources_swarmHash": "83c6fc07386bf925ecb5ef2a2f580c555958de271f25db62a09fe412f3ca7b30", + "sources_youtubeId": "AH8NmuW7pw8", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "mikel-cortes-cortze" + ], + "eventId": "devcon-7", + "slot_start": 1731570600000, + "slot_end": 1731571200000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1Do39xW55yzxbDah8ClU174jW2BCWeaJUCWQ-N15sadE", + "resources_slides": "https://drive.google.com/file/d/1yKUmAmpPXM_jpuvHloD8Yk4resOZBXxN/view" + }, + "vector": [ 0, 0, 0, 0, + 6, 0, 0, 0, @@ -429260,7 +427935,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -429449,7 +428123,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -429487,8 +428160,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -429606,6 +428277,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -429832,7 +428504,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -429953,6 +428624,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -429995,6 +428667,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -430020,6 +428693,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -430071,6 +428745,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -430317,6 +428992,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -430382,7 +429058,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -430396,56 +429071,12 @@ 0, 0, 0, - 2, 0, 0, - 0 - ] - }, - { - "session": { - "id": "insights-from-block-propagation-in-the-ethereum-p2p-network", - "sourceId": "T8GXPY", - "title": "Insights from block propagation in the Ethereum P2P network", - "description": "Libp2p’s Gossipsub protocol is one of the most critical pieces of the Ethereum protocol stack, disseminating blocks between nodes on time and ensuring that misbehaving nodes are rejected from the network. ProbeLab has studied the performance of Gossipsub in Ethereum’s P2P network, building tooling to monitor block propagations and spot abnormalities.\r\nWe revealed ample space for optimisation in the protocol, which will help define the next steps in Ethereum's roadmap. 
Come and hear our findings!", - "track": "Core Protocol", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Research", - "featured": false, - "doNotRecord": false, - "keywords": [ - "Block Propagation", - "Networking Protocols" - ], - "tags": [ - "Core Protocol", - "Architecture", - "Scalability", - "network", - "protocol", - "Architecture", - "Core Protocol", - "Scalability" - ], - "language": "en", - "speakers": [ - "mikel-cortes-cortze" - ], - "eventId": "devcon-7", - "slot_start": 1731570600000, - "slot_end": 1731571200000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1Do39xW55yzxbDah8ClU174jW2BCWeaJUCWQ-N15sadE" - }, - "vector": [ 0, 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -430535,10 +429166,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -430550,6 +429183,55 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "interoperability-between-l2s-latest-developments-framework-and-challenges", + "sourceId": "3ZH9ST", + "title": "Interoperability between L2s: Latest developments, Framework and Challenges", + "description": "The number of L2s is growing rapidly and it’s crucial to create strong interoperability solutions to reduce liquidity fragmentation and friction for users. We provide a framework for analyzing interoperability solutions that defines 6 levels of interoperability. For each level, we deep dive the consequences on UX, DevEx, scalability, fee structures, and MEV potential. We also provide an ecosystem map categorizing the level of interoperability offered by existing projects.", + "track": "Layer 2", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Engineering", + "featured": false, + "doNotRecord": false, + "tags": [ + "Fragmentation", + "Cross-L2", + "Developer Infrastructure", + "interoperability", + "Cross-L2", + "Developer Infrastructure", + "Fragmentation" + ], + "keywords": [ + "Composability", + "Interoperability" + ], + "duration": 434, + "language": "en", + "sources_swarmHash": "f312319bdc29280d6466e892fe79207f272961439803ba5130a9043225029290", + "sources_youtubeId": "-G6oOQTb5AI", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735e5539dbb7a90e1a8b6f4", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735da889dbb7a90e132b668.vtt", + "transcript_text": " Tanya Cushman Reviewer:\" Peter van de Ven Hello. So, I'm Dave, a co-founder of Alliance to Feed the Earth in Disasters, or AllFed, also a professor at University of Canterbury in New Zealand. So AllFed's mission is to build resilience to global catastrophes, and its vision is feeding everyone no matter what. AllFed is all around the world, has board members including Robin Hanson, Andrew Sandberg, Jan Tallinn and Martin Hellman who won the Turing Award for cryptography. One example of the catastrophes we work on was there was an eruption in 1815 that caused the year without a summer in 1816. And there was famine in many parts of the world, including Europe. And there's the catastrophes we focus on disrupt food supply greater than 5% of total food production in the world. And there are many such catastrophes. One of them is abrupt climate change. Another one is extreme weather on multiple continents at the same time. And a UK government study estimated that that alone had an 80% chance of happening this century. But there are other catastrophes. 
There could be a super pest that attacks crops that's resistant to pesticides or a super weed that outcompetes crops. You could also have disruption of pollinators, disruption of beneficial bacteria, asteroid impact. And most extreme is nuclear winter. So if we have a large-scale nuclear war, we'd have burning of cities, smoke would go up into the stratosphere and stay there for up to a decade, and the global climate would be severely disrupted, around 9 degrees Celsius drop globally and the agricultural output would fall about 90 percent and to put this into the perspective you can see the yield the number of tons per hectare per year of global food production and if we had a severe nuclear winter, basically our yields would go back to before the Industrial Revolution. So what can we do about this? Well, we could relocate cool, loving crops closer to the equator, things like wheat and potatoes. Also, mushrooms don't require sunlight to grow. But there are a number of other options. We could scale up seaweed. Seaweed can grow around 10% per day, even in nuclear winter conditions. We could turn fiber or wood into sugar, so that's cellulistic sugar. We could take crop residues and make leaf protein concentrate from them. We could build greenhouses and we could also do fermentation. So there are several companies that are now turning natural gas into protein and also some companies turning hydrogen into protein. But these companies are not thinking about how to do it fast in a catastrophe, so that's what we focus on. We also look at catastrophe scenarios that could disrupt infrastructure, such as electricity. And these include extreme solar storm, a detonation of a nuclear weapon at high altitude, causing an electromagnetic pulse that could destroy electronics, or a cyber attack, which could be AI-enabled, or an extreme pandemic that could cause people to be unable or unwilling to report to critical industries. And then this would cascade across industries. cascade across industries. So as we've talked about today, future pandemics could be far more severe than COVID. They could have the transmissibility of measles and the fatality of rabies and have no vaccine. And we're not prepared for that. And we could have a collapse of of critical industries. We've done some research on potential backup plans for meeting basic needs of food, energy, and water in these scenarios. But there's another line of research I want to talk about. And that is, could we scale up some of the technologies that we've heard about today, like UV or in-room filtration like we have in this room. And it would be better if we could scale these ahead of the catastrophe, but we're not ready yet. And that's probably going to take more like tens of billions of dollars. So what we're interested in doing is figuring out whether we could scale them up very quickly with our current capability in a catastrophe, in an extreme pandemic. And another option is massively increasing ventilation from outside and also sequestering workers. And in terms of cost effectiveness, we think that for the resilient foods that I talked about earlier an investment of something like $100 million could get us research and piloting of the technologies and planning and we have some peer-reviewed papers making the case that this is a very cost-effective way of saving expected lives and also improving the long-run future. But in this case in particular, we don't need to build big industrial pilots. 
This could be done potentially for millions of dollars, so it would be extremely cost-effective. So some of the pilots, the paper factories actually have most of the equipment already that we need to turn wood into sugar, but we'd like to actually try it out and convert a paper factory into a sugar factory. Another pilot we'd like to do is a resilient satellite. So if we had an extreme solar storm or EMPs, the satellites we have now would be destroyed quickly. But if we had a satellite that was resilient to these catastrophes, we could get emergency communication. And with just one satellite in a polar orbit, it would be able to get information to everyone on Earth with just regular cell phones once per day. And here, again, the investment is just in the millions of dollars. So now I'll have Yash talk about some opportunities with crypto. Thank you, David. You might have all seen already some of the solution sets that we could have for such a scenario. But there's still more work to be done. So for the next couple of minutes, I want to explore with all of you about how we as a community can come together and build resilience to global food catastrophes. There is a lot more work to be done. Some of the things that we can do using the decentralization ethos and the defensive ethos from the crypto ecosystems could be communication of crucial information. We would want to have the capability and the capacity to distribute things like disaster resilient guides or response plans when these catastrophes occur. So one thing that we can do is we already have decentralized hosting and storage through IPFS, but we would still need mechanisms that would help us when we do not have access to internet. So we need to have such technologies to be able to communicate very crucial information that could save a lot of lives during such a scenario. We could also build software solutions to enable coordination with multiple stakeholders who would do decision-making. This need not be during a catastrophe, but this could also be done without a catastrophe, too. We can utilize all of the things that the Web3 ecosystem is working on. For example, more pluralistic decision-making and using prediction markets, as Robin Hanson was talking about today for more informed policy decisions but this time only to sort of predict the different catastrophes. We can also build, govern and launch our own community-owned resilient satellites. As David previously explained, we need satellites which can be resilient to solar storms or hams. So I want to leave this with all of you about a potential idea of a food resilience DAO so that we as a community, we can together build and govern resilience at a planetary scale. Does a DAO structure suitable for this? I'm not sure yet, but would love to allow all of your inputs. But there is so much we can do together and take control of our own resilience and defense as a community. But there are some other simpler things that we can do. For example, directly supporting the work that AllFed does through Ethereum. You can just send some ETH to AllFed.Eth. Some of the things that Alfred's working on currently is sort of mapping out the entire technology roadmaps, building a technology tree for food resilience. One of them could be research on precision fermentation. You can make fats from microorganisms, but we would need to still map out what sort of bottlenecks are there in the industries. And as David pointed out, when a catastrophe occurs, how can we actually scale up very rapidly? 
So we need to have that research and plans available beforehand. We can also do some really cool pilots. We can simulate a nuclear winter condition in the Australian interior. We can do a test launch of our resilient satellite as well. So if you're interested to contribute and help, my signal is up there at h.88. You can hit me up. So I want to leave you with this about potential community-owned resilience. And if you have any questions, we can now take it. I think you can go to the QR code on the side and have some questions there.", + "eventId": "devcon-7", + "slot_start": 1731579600000, + "slot_end": 1731580200000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1DgmkfIFJfD0vf-bVsGTFZt1Nv09KHD5RE7ct8x0puek", + "resources_slides": "https://drive.google.com/file/d/1sgrmEztgz8rBpKfjG-NUOOJYQPvVrRXh/view", + "speakers": [ + "marshall-vyletel-jr", + "wei-dai" + ] + }, + "vector": [ 0, 0, 0, @@ -430557,6 +429239,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -430860,7 +429543,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -430971,6 +429653,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -431209,7 +429893,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -431252,7 +429935,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -431278,7 +429960,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -431330,7 +430011,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -431343,6 +430023,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -431353,6 +430034,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -431498,6 +430180,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -431578,7 +430261,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -431686,6 +430368,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -431753,12 +430436,10 @@ 0, 0, 0, - 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -431770,55 +430451,6 @@ 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "interoperability-between-l2s-latest-developments-framework-and-challenges", - "sourceId": "3ZH9ST", - "title": "Interoperability between L2s: Latest developments, Framework and Challenges", - "description": "The number of L2s is growing rapidly and it’s crucial to create strong interoperability solutions to reduce liquidity fragmentation and friction for users. We provide a framework for analyzing interoperability solutions that defines 6 levels of interoperability. For each level, we deep dive the consequences on UX, DevEx, scalability, fee structures, and MEV potential. We also provide an ecosystem map categorizing the level of interoperability offered by existing projects.", - "track": "Layer 2", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Engineering", - "featured": false, - "doNotRecord": false, - "tags": [ - "Fragmentation", - "Cross-L2", - "Developer Infrastructure", - "interoperability", - "Cross-L2", - "Developer Infrastructure", - "Fragmentation" - ], - "keywords": [ - "Composability", - "Interoperability" - ], - "duration": 434, - "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6735e5539dbb7a90e1a8b6f4", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735da889dbb7a90e132b668.vtt", - "transcript_text": " Tanya Cushman Reviewer:\" Peter van de Ven Hello. So, I'm Dave, a co-founder of Alliance to Feed the Earth in Disasters, or AllFed, also a professor at University of Canterbury in New Zealand. So AllFed's mission is to build resilience to global catastrophes, and its vision is feeding everyone no matter what. 
AllFed is all around the world, has board members including Robin Hanson, Andrew Sandberg, Jan Tallinn and Martin Hellman who won the Turing Award for cryptography. One example of the catastrophes we work on was there was an eruption in 1815 that caused the year without a summer in 1816. And there was famine in many parts of the world, including Europe. And there's the catastrophes we focus on disrupt food supply greater than 5% of total food production in the world. And there are many such catastrophes. One of them is abrupt climate change. Another one is extreme weather on multiple continents at the same time. And a UK government study estimated that that alone had an 80% chance of happening this century. But there are other catastrophes. There could be a super pest that attacks crops that's resistant to pesticides or a super weed that outcompetes crops. You could also have disruption of pollinators, disruption of beneficial bacteria, asteroid impact. And most extreme is nuclear winter. So if we have a large-scale nuclear war, we'd have burning of cities, smoke would go up into the stratosphere and stay there for up to a decade, and the global climate would be severely disrupted, around 9 degrees Celsius drop globally and the agricultural output would fall about 90 percent and to put this into the perspective you can see the yield the number of tons per hectare per year of global food production and if we had a severe nuclear winter, basically our yields would go back to before the Industrial Revolution. So what can we do about this? Well, we could relocate cool, loving crops closer to the equator, things like wheat and potatoes. Also, mushrooms don't require sunlight to grow. But there are a number of other options. We could scale up seaweed. Seaweed can grow around 10% per day, even in nuclear winter conditions. We could turn fiber or wood into sugar, so that's cellulistic sugar. We could take crop residues and make leaf protein concentrate from them. We could build greenhouses and we could also do fermentation. So there are several companies that are now turning natural gas into protein and also some companies turning hydrogen into protein. But these companies are not thinking about how to do it fast in a catastrophe, so that's what we focus on. We also look at catastrophe scenarios that could disrupt infrastructure, such as electricity. And these include extreme solar storm, a detonation of a nuclear weapon at high altitude, causing an electromagnetic pulse that could destroy electronics, or a cyber attack, which could be AI-enabled, or an extreme pandemic that could cause people to be unable or unwilling to report to critical industries. And then this would cascade across industries. cascade across industries. So as we've talked about today, future pandemics could be far more severe than COVID. They could have the transmissibility of measles and the fatality of rabies and have no vaccine. And we're not prepared for that. And we could have a collapse of of critical industries. We've done some research on potential backup plans for meeting basic needs of food, energy, and water in these scenarios. But there's another line of research I want to talk about. And that is, could we scale up some of the technologies that we've heard about today, like UV or in-room filtration like we have in this room. And it would be better if we could scale these ahead of the catastrophe, but we're not ready yet. And that's probably going to take more like tens of billions of dollars. 
So what we're interested in doing is figuring out whether we could scale them up very quickly with our current capability in a catastrophe, in an extreme pandemic. And another option is massively increasing ventilation from outside and also sequestering workers. And in terms of cost effectiveness, we think that for the resilient foods that I talked about earlier an investment of something like $100 million could get us research and piloting of the technologies and planning and we have some peer-reviewed papers making the case that this is a very cost-effective way of saving expected lives and also improving the long-run future. But in this case in particular, we don't need to build big industrial pilots. This could be done potentially for millions of dollars, so it would be extremely cost-effective. So some of the pilots, the paper factories actually have most of the equipment already that we need to turn wood into sugar, but we'd like to actually try it out and convert a paper factory into a sugar factory. Another pilot we'd like to do is a resilient satellite. So if we had an extreme solar storm or EMPs, the satellites we have now would be destroyed quickly. But if we had a satellite that was resilient to these catastrophes, we could get emergency communication. And with just one satellite in a polar orbit, it would be able to get information to everyone on Earth with just regular cell phones once per day. And here, again, the investment is just in the millions of dollars. So now I'll have Yash talk about some opportunities with crypto. Thank you, David. You might have all seen already some of the solution sets that we could have for such a scenario. But there's still more work to be done. So for the next couple of minutes, I want to explore with all of you about how we as a community can come together and build resilience to global food catastrophes. There is a lot more work to be done. Some of the things that we can do using the decentralization ethos and the defensive ethos from the crypto ecosystems could be communication of crucial information. We would want to have the capability and the capacity to distribute things like disaster resilient guides or response plans when these catastrophes occur. So one thing that we can do is we already have decentralized hosting and storage through IPFS, but we would still need mechanisms that would help us when we do not have access to internet. So we need to have such technologies to be able to communicate very crucial information that could save a lot of lives during such a scenario. We could also build software solutions to enable coordination with multiple stakeholders who would do decision-making. This need not be during a catastrophe, but this could also be done without a catastrophe, too. We can utilize all of the things that the Web3 ecosystem is working on. For example, more pluralistic decision-making and using prediction markets, as Robin Hanson was talking about today for more informed policy decisions but this time only to sort of predict the different catastrophes. We can also build, govern and launch our own community-owned resilient satellites. As David previously explained, we need satellites which can be resilient to solar storms or hams. So I want to leave this with all of you about a potential idea of a food resilience DAO so that we as a community, we can together build and govern resilience at a planetary scale. Does a DAO structure suitable for this? I'm not sure yet, but would love to allow all of your inputs. 
But there is so much we can do together and take control of our own resilience and defense as a community. But there are some other simpler things that we can do. For example, directly supporting the work that AllFed does through Ethereum. You can just send some ETH to AllFed.Eth. Some of the things that Alfred's working on currently is sort of mapping out the entire technology roadmaps, building a technology tree for food resilience. One of them could be research on precision fermentation. You can make fats from microorganisms, but we would need to still map out what sort of bottlenecks are there in the industries. And as David pointed out, when a catastrophe occurs, how can we actually scale up very rapidly? So we need to have that research and plans available beforehand. We can also do some really cool pilots. We can simulate a nuclear winter condition in the Australian interior. We can do a test launch of our resilient satellite as well. So if you're interested to contribute and help, my signal is up there at h.88. You can hit me up. So I want to leave you with this about potential community-owned resilience. And if you have any questions, we can now take it. I think you can go to the QR code on the side and have some questions there.", - "eventId": "devcon-7", - "slot_start": 1731579600000, - "slot_end": 1731580200000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1DgmkfIFJfD0vf-bVsGTFZt1Nv09KHD5RE7ct8x0puek", - "resources_slides": null, - "speakers": [ - "marshall-vyletel-jr", - "wei-dai" - ] - }, - "vector": [ 0, 0, 0, @@ -431826,7 +430458,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -431910,9 +430541,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -431925,9 +430558,48 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "interpreting-solidity", + "sourceId": "GQAEZX", + "title": "Interpreting Solidity", + "description": "In this talk, we present an alternative way of executing Solidity: interpreting it.\r\nFoundry popularized writing more in Solidity, including tests and scripts. However, the compilation model is limiting for some use cases, such as interactive environments or general purpose scripting. 
We first describe how interpreting can solve many of these limitations, then, we explain how to build such an interpreter, finally, we present a Solidity REPL that we built using this approach: https://eclair.so", + "track": "Developer Experience", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Developper", + "featured": false, + "doNotRecord": true, + "keywords": [ + "NA" + ], + "tags": [ + "Developer Infrastructure", + "Tooling", + "Languages", + "Developer Infrastructure", + "Languages", + "Tooling" + ], + "language": "en", + "speakers": [ + "daniel-perez" + ], + "eventId": "devcon-7", + "slot_start": 1731578400000, + "slot_end": 1731580200000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1YKUtPFBeb26s1YkKpnXAOT5YJuWFJaIAKmQLoipb0oM", + "resources_slides": "https://drive.google.com/file/d/1kq4F6OS57K5O0wbORU7X9OkHbULYHCFB/view" + }, + "vector": [ 0, 0, 0, + 6, 0, 0, 0, @@ -432241,8 +430913,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -432349,6 +431019,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -432613,7 +431284,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -432624,7 +431294,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -432696,6 +431365,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -432728,6 +431398,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -432959,7 +431630,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -433133,11 +431803,9 @@ 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, @@ -433150,47 +431818,9 @@ 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "interpreting-solidity", - "sourceId": "GQAEZX", - "title": "Interpreting Solidity", - "description": "In this talk, we present an alternative way of executing Solidity: interpreting it.\r\nFoundry popularized writing more in Solidity, including tests and scripts. However, the compilation model is limiting for some use cases, such as interactive environments or general purpose scripting. We first describe how interpreting can solve many of these limitations, then, we explain how to build such an interpreter, finally, we present a Solidity REPL that we built using this approach: https://eclair.so", - "track": "Developer Experience", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Developper", - "featured": false, - "doNotRecord": true, - "keywords": [ - "NA" - ], - "tags": [ - "Developer Infrastructure", - "Tooling", - "Languages", - "Developer Infrastructure", - "Languages", - "Tooling" - ], - "language": "en", - "speakers": [ - "daniel-perez" - ], - "eventId": "devcon-7", - "slot_start": 1731578400000, - "slot_end": 1731580200000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1YKUtPFBeb26s1YkKpnXAOT5YJuWFJaIAKmQLoipb0oM" - }, - "vector": [ 0, 0, 0, - 6, 0, 0, 0, @@ -433275,6 +431905,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -433286,10 +431917,60 @@ 0, 0, 0, + 2, 0, 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "introducing-provable-object-data", + "sourceId": "YP9HRR", + "title": "Introducing Provable Object Data", + "description": "Built on learnings from experimental projects like Zupass, Provable Object Data (POD) is a new format with open-source libraries for any app to issue verifiable data, and make ZK proofs of claims about that data. PODs allow arbitrary key/value data to be signed and distributed. Flexible proofs about PODs can be created using a highly-configurable family of General Purpose Circuits (GPCs), without app-specific circuits or trusted setup. 
This talk will focus on POD and GPC motivation and design.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Beginner", + "audience": "Developer", + "featured": false, + "doNotRecord": false, + "tags": [ + "Libraries", + "Zero-Knowledge", + "Use cases of cryptography", + "pod", + "Libraries", + "Use cases of cryptography", + "Zero-Knowledge" + ], + "keywords": [ + "Zupass", + "developers", + "POD" + ], + "duration": 1688, + "language": "en", + "sources_swarmHash": "a71f14cf60f06f7b9023ff145306f8897dfe26741a61e581bafddd95bebe1aee", + "sources_youtubeId": "nXmd0NHC59Q", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735dbd09dbb7a90e1646d41", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735dbd09dbb7a90e1646d41.vtt", + "transcript_text": " Okay, and we're live. Welcome, everybody. My name's Andrew. I'm with Xerox PARC. And I'm here to talk to you a little bit about pods. How many here have been using pods this week? I think the answer should be everyone because you got into the building somehow and that actually required one. So what are pods? So your devs con ticket is a pod. The proof of attendance to this talk that you can claim from the Q&A app that's up on the screen there right now. If you're in the room, I don't think this works remotely. That's also a pod. Some of you have been playing Frog Crypto this week. I see some frog hats out there. All those frogs are pods. A pod really can be anything. It can be a secret message. It can be your identity credentials. It could be your driver's license if we get any governments involved to actually do this it can it's cryptographic data of any sort So what is the pod framework? So pod is is a technology a framework that makes it easy for apps to issue cryptographic data and to make zk proofs about that Data, it's a data format that's optimized for fish improving It's a standard of how that data format can be sent around and things can be proven about it And it's a framework of how that data format can be sent around and things can be proven about it. And it's a framework with some developer SDKs. Check out our documentation site. I'll have a link at the end if you want to try it out. It's mostly in TypeScript, but can be used on other platforms as well. We have ports in a few other languages. So I'm hoping some of you will get some use out of it. So one last WTF is zero-knowledge proofs. How many people here have used ZK proofs before? I feel like you understand them. Okay, a few. It's kind of obscure technology. That's kind of the point of pods is to make it easier to use so you don't have to understand how the underlying math works. But in brief, a ZK proof lets you prove the validity of any private data or any computation on your private data without revealing that private data itself. And that proof is trustworthy because there's some math that basically you can only calculate if you did it validly. At Xerox PARC, we think of ZKProofs as a universal cryptographic adapter. Basically, I've got lots of different kinds of private data. By doing computations on that data in a verifiable way, I can present to somebody whatever I want that is validly proven from that data. The example in this diagram, which you'll find in our blog post, is like, what if I could calculate my own credit score from signed data I got from my bank or from the IRS? I don't need to ask a credit reporting company to gather all this stuff together. 
I can gather it myself, and I can make a provable statement about what my credit course score is and apply for a loan. This is part of the vision of something we call the Parknet, programmable cryptography internet, which we think is going to be much better once programmable cryptography catches on in all of these ways. ZK proofs are a big part of this, but it's only the beginning. See the other talks being given by my colleagues this week. Also, we have a whole day CLS tomorrow about programmable cryptography. But today we're going to be focused on ZK proofs and what pods let you do with them. So this is the pod ecosystem that we envision. You need issuers who are issuing this cryptographic data. They're mostly using a private key to sign it. Those takes the form of attestations, which users themselves can hold on to. They hold on to their own private data. They don't need an intermediary. At some point, some consumer asks the user, please prove something about yourself, your identity, your age, the fact that you went to DevCon, things like that. And then the consumer can generate a ZK proof and send, sorry, the user can send the ZK proof to the consumer who can verify that proof. They do need that third arrow in the diagram, which is a little bit of knowledge about who the attester is. You need at the very least to know that that attester has a public key that you should trust. There might also be things like what is the event ID that represents DevCon on a ticket, things like that. But that kind of completes the diagram. Okay. So why are we doing this? So I work with the team that builds ZooPass. You've all been using that to check into DevCon. And we believe that the best learning on this kind of technology comes from contact with reality, meaning we want real users to try this. We want to do it at scale. There are 12,000 people at DevCon this week who are stress-testing ZooPass for us. Thank you. I'm sorry if the performance has not always been as great, but it seems to be standing up. And we want to use these opportunities to onboard new users by bridging data that is not ZK-friendly into our ZK-friendly world and take advantage of people who are willing to be early adopters like the crypto community. So by bridging, what I mean is we're bringing data in the red boxes on this diagram Into the green world red in my diagrams of this talk and the next one means like non zk friendly systems Whereas green means ek friendly systems we can bridge it in we can then issue it like your devcon ticket Which is loaded from a database that isn't cryptographically signed And then you can the verifiers can get you into another system like telegram in order to join the the DEVCON chat group. All that is working today. In order to bring this in front of the most users, we do have to accept some constraints. So we're not using the most cutting edge of ZK technologies. We want everyone to be able to use it, which means we've built a mobile friendly web app. Which means everything we do has to run in a browser on a phone. Even an older phone, even on a bad network when the Wi-Fi is overloaded at the conference. So that became a bit of a mantra when I was building some of these technologies. There's a lot of cool ZK technology out there that is great, but it needs to run on a big back-end server and I don't have one of those when I'm in a browser on a phone. So we've got to use tried and true technologies. For people who are in the know, we use SIRCOM and GROSS16. 
You may or may not have heard of those, but that's kind of the underlying technology. They've been around for quite a few years, so they're pretty well battle tested at this point. So I want to talk a little bit about the systems we built along the way. So this is what ZooPass ticketing looked like a year ago at DevConnect when we were in Istanbul. So it's the same triangle that you've seen here. We were pulling data out of Precix and issuing the tickets. We used a format called an EDDSA ticket. That's a signed piece of data, but it's not a pod, which I'll explain a little bit later. And then we had a proving circuit where you could prove your ticket, you could reveal some fields, you can prove that you owned it, etc. So what did it take us to build this? Don't pay attention to all the details here, but look at the line counts on these PRs when we wrote these things. It's pretty large. That's quite a few lines of code that it took. And in just the ZK proof, there's about 15,000 lines of code that are still there, not including tests and documentation. So it's kind of complicated. So that was the first thing we built. The second thing we built was Frog Crypto, the first version last year, which used a very similar data format. So frogs were issued by the server as what was called an EDDSA frog, very similar format to tickets, and then you could make a proof about it, you could present it to our Zucat telegram bot who would let you into the secret frog holders chat. This all happened last year in Istanbul. So what did it take to build that? It turns out it was very similar. There was a lot of duplication of effort. There was a lot of similar patterns, but you couldn't actually reuse the underlying data. So there clearly is a pattern here, right? We want to issue some signed data. We want someone to request a proof and then to be given a proof of that signed data, we want someone to request a proof and then to be given a proof of that signed data, but it turned out that each time we had to build it, we had to rewrite a whole bunch of code in order to customize it. So I'm an engineer, I don't like this kind of complexity, I'd rather do things once because I'm lazy. So why is this so hard? So the signed data part, the EDSA PCD that we were using as our underlying data format. Used as a fixed size hash, it hashes 16 numbers in an array. And therefore, every new data type that we wanted to put in there, we had to do some custom coding to decide how those numbers in that array go together to make this data type. I would analogize this to imagine you were processing all your data in a hex editor directly as bytes. It's kind of inconvenient. We have better tools than that now. And on the proof side, ZK circuits are a little bit awkward to program. Like they don't use a normal programming model. You don't write it in a language you're used to. Every variable is what's called a field element. This is a mathematical concept. It's a very big number, modulo some big prime number, and you've got to like write equations on those field elements. So it's kind of complicated. And also, once you build a ZK circuit, it's very fixed. In order for the prover and verifier to agree on what's valid, the circuit can't change very much. You have to publish a whole new circuit. So that makes this a bit hard. I would analogize this, again, to in the hardware world, this is like an ASIC. It's a chip that does one thing. 
It might do it very well, but it still only does one thing, and every time you want to do another thing, you've got to build a whole new chip. It's kind of inconvenient. So what do we need here? Well, what we'd really like to have is what's called a ZKVM. Basically, if you have an ASIC and you want something more general, out there that lets you basically write code, run it inside of a ZK circuit and validate that this is the correct output. It's great. Some other people are giving talks about it this week. But unfortunately for our situation, it's a little bit too much. Like I said, my mantra has to work in a browser on a phone. ZK VMs are pretty big right now. You're not going to be able to do that on an older phone in a few seconds. So we have to do something a little bit more limited than that. But again, I'm an engineer, I like working within constraints and coming up with clever solutions. So here's what we came up with. So on the data side, I'm finally gonna explain to you what a pod is at some level. So a pod is just a collection of names and values. Think of it like a JSON object, except that it's flat. There's no hierarchy of nested objects, just names and values. It can have multiple data types in it for those values. The data is then cryptographically signed in a way that makes it easy to make proofs about it. And I'm going to get into more of that a little bit later. Also, I forgot to mention this at the beginning. We are having a deep dive session after this intro session. So stick around for that if you want lots more detail. But I'll give you what I can in the next 15 minutes. On the proof side, we also can generalize. So we have what we call a general purpose circuit, which means rather than having a fully CPU-like circuit in a ZKVM or having the ASIC fixed circuit, we can do something in between. I would analogize it more to an FPGA. We've got some logic blocks. We call them modules. You can feed in some inputs to your circuit in order to decide how those logic blocks are connected to each other and make a different proof every time using the same circuit. We call this framework GPC for general purpose circuit. And in addition to the circuits individually being configurable, we precompile a set of circuits in what we call a family at different sizes with different sets of modules. So when you want to make a proof, you can pick the circuit in the family that has enough modules for what you want and not any more because having a bigger circuit means more time to prove, more memory, etc. So you can make the right trade-offs there. So with that, we get the generalized version of the ZK ecosystem where every issuer is issuing pods. They might contain very different kinds of data. It might be a frog, it might be a driver's license, but it's still a pod. And then when you make proofs about it, you can freely decide what you want to prove and write a configuration to represent that proof. So with that in mind, at this point, what is a pod? So a pod is a data format that makes zkproofs easy. It's a key value store. It's going to be hashed and signed in a very specific way involving a Merkle tree, which I can explain more of later. And it's optimized for efficiency zkproving. Here's an example of a pod. So we've got some names and values. Most of these are very straightforward, so I'm not going to go through them all in detail. The one that's maybe a little bit interesting is the cardholder. So this is meant to look like a driver's license in some fictional country. 
The cardholder is my semaphore ID. This is what Zupass uses to identify you. It's really a public-private key pair. So the public key is what's going to go in the pod to say that this is my pod, or in this case, this is my driver's license. What you see on the right is the JSON format for this. It's optimized to be a little bit terse and also human readable. So things that don't need a type annotation, you'll notice don't have them because the JSON type itself is enough data for that. Once you get down to actually building the Merkle tree, like everything does have a type, but in this table I call them type hints because the type is not part of the cryptographic data. Instead, it is guidance to how do I hash this data into a piece of cryptographically verifiable data. More on that later. So the first thing I do to make this into a pod is I build a merkle tree i'm not going to go into detail on that but basically you arrange the elements into a tree you hash them all together until you get to a root and that root is what we call a content id the content id is derived from the data so if you have the same data you can derive the same content id regardless of how it was formatted in json one detail that you might notice on the right is that the names have been alphabetized. That's how we make sure that it is deterministic and you always get the same content ID. But everything else is just hashing. And then now once I've got the content ID, that's the thing that I sign. So if I'm an issuer and I want to issue a pod, first I get the data, I Merkle-ize it, I get a content ID, and then I just write a signature on that content ID, and that's enough to validate that the entire pod is valid. So we have a ZK-friendly data format. We'd probably like to do some ZK proving on it. So let's talk about the GPC side of this that is what lets you do that. As I mentioned earlier, GPCs are circuits made of reusable modules, as well as a family of multiple circuits so you can pick the size that you want. Let's look at what that looks like. So this is an example of a GPC configuration. This is how you say, what do I want to prove? And you're gonna present this as this JSON object that says what you wanna prove, and the system is going to do the rest compiling this down to what to do with the circuit so here's a very minimal proof i'm going to try and prove that i have a driver's license that says i'm allowed to drive right so i my configuration says i have a pod i'm going to call it id card this is actually an arbitrary name that's just part of the configuration to refer to it later it has some entries and one of those entries is driver. That is not an arbitrary name. That's a name that was in the pod and is going to be hashed and checked. And what do I want to do with it? Well, I want to reveal it. So is reveal is true means this is a proof. It's going to prove that I have a pod, that it contains this entry, and it's going to reveal that its value is hopefully true because I detail that wasn't on the previous slide. That's because it's done by default, so I didn't need to include it in the config, but it's important to talk about. What I proved if I don't have, think about the signup key, is I just proved that I have a pod containing the word driver with the value true. That doesn't mean it's actually a driver's license. In order to do that, you've got to do something cryptographic. So the easiest way to do that is you check that the pod was signed by a public key that is well known. 
That might be the government of California, which is where I live. Hopefully we'll get them to issue pods eventually. But that is implicit. The signing key is also always revealed by default, but you can choose to not reveal it if you want to, in which case you can constrain it in other ways you might constrain it to be equal to some other element without actually revealing it or constrain it to be a member of a list like maybe i have a list of all the signing keys of the 50 u.s states and i just want to prove i have a driver's license from one of them i don't want to tell you which one okay let's get straight and get a little bit more complicated um so i've proven that i have a driver's license that says driver equals true. I haven't actually proven that it's my driver's license yet. I could have stolen somebody else's. The thing is that pods, because they're just data, they are transferable. I can copy them. The way we make a pod bound to a single user is by putting that user's public key in it, which I showed earlier when we were looking at the entries. And the way you prove that you are that user is you make a proof that you hold the private key that corresponds to that public key. And the way you say that in the gpcconfig is this is owner ID field. You say is owner ID, and I give the type of public key I'm using, which is semaphore version 4 from our friends at PSE. And that basically means that this proof is going to be configured to check that I have the right private key in my private inputs. And in this case, it's not even going to reveal what my public key is, just that I own this pod and this pod says I can drive. Okay, let's get to a little bit more ZK and hiding some more data. Instead of proving that I'm a driver, what if I just want to prove I'm over 21? Maybe I want to go buy some alcohol. I don't know what the age is in Thailand, but back home it's 21. So I can just say I have a pod containing an entry called date of birth. That entry is not going to be revealed, but it's going to be in this range, and that's the numeric range for the date that is 21 years ago. We should make this more friendly and let you just pass in a date object, but for now it's a number. So this is a proof that I am over 21 and that I own this pod. I didn't take out that field, but everything else is not revealed and I'm being very anonymous. One last example, we can make proofs of multiple pods at once if we have a circuit with enough modules. So here's one that I'm proving I'm over 21 and also proving that I have a ticket to an event that maybe I'm going to go to an after party after DevCon. And in this case, the ticket, I'm proving that its attendee name is the same as the name in my driver's license. I'm proving that I own it and I'm also proving that the event ID of that ticket is in a valid list. I'm not revealing what I have a ticket to, but it's maybe a list of like DevCon related events that are happening in Thailand this week. So this is kind of a minimal anonymous way of checking into a party. Of course, if I'm there in person, I'm revealing some more about myself by being there, but you get the idea. Okay. So last piece of this, I've now configured my proof. I've decided what I want to prove How do I actually make a proof and all of this is an example of what you can do with the the GPC libraries So the three things I need in order to make a proof one of them is the proof config that I've already given you some examples of The second thing is the inputs. 
That's the actual pods Which I need to have in order to make proofs about them There are also other inputs like my private key or like that list of valid event IDs that I want to prove that my event ID is one of. Those are all inputs. The third thing I have to feed in is something called an artifact path. That is, where do we find the binaries that know how to generate this circuit? So when a ZK circuit is compiled, it generates a proving key, a verification key, and also a witness generator. Don't worry about what those are, but there's some like big binary things that the prover and verifier have to agree with. We distribute these via NPM. We also put them on various CDNs. You can download them. So you have to just decide for your app. Are you going to download them, put them on disk, give a path to them? Are you going to download them from a URL, there are options. Once you've got these things together, the gpc proof function will generate the proof. It puts together that configuration, it picks a circuit that fits that configuration with enough modules, it downloads the corresponding artifacts for that circuit, and it generates the proof. And then the last thing it does, oh, I should have gone to the next slide, here we go. So it needs to compile down all those inputs into circuit signals that can feed into the actual ZK circuit, which are mathematical field elements, as I mentioned. And then after it's done and it gets a valid proof, it will decompile some of the outputs and turn them into what's called the revealed claims object. So it comes out of a proof. You've got the actual claims object. So it comes out of a proof. You've got the actual mathematical proof. That's just opaque numbers that are needed by the verifier. That's the actual ZK part. You've got a bound config, which is exactly like the configuration that you fed in, except that now it contains the identifier of the circuit that was selected so that the verifier knows how to verify it correctly. And then you've got the revealed claims. If I revealed that I am a licensed driver, driver equals true, that would be in this object. If I revealed my name, et cetera, that would be here. And that's what the decompiling is for. It's taking the circuit outputs and turning them back into a string or whatever the representative thing is. Okay, so those three things are exactly what I should send to a verifier, whoever I'm gonna prove this to. They need those three things. They also need an artifact path to download the corresponding verification key. And then they can verify the proof. They just do very much the same thing. They're going to compile some of those inputs back down into ZK land where there are circuit signals. They're going to verify the proof and they're going to say yes or no, whether it's valid. And, you know, gravy, we're at the end and hopefully everything went right and I've proven what I wanted to prove to you. So final takeaways, summary of what this was a bit of a speed run through. So pods are data that's designed to be proven about. Any pod is a signed attestation of something, whether it's I have a ticket, whether it's I have a driver's license, etc. GPCs allow you to flexibly make proofs about those pods by using modular circuits, which can be configured using a JSON-like configuration language. And the system will auto-select the circuit that you need depending on your configuration. 
So all your app needs to do is say, please make me a proof of this with these inputs and everything else is handled for you. Then the last step is the verifier verifies the proof, and then the apps do have to decide what things they trust. How do you trust that this is the correct proof? Like I alluded to before, you should check that this ID card was actually signed by the government. You should know the public key or you should know the event ID for DevCon. You should also check, and I'll say a little more about this in the deep dive, that the configuration that was sent to you was actually the configuration you asked for. So you don't want the prover to say, oh, I have a proof of something, but not necessarily the thing you asked for. That's something that you should check as well. But once you do all of that, this end-to-end should be very solid and you should be getting the information you need. Okay. That's it for the speedrun intro. Please check out our documentation. They're on pod.org that just went live yesterday. And also there's a link that just went by, t.me slash zoo pass to join the telegram group. And yeah, let's go do some Q&A. All right. Where do you store the sound for identity secret for users in Zupass? So that's all client side. Zupass stores all of your private data client side. The Zupass server is aware of your public key because that's how it can make sure that you get issued the right Devcon tickets and things like that But yeah, zoo pass is a client-side cryptographic data manager To what extent is pod an open data standard, so I consider it open we haven't like published a spec for it I should work on that but all of our code is open source, so people can do interoperability with it. The pod format itself is very generic and interoperable. It's the GPC compiler that turns a pod into the specifics of what you need to prove with a specific GPC. So the GPCs are kind of less standard and generic, though they also could be used on multiple platforms. We do have an example of GPC verification on chain that just started working a couple days ago, so all that is possible outside of a browser, but we don't have as many examples there as we do on the pod data. Can we scroll down? Is there anything more? Can you compare pod to verifiable credential? Yes. This is something I looked into. Pod is simpler. It doesn't really have a fixed schema or anything that ties it into a specific standard. You could put JSON-LD-based verifiable credential data in a pod if you wanted to. But a pod is much more flexible. At the cryptographic level, there is a difference in the kind of Merkle tree we use. The pod uses the lean IMT, which is something that Semaphore created, which is much shallower because pods tend to be relatively small, as opposed to the sparse Merkle tree that is used, at least for the implementation of verifiable credentials that I'm aware of, which is the one from IDEN3. That is a much deeper Merkle tree, but it can do things like prove absence of an entry, which pods can't do. Okay. What else do we have? How frequent is pod refresh? Very frequent so far, but we're hoping to keep it much more stable after DevCon. I don't have a strong answer to that. What else? How do you convert JSON to a Merkle tree? Please stick around for the deep dive session that's coming up. I'll tell you all about that. What else? Yeah. So, the—in the example of prover and verifier, the user's device can generate the proof and that's why everything has to work in a browser on a phone. 
Client side proving is definitely the default in ZooPass. Not every app has to do it. These are libraries. You can call them wherever you want. There's much more difference between verifiers, whether they're doing server side verification or client side verification. That depends what your use case is and what you're protecting against Are the issued credentials signed and the proof that the crunch loops we scrolled away We do not use BSS signatures to verify partial properties, that's what we use the Merkel tree for again more details on that coming up Is it possible to make information in ZooPass portable? I think that pods do make that possible, yes, as long as it's a pod and there are APIs for getting data out of ZooPass if you want to. That's called the ZAPI, at which point you can take this to whatever platform you want. We have implementations of pods in Python, C, and Rust for various projects, so it's not too hard to do. How do apps know whether a proof from a verifier is legit? Well, the framework tells you that it is a valid proof. And it will confirm for you that this configuration and these revealed claims and this proof match up and are valid. So the prover couldn't have cheated about that. What they could cheat about is app level semantics. So if you ask for a proof of a driver's license and I sent you a proof of a frog instead, that's something that the framework can't tell you because it just says that's a valid proof. So you do have to check, is that the configure I asked for? Is the signer of this driver's license the government, etc.? But yeah, that's the kind of level of verification we got. Okay. I think that's it. Can we go back to the slides briefly? Okay. Those of you who are collecting frogs, I've got something for you if we can switch back to my slides. Oh, yeah. We'll leave that up for a minute or two. I think we've got like three minutes before the next session starts anyway. So feel free to frog away. Okay. And as I said, we're going to go straight into a deep dive session, which is going to be 90 minutes. We probably won't use the whole thing, but that's what we're scheduled for. 
So stick around if you want more details to answer any of those questions.", + "eventId": "devcon-7", + "slot_start": 1731569400000, + "slot_end": 1731571200000, + "slot_roomId": "classroom-b", + "resources_presentation": "https://docs.google.com/presentation/d/1M8ozawZM8Xme8xRHKoop-7XlGGAjINE02ztaxWPyaXo", + "resources_slides": "https://drive.google.com/file/d/1JillBs444Tzyk6sqezOY1tr4QT8pfkDv/view", + "speakers": [ + "andrew-twyman" + ] + }, + "vector": [ 0, 0, 0, @@ -433300,6 +431981,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -433313,6 +431995,7 @@ 0, 0, 0, + 4, 0, 0, 0, @@ -433611,7 +432294,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -433959,7 +432641,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -433992,7 +432673,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -434035,7 +432715,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -434051,6 +432730,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -434066,6 +432747,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -434426,6 +433108,71 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -434501,7 +433248,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -434513,58 +433259,89 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, 2, 0, 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, 0 ] }, { "session": { - "id": "introducing-provable-object-data", - "sourceId": "YP9HRR", - "title": "Introducing Provable Object Data", - "description": "Built on learnings from experimental projects like Zupass, Provable Object Data (POD) is a new format with open-source libraries for any app to issue verifiable data, and make ZK proofs of claims about that data. PODs allow arbitrary key/value data to be signed and distributed. Flexible proofs about PODs can be created using a highly-configurable family of General Purpose Circuits (GPCs), without app-specific circuits or trusted setup. This talk will focus on POD and GPC motivation and design.", + "id": "introduction-to-hash-based-proof-systems", + "sourceId": "EUAERD", + "title": "Introduction to hash-based proof systems", + "description": "Over the last decade, ZK has been gaining attention due to its applications in verifiable private computation and the scalability of blockchains. The development of general-purpose zkvms powered with STARK/hash-based proof systems have made writing provable applications simpler, abstracting developers from the details of ZK. 
In this talk, we will explain the basics of hash-based proof systems, different arithmetization schemes and how to prove computations without needing a trusted setup.", "track": "Applied Cryptography", - "type": "Talk", + "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Developer", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Libraries", - "Zero-Knowledge", - "Use cases of cryptography", - "pod", - "Libraries", - "Use cases of cryptography", - "Zero-Knowledge" - ], "keywords": [ - "Zupass", - "developers", - "POD" + "Binius", + "Reed-Solomon" + ], + "tags": [ + "Scalability", + "ZKP", + "STARK", + "reed-solomon", + "Scalability", + "STARK", + "ZKP" ], - "duration": 1688, "language": "en", - "sources_swarmHash": "a71f14cf60f06f7b9023ff145306f8897dfe26741a61e581bafddd95bebe1aee", - "sources_youtubeId": "nXmd0NHC59Q", + "sources_swarmHash": "142f2355c580bfe903ae33829f5d32180bcb83382f60682077b05a498df10096", + "sources_youtubeId": "hd8uYCXUwa4", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735dbd09dbb7a90e1646d41", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735dbd09dbb7a90e1646d41.vtt", - "transcript_text": " Okay, and we're live. Welcome, everybody. My name's Andrew. I'm with Xerox PARC. And I'm here to talk to you a little bit about pods. How many here have been using pods this week? I think the answer should be everyone because you got into the building somehow and that actually required one. So what are pods? So your devs con ticket is a pod. The proof of attendance to this talk that you can claim from the Q&A app that's up on the screen there right now. If you're in the room, I don't think this works remotely. That's also a pod. Some of you have been playing Frog Crypto this week. I see some frog hats out there. All those frogs are pods. A pod really can be anything. It can be a secret message. It can be your identity credentials. It could be your driver's license if we get any governments involved to actually do this it can it's cryptographic data of any sort So what is the pod framework? So pod is is a technology a framework that makes it easy for apps to issue cryptographic data and to make zk proofs about that Data, it's a data format that's optimized for fish improving It's a standard of how that data format can be sent around and things can be proven about it And it's a framework of how that data format can be sent around and things can be proven about it. And it's a framework with some developer SDKs. Check out our documentation site. I'll have a link at the end if you want to try it out. It's mostly in TypeScript, but can be used on other platforms as well. We have ports in a few other languages. So I'm hoping some of you will get some use out of it. So one last WTF is zero-knowledge proofs. How many people here have used ZK proofs before? I feel like you understand them. Okay, a few. It's kind of obscure technology. That's kind of the point of pods is to make it easier to use so you don't have to understand how the underlying math works. But in brief, a ZK proof lets you prove the validity of any private data or any computation on your private data without revealing that private data itself. And that proof is trustworthy because there's some math that basically you can only calculate if you did it validly. At Xerox PARC, we think of ZKProofs as a universal cryptographic adapter. Basically, I've got lots of different kinds of private data. 
By doing computations on that data in a verifiable way, I can present to somebody whatever I want that is validly proven from that data. The example in this diagram, which you'll find in our blog post, is like, what if I could calculate my own credit score from signed data I got from my bank or from the IRS? I don't need to ask a credit reporting company to gather all this stuff together. I can gather it myself, and I can make a provable statement about what my credit score is and apply for a loan. This is part of the vision of something we call Parcnet, the programmable cryptography internet, which we think is going to be much better once programmable cryptography catches on in all of these ways. ZK proofs are a big part of this, but it's only the beginning. See the other talks being given by my colleagues this week. Also, we have a whole-day CLS tomorrow about programmable cryptography. But today we're going to be focused on ZK proofs and what pods let you do with them. So this is the pod ecosystem that we envision. You need issuers who are issuing this cryptographic data. They're mostly using a private key to sign it. Those take the form of attestations, which users themselves can hold on to. They hold on to their own private data. They don't need an intermediary. At some point, some consumer asks the user: please prove something about yourself, your identity, your age, the fact that you went to DevCon, things like that. And then the user can generate a ZK proof and send it to the consumer, who can verify that proof. They do need that third arrow in the diagram, which is a little bit of knowledge about who the attester is. You need at the very least to know that that attester has a public key that you should trust. There might also be things like what is the event ID that represents DevCon on a ticket, things like that. But that kind of completes the diagram. Okay. So why are we doing this? So I work with the team that builds Zupass. You've all been using that to check into DevCon. And we believe that the best learning on this kind of technology comes from contact with reality, meaning we want real users to try this. We want to do it at scale. There are 12,000 people at DevCon this week who are stress-testing Zupass for us. Thank you. I'm sorry if the performance has not always been as great, but it seems to be standing up. And we want to use these opportunities to onboard new users by bridging data that is not ZK-friendly into our ZK-friendly world, and take advantage of people who are willing to be early adopters, like the crypto community. So by bridging, what I mean is we're bringing data in the red boxes on this diagram into the green world. Red, in my diagrams for this talk and the next one, means non-ZK-friendly systems, whereas green means ZK-friendly systems. We can bridge data in and then issue it, like your DevCon ticket, which is loaded from a database that isn't cryptographically signed. And then verifiers can get you into another system, like Telegram, in order to join the DevCon chat group. All that is working today. In order to bring this in front of the most users, we do have to accept some constraints. So we're not using the most cutting edge of ZK technologies. We want everyone to be able to use it, which means we've built a mobile-friendly web app, which means everything we do has to run in a browser on a phone. Even an older phone, even on a bad network when the Wi-Fi is overloaded at the conference. 
So that became a bit of a mantra when I was building some of these technologies. There's a lot of cool ZK technology out there that is great, but it needs to run on a big back-end server, and I don't have one of those when I'm in a browser on a phone. So we've got to use tried-and-true technologies. For people who are in the know, we use Circom and Groth16. You may or may not have heard of those, but that's kind of the underlying technology. They've been around for quite a few years, so they're pretty well battle-tested at this point. So I want to talk a little bit about the systems we built along the way. So this is what Zupass ticketing looked like a year ago at DevConnect, when we were in Istanbul. So it's the same triangle that you've seen here. We were pulling data out of Pretix and issuing the tickets. We used a format called an EdDSA ticket. That's a signed piece of data, but it's not a pod, which I'll explain a little bit later. And then we had a proving circuit where you could prove your ticket, you could reveal some fields, you could prove that you owned it, etc. So what did it take us to build this? Don't pay attention to all the details here, but look at the line counts on these PRs when we wrote these things. It's pretty large. That's quite a few lines of code that it took. And in just the ZK proof, there's about 15,000 lines of code that are still there, not including tests and documentation. So it's kind of complicated. So that was the first thing we built. The second thing we built was Frog Crypto, the first version, last year, which used a very similar data format. So frogs were issued by the server as what was called an EdDSA frog, a very similar format to tickets, and then you could make a proof about it and present it to our Zucat Telegram bot, who would let you into the secret frog holders chat. This all happened last year in Istanbul. So what did it take to build that? It turns out it was very similar. There was a lot of duplication of effort. There were a lot of similar patterns, but you couldn't actually reuse the underlying data. So there clearly is a pattern here, right? We want to issue some signed data. We want someone to request a proof and then to be given a proof of that signed data. But it turned out that each time we had to build it, we had to rewrite a whole bunch of code in order to customize it. So I'm an engineer, I don't like this kind of complexity, I'd rather do things once because I'm lazy. So why is this so hard? The signed data part, the EdDSA PCD that we were using as our underlying data format, used a fixed-size hash: it hashes 16 numbers in an array. And therefore, for every new data type that we wanted to put in there, we had to do some custom coding to decide how those numbers in that array go together to make this data type. I would analogize this to imagine you were processing all your data in a hex editor, directly as bytes. It's kind of inconvenient. We have better tools than that now. And on the proof side, ZK circuits are a little bit awkward to program. They don't use a normal programming model. You don't write them in a language you're used to. Every variable is what's called a field element. This is a mathematical concept. It's a very big number, modulo some big prime number, and you've got to write equations on those field elements. So it's kind of complicated. And also, once you build a ZK circuit, it's very fixed. 
In order for the prover and verifier to agree on what's valid, the circuit can't change very much. You have to publish a whole new circuit. So that makes this a bit hard. I would analogize this, again, to the hardware world: this is like an ASIC. It's a chip that does one thing. It might do it very well, but it still only does one thing, and every time you want to do another thing, you've got to build a whole new chip. It's kind of inconvenient. So what do we need here? Well, what we'd really like to have is what's called a ZKVM. Basically, if you have an ASIC and you want something more general, there's technology out there that lets you write code, run it inside of a ZK circuit, and validate that this is the correct output. It's great. Some other people are giving talks about it this week. But unfortunately for our situation, it's a little bit too much. Like I said, my mantra is it has to work in a browser on a phone. ZKVMs are pretty big right now. You're not going to be able to do that on an older phone in a few seconds. So we have to do something a little bit more limited than that. But again, I'm an engineer; I like working within constraints and coming up with clever solutions. So here's what we came up with. So on the data side, I'm finally going to explain to you what a pod is, at some level. So a pod is just a collection of names and values. Think of it like a JSON object, except that it's flat. There's no hierarchy of nested objects, just names and values. It can have multiple data types in it for those values. The data is then cryptographically signed in a way that makes it easy to make proofs about it. And I'm going to get into more of that a little bit later. Also, I forgot to mention this at the beginning. We are having a deep dive session after this intro session. So stick around for that if you want lots more detail. But I'll give you what I can in the next 15 minutes. On the proof side, we also can generalize. So we have what we call a general-purpose circuit, which means rather than having a fully CPU-like circuit in a ZKVM, or having the ASIC's fixed circuit, we can do something in between. I would analogize it more to an FPGA. We've got some logic blocks. We call them modules. You can feed in some inputs to your circuit in order to decide how those logic blocks are connected to each other, and make a different proof every time using the same circuit. We call this framework GPC, for general-purpose circuit. And in addition to the circuits individually being configurable, we precompile a set of circuits, in what we call a family, at different sizes with different sets of modules. So when you want to make a proof, you can pick the circuit in the family that has enough modules for what you want, and not any more, because having a bigger circuit means more time to prove, more memory, etc. So you can make the right trade-offs there. So with that, we get the generalized version of the ZK ecosystem, where every issuer is issuing pods. They might contain very different kinds of data. It might be a frog, it might be a driver's license, but it's still a pod. And then when you make proofs about it, you can freely decide what you want to prove and write a configuration to represent that proof. So with that in mind, at this point, what is a pod? So a pod is a data format that makes ZK proofs easy. It's a key-value store. It's going to be hashed and signed in a very specific way involving a Merkle tree, which I can explain more of later. And it's optimized for efficient ZK proving. Here's an example of a pod. 
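As a concrete illustration of what issuing such a pod might look like in code, here is a minimal sketch, assuming the TypeScript @pcd/pod API documented on pod.org; the entry names, values, and keys below are hypothetical.

```typescript
// Minimal sketch, assuming the @pcd/pod TypeScript API (see pod.org);
// the entry names, values, and keys here are hypothetical placeholders.
import { POD, PODEntries } from "@pcd/pod";

const issuerPrivateKey = "AAECAwQFBgcICQABAgMEBQYHCAkAAQIDBAUGBwgJAAE="; // placeholder key
const ownerPublicKey = "xDP3ppa3qjpSJO+zmTuvDM2eku7O4MKaP2yCCKnoHZ4";    // placeholder owner public key

// A flat collection of typed names and values: no nested objects.
const entries: PODEntries = {
  idNumber: { type: "string", value: "G20957287" },
  driver: { type: "boolean", value: true },
  dateOfBirth: { type: "int", value: 25550n },
  cardholder: { type: "eddsa_pubkey", value: ownerPublicKey }
};

// Hashing the entries into a Merkle tree yields the content ID, and the
// issuer signs that content ID; together that is the whole pod.
const pod = POD.sign(entries, issuerPrivateKey);
console.log(pod.contentID);         // the Merkle root derived from the data
console.log(pod.verifySignature()); // true for a validly signed pod
```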
So we've got some names and values. Most of these are very straightforward, so I'm not going to go through them all in detail. The one that's maybe a little bit interesting is the cardholder. So this is meant to look like a driver's license in some fictional country. The cardholder is my Semaphore ID. This is what Zupass uses to identify you. It's really a public-private key pair. So the public key is what's going to go in the pod to say that this is my pod, or in this case, this is my driver's license. What you see on the right is the JSON format for this. It's optimized to be a little bit terse and also human-readable. So things that don't need a type annotation, you'll notice, don't have them, because the JSON type itself is enough data for that. Once you get down to actually building the Merkle tree, everything does have a type, but in this table I call them type hints, because the type is not part of the cryptographic data. Instead, it is guidance for how do I hash this data into a piece of cryptographically verifiable data. More on that later. So the first thing I do to make this into a pod is I build a Merkle tree. I'm not going to go into detail on that, but basically you arrange the elements into a tree and you hash them all together until you get to a root, and that root is what we call a content ID. The content ID is derived from the data, so if you have the same data, you can derive the same content ID, regardless of how it was formatted in JSON. One detail that you might notice on the right is that the names have been alphabetized. That's how we make sure that it is deterministic and you always get the same content ID. But everything else is just hashing. And then, once I've got the content ID, that's the thing that I sign. So if I'm an issuer and I want to issue a pod, first I get the data, I Merkle-ize it, I get a content ID, and then I just write a signature on that content ID, and that's enough to validate that the entire pod is valid. So we have a ZK-friendly data format. We'd probably like to do some ZK proving on it. So let's talk about the GPC side of this, which is what lets you do that. As I mentioned earlier, GPCs are circuits made of reusable modules, as well as a family of multiple circuits, so you can pick the size that you want. Let's look at what that looks like. So this is an example of a GPC configuration. This is how you say what you want to prove. You're going to present this as a JSON object that says what you want to prove, and the system is going to do the rest, compiling this down to what to do with the circuit. So here's a very minimal proof. I'm going to try and prove that I have a driver's license that says I'm allowed to drive. So my configuration says I have a pod; I'm going to call it idCard. This is actually an arbitrary name that's just part of the configuration, to refer to it later. It has some entries, and one of those entries is driver. That is not an arbitrary name. That's a name that was in the pod and is going to be hashed and checked. And what do I want to do with it? Well, I want to reveal it, so isRevealed is true. That means this is a proof that I have a pod, that it contains this entry, and it's going to reveal that its value is, hopefully, true. There's one more detail that wasn't on the previous slide. That's because it's done by default, so I didn't need to include it in the config, but it's important to talk about. 
What have I proved, if I don't think about the signing key? I just proved that I have a pod containing the word driver with the value true. That doesn't mean it's actually a driver's license. In order to do that, you've got to do something cryptographic. So the easiest way to do that is you check that the pod was signed by a public key that is well known. That might be the government of California, which is where I live. Hopefully we'll get them to issue pods eventually. But that is implicit. The signing key is also always revealed by default, but you can choose to not reveal it if you want to, in which case you can constrain it in other ways. You might constrain it to be equal to some other element without actually revealing it, or constrain it to be a member of a list. Maybe I have a list of all the signing keys of the 50 U.S. states, and I just want to prove I have a driver's license from one of them; I don't want to tell you which one. Okay, let's go a step further and get a little bit more complicated. So I've proven that I have a driver's license that says driver equals true. I haven't actually proven that it's my driver's license yet. I could have stolen somebody else's. The thing is that pods, because they're just data, they are transferable. I can copy them. The way we make a pod bound to a single user is by putting that user's public key in it, which I showed earlier when we were looking at the entries. And the way you prove that you are that user is you make a proof that you hold the private key that corresponds to that public key. And the way you say that in the GPC config is the isOwnerID field. You say isOwnerID, and I give the type of public key I'm using, which is Semaphore version 4, from our friends at PSE. And that basically means that this proof is going to be configured to check that I have the right private key in my private inputs. And in this case, it's not even going to reveal what my public key is, just that I own this pod and this pod says I can drive. Okay, let's get to a little bit more ZK and hide some more data. Instead of proving that I'm a driver, what if I just want to prove I'm over 21? Maybe I want to go buy some alcohol. I don't know what the age is in Thailand, but back home it's 21. So I can just say I have a pod containing an entry called date of birth. That entry is not going to be revealed, but it's going to be in this range, and that's the numeric range for the date that is 21 years ago. We should make this more friendly and let you just pass in a date object, but for now it's a number. So this is a proof that I am over 21 and that I own this pod. I didn't take out that field, but everything else is not revealed, and I'm being very anonymous. One last example: we can make proofs of multiple pods at once, if we have a circuit with enough modules. So here's one where I'm proving I'm over 21 and also proving that I have a ticket to an event; maybe I'm going to go to an after-party after DevCon. And in this case, for the ticket, I'm proving that its attendee name is the same as the name in my driver's license. I'm proving that I own it, and I'm also proving that the event ID of that ticket is in a valid list. I'm not revealing what I have a ticket to, but it's maybe a list of DevCon-related events that are happening in Thailand this week. So this is kind of a minimal anonymous way of checking into a party. Of course, if I'm there in person, I'm revealing some more about myself by being there, but you get the idea. Okay. 
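Written out, a configuration combining those examples might look roughly like this; a sketch assuming the @pcd/gpc TypeScript API from pod.org, with hypothetical entry names, list name, and range bounds.

```typescript
// A sketch of the multi-pod configuration just described, assuming the
// @pcd/gpc TypeScript API; entry names, list name, and bounds are hypothetical.
import { GPCProofConfig } from "@pcd/gpc";

const proofConfig: GPCProofConfig = {
  pods: {
    idCard: {
      entries: {
        // Prove date of birth lies in a range (over 21) without revealing it.
        dateOfBirth: { isRevealed: false, inRange: { min: 0n, max: 25550n } },
        // Prove the name on the license matches the ticket's attendee name.
        name: { isRevealed: false, equalsEntry: "ticket.attendeeName" },
        // Prove I hold the private key matching this pod's owner public key.
        cardholder: { isRevealed: false, isOwnerID: "SemaphoreV4" }
      }
    },
    ticket: {
      entries: {
        attendeeName: { isRevealed: false },
        // Prove the event is in a named list supplied with the proof inputs,
        // without revealing which event it is.
        eventId: { isRevealed: false, isMemberOf: "validEventIds" }
      }
    }
  }
};
```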
So, last piece of this: I've now configured my proof, I've decided what I want to prove; how do I actually make a proof? All of this is an example of what you can do with the GPC libraries. So there are three things I need in order to make a proof. One of them is the proof config, which I've already given you some examples of. The second thing is the inputs. That's the actual pods, which I need to have in order to make proofs about them. There are also other inputs, like my private key, or like that list of valid event IDs that I want to prove my event ID is one of. Those are all inputs. The third thing I have to feed in is something called an artifact path. That is: where do we find the binaries that know how to generate this circuit? So when a ZK circuit is compiled, it generates a proving key, a verification key, and also a witness generator. Don't worry about what those are, but they're some big binary things that the prover and verifier have to agree on. We distribute these via NPM. We also put them on various CDNs. You can download them. So you just have to decide for your app: are you going to download them, put them on disk, and give a path to them? Are you going to download them from a URL? There are options. Once you've got these things together, the gpcProve function will generate the proof. It puts together that configuration, it picks a circuit that fits that configuration with enough modules, it downloads the corresponding artifacts for that circuit, and it generates the proof. And then the last thing it does... oh, I should have gone to the next slide, here we go. So it needs to compile down all those inputs into circuit signals that can feed into the actual ZK circuit, which are mathematical field elements, as I mentioned. And then after it's done and it gets a valid proof, it will decompile some of the outputs and turn them into what's called the revealed claims object. So three things come out of a proof. You've got the actual mathematical proof. That's just opaque numbers that are needed by the verifier. That's the actual ZK part. You've got a bound config, which is exactly like the configuration that you fed in, except that now it contains the identifier of the circuit that was selected, so that the verifier knows how to verify it correctly. And then you've got the revealed claims. If I revealed that I am a licensed driver, driver equals true, that would be in this object. If I revealed my name, et cetera, that would be here. And that's what the decompiling is for. It's taking the circuit outputs and turning them back into a string or whatever the representative thing is. Okay, so those three things are exactly what I should send to a verifier, whoever I'm going to prove this to. They need those three things. They also need an artifact path to download the corresponding verification key. And then they can verify the proof. They just do very much the same thing. They're going to compile some of those inputs back down into ZK land, where they are circuit signals. They're going to verify the proof, and they're going to say yes or no, whether it's valid. And, you know, gravy, we're at the end, and hopefully everything went right and I've proven what I wanted to prove to you. So, final takeaways; a summary of what this was a bit of a speed run through. So pods are data that's designed to be proven about. Any pod is a signed attestation of something, whether it's I have a ticket, whether it's I have a driver's license, etc. 
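Put together, the proving and verifying flow just described might look roughly like this in code; a minimal sketch assuming the @pcd/gpc TypeScript API, with the pods, owner identity, membership list, and artifact path as hypothetical placeholders set up elsewhere.

```typescript
// Sketch of the end-to-end flow, assuming the @pcd/gpc TypeScript API;
// idCardPod, ticketPod, ownerIdentity, and eventIdList are assumed to be
// constructed elsewhere, and the artifact path is a hypothetical location.
import { gpcProve, gpcVerify, GPCProofInputs } from "@pcd/gpc";

const GPC_ARTIFACTS_PATH = "./artifacts"; // local copy of the circuit binaries

const proofInputs: GPCProofInputs = {
  pods: { idCard: idCardPod, ticket: ticketPod },  // the actual signed pods
  owner: { semaphoreV4: ownerIdentity },           // private key behind isOwnerID
  membershipLists: { validEventIds: eventIdList }  // list referenced by isMemberOf
};

// Prover side: picks a circuit from the family with enough modules, compiles
// config and inputs down to circuit signals, and generates the proof.
const { proof, boundConfig, revealedClaims } = await gpcProve(
  proofConfig,
  proofInputs,
  GPC_ARTIFACTS_PATH
);

// Verifier side: receives exactly those three things, plus its own artifact
// path for the verification key, and gets back yes or no.
const isValid = await gpcVerify(proof, boundConfig, revealedClaims, GPC_ARTIFACTS_PATH);
```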
GPCs allow you to flexibly make proofs about those pods by using modular circuits, which can be configured using a JSON-like configuration language. And the system will auto-select the circuit that you need depending on your configuration. So all your app needs to do is say: please make me a proof of this, with these inputs, and everything else is handled for you. Then the last step is the verifier verifies the proof, and then the apps do have to decide what things they trust. How do you trust that this is the correct proof? Like I alluded to before, you should check that this ID card was actually signed by the government. You should know the public key, or you should know the event ID for DevCon. You should also check, and I'll say a little more about this in the deep dive, that the configuration that was sent to you was actually the configuration you asked for. So you don't want the prover to say, oh, I have a proof of something, but not necessarily the thing you asked for. That's something that you should check as well. But once you do all of that, this end-to-end should be very solid, and you should be getting the information you need. Okay. That's it for the speed-run intro. Please check out our documentation. It's on pod.org, which just went live yesterday. And also, there's a link that just went by, t.me/zupass, to join the Telegram group. And yeah, let's go do some Q&A. All right. Where do you store the Semaphore identity secret for users in Zupass? So that's all client-side. Zupass stores all of your private data client-side. The Zupass server is aware of your public key, because that's how it can make sure that you get issued the right DevCon tickets and things like that. But yeah, Zupass is a client-side cryptographic data manager. To what extent is pod an open data standard? So, I consider it open. We haven't published a spec for it; I should work on that. But all of our code is open source, so people can do interoperability with it. The pod format itself is very generic and interoperable. It's the GPC compiler that turns a pod into the specifics of what you need to prove with a specific GPC. So the GPCs are kind of less standard and generic, though they also could be used on multiple platforms. We do have an example of GPC verification on-chain that just started working a couple days ago, so all that is possible outside of a browser, but we don't have as many examples there as we do on the pod data. Can we scroll down? Is there anything more? Can you compare pod to verifiable credentials? Yes. This is something I looked into. Pod is simpler. It doesn't really have a fixed schema or anything that ties it into a specific standard. You could put JSON-LD-based verifiable credential data in a pod if you wanted to. But a pod is much more flexible. At the cryptographic level, there is a difference in the kind of Merkle tree we use. The pod uses the lean IMT, which is something that Semaphore created, which is much shallower, because pods tend to be relatively small, as opposed to the sparse Merkle tree that is used, at least, in the implementation of verifiable credentials that I'm aware of, which is the one from IDEN3. That is a much deeper Merkle tree, but it can do things like prove absence of an entry, which pods can't do. Okay. What else do we have? How frequent is pod refresh? Very frequent so far, but we're hoping to keep it much more stable after DevCon. I don't have a strong answer to that. What else? How do you convert JSON to a Merkle tree? 
Please stick around for the deep dive session that's coming up. I'll tell you all about that. What else? Yeah. So, in the example of prover and verifier, the user's device can generate the proof, and that's why everything has to work in a browser on a phone. Client-side proving is definitely the default in Zupass. Not every app has to do it. These are libraries. You can call them wherever you want. There's much more difference between verifiers, whether they're doing server-side verification or client-side verification. That depends on what your use case is and what you're protecting against. Are the issued credentials signed, and the proof... the rest of that question scrolled away. We do not use BBS signatures to verify partial properties; that's what we use the Merkle tree for. Again, more details on that coming up. Is it possible to make information in Zupass portable? I think that pods do make that possible, yes, as long as it's a pod, and there are APIs for getting data out of Zupass if you want to. That's called the ZAPI, at which point you can take this to whatever platform you want. We have implementations of pods in Python, C, and Rust for various projects, so it's not too hard to do. How do apps know whether a proof from a verifier is legit? Well, the framework tells you that it is a valid proof. And it will confirm for you that this configuration and these revealed claims and this proof match up and are valid. So the prover couldn't have cheated about that. What they could cheat about is app-level semantics. So if you ask for a proof of a driver's license and I sent you a proof of a frog instead, that's something that the framework can't tell you, because it just says that's a valid proof. So you do have to check: is that the configuration I asked for? Is the signer of this driver's license the government, etc.? But yeah, that's the kind of level of verification we've got. Okay. I think that's it. Can we go back to the slides briefly? Okay. Those of you who are collecting frogs, I've got something for you if we can switch back to my slides. Oh, yeah. We'll leave that up for a minute or two. I think we've got like three minutes before the next session starts anyway. So feel free to frog away. Okay. And as I said, we're going to go straight into a deep dive session, which is going to be 90 minutes. We probably won't use the whole thing, but that's what we're scheduled for. 
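Returning to the earlier question about how apps know a proof is legit: the verifier-side checks might look roughly like this; a sketch assuming the @pcd/gpc API and a deep-equality helper, with hypothetical constants.

```typescript
// Sketch of the app-level checks described above, assuming the @pcd/gpc API;
// the artifact path, expected config, and trusted key are hypothetical.
import { gpcVerify, GPCBoundConfig, GPCRevealedClaims } from "@pcd/gpc";
import isEqual from "lodash.isequal";

const GPC_ARTIFACTS_PATH = "./artifacts";        // placeholder artifact path
const TRUSTED_SIGNER_PUBLIC_KEY = "xDP3ppa3qjpSJO+zmTuvDM2eku7O4MKaP2yCCKnoHZ4"; // placeholder

async function acceptProof(
  proof: unknown,
  boundConfig: GPCBoundConfig,
  revealedClaims: GPCRevealedClaims,
  expectedConfig: object
): Promise<boolean> {
  // 1. Cryptographic validity: config, revealed claims, and proof match up.
  if (!(await gpcVerify(proof as any, boundConfig, revealedClaims, GPC_ARTIFACTS_PATH))) {
    return false;
  }
  // 2. App-level semantics: is this the configuration we asked for, and not,
  //    say, a perfectly valid proof about a frog?
  const { circuitIdentifier, ...restOfConfig } = boundConfig;
  if (!isEqual(restOfConfig, expectedConfig)) return false;
  // 3. Issuer trust: was the pod signed by the public key we expect?
  return (revealedClaims.pods as any).idCard?.signerPublicKey === TRUSTED_SIGNER_PUBLIC_KEY;
}
```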
So stick around if you want more details to answer any of those questions.", - "eventId": "devcon-7", - "slot_start": 1731569400000, - "slot_end": 1731571200000, - "slot_roomId": "classroom-b", - "resources_presentation": "https://docs.google.com/presentation/d/1M8ozawZM8Xme8xRHKoop-7XlGGAjINE02ztaxWPyaXo", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "andrew-twyman" - ] + "diego-kingston" + ], + "eventId": "devcon-7", + "slot_start": 1731392400000, + "slot_end": 1731393000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/13SZq6cgLNu-xaLH6s8Xx4zOAbocLGeK_vQMElFVIUtU", + "resources_slides": "https://drive.google.com/file/d/1SzCLj48a7CfRuHIde2kNdS3olPwqUaPR/view" }, "vector": [ 0, @@ -434591,10 +433368,6 @@ 0, 0, 0, - 4, - 0, - 0, - 0, 0, 0, 0, @@ -434995,6 +433768,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -435329,8 +434103,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -435346,7 +434118,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -435396,6 +434167,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -435460,6 +434232,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -435530,6 +434303,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -435881,14 +434655,12 @@ 0, 0, 0, - 0, + 2, 0, 2, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -435903,38 +434675,40 @@ }, { "session": { - "id": "introduction-to-hash-based-proof-systems", - "sourceId": "EUAERD", - "title": "Introduction to hash-based proof systems", - "description": "Over the last decade, ZK has been gaining attention due to its applications in verifiable private computation and the scalability of blockchains. The development of general-purpose zkvms powered with STARK/hash-based proof systems have made writing provable applications simpler, abstracting developers from the details of ZK. In this talk, we will explain the basics of hash-based proof systems, different arithmetization schemes and how to prove computations without needing a trusted setup.", + "id": "introduction-to-multilateral-trade-credit-set-off-in-mpc", + "sourceId": "VYD38F", + "title": "Introduction to Multilateral Trade Credit Set-off in MPC", + "description": "Multilateral Trade Credit Set-off is a process for collecting outstanding invoices from a network of firms and detecting cycles. A cycle is a circular pattern of due payments that connects businesses. Removing a cycle yields liquidity savings for the firms involved. This process is done by a central agency that collects the invoices and performs the netting. 
Instead, we leverage MPC to perform the set-ff while preserving the privacy of sensitive financial data of the firms", "track": "Applied Cryptography", "type": "Lightning Talk", - "expertise": "Beginner", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Binius", - "Reed-Solomon" - ], "tags": [ - "Scalability", - "ZKP", - "STARK", - "reed-solomon", - "Scalability", - "STARK", - "ZKP" + "finance" ], - "language": "en", - "speakers": [ - "diego-kingston" + "keywords": [ + "MPC", + "cryptography", + "finance" ], + "duration": 680, + "language": "en", + "sources_swarmHash": "7a26e690c86585c39a8f2060e0df78edb94d20dc82bf22ba67b3c85cdc3d2bcb", + "sources_youtubeId": "OCEEe8azbR8", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731392400000, - "slot_end": 1731393000000, + "slot_start": 1731391200000, + "slot_end": 1731391800000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/13SZq6cgLNu-xaLH6s8Xx4zOAbocLGeK_vQMElFVIUtU" + "resources_presentation": "https://docs.google.com/presentation/d/1uaHx0jU0Bz-S7lJarLkDXQgyJwYi9XQaoCd5IniQ4ls", + "resources_slides": "https://drive.google.com/file/d/1noMU0_SkCnjQZcFA2oTGg2KzY3GbO4Tz/view", + "speakers": [ + "enrico-bottazzi" + ] }, "vector": [ 0, @@ -436763,7 +435537,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -436828,7 +435601,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -436899,7 +435671,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -437249,12 +436020,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 0, - 2, - 0, 2, 0, 0, @@ -437273,40 +436042,35 @@ }, { "session": { - "id": "introduction-to-multilateral-trade-credit-set-off-in-mpc", - "sourceId": "VYD38F", - "title": "Introduction to Multilateral Trade Credit Set-off in MPC", - "description": "Multilateral Trade Credit Set-off is a process for collecting outstanding invoices from a network of firms and detecting cycles. A cycle is a circular pattern of due payments that connects businesses. Removing a cycle yields liquidity savings for the firms involved. This process is done by a central agency that collects the invoices and performs the netting. 
Instead, we leverage MPC to perform the set-ff while preserving the privacy of sensitive financial data of the firms", + "id": "io", + "sourceId": "9BQWGB", + "title": "iO", + "description": "It will be worth it ;)", "track": "Applied Cryptography", - "type": "Lightning Talk", - "expertise": "Intermediate", + "type": "Talk", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "finance" - ], - "keywords": [ - "MPC", - "cryptography", - "finance" - ], - "duration": 680, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "7a26e690c86585c39a8f2060e0df78edb94d20dc82bf22ba67b3c85cdc3d2bcb", - "sources_youtubeId": "OCEEe8azbR8", + "sources_swarmHash": "46d5ef22074d469f164fac15b71964a1b097a2df621f13d511931dc51491d35a", + "sources_youtubeId": "5hDj0TB8s18", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731391200000, - "slot_end": 1731391800000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1uaHx0jU0Bz-S7lJarLkDXQgyJwYi9XQaoCd5IniQ4ls", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "enrico-bottazzi" - ] + "barry-whitehat" + ], + "eventId": "devcon-7", + "slot_start": 1731555000000, + "slot_end": 1731556800000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1RcEikB5_ALOwZaJQaAvBqDR_O7aF9ycww9YUXYxXCFA", + "resources_slides": "https://drive.google.com/file/d/15ywtmf8B9_utODU2EwKL2EnT1_0AcOqJ/view" }, "vector": [ 0, @@ -438452,10 +437216,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -438626,7 +437386,6 @@ 2, 0, 0, - 0, 2, 0, 0, @@ -438645,37 +437404,50 @@ }, { "session": { - "id": "io", - "sourceId": "9BQWGB", - "title": "iO", - "description": "It will be worth it ;)", - "track": "Applied Cryptography", - "type": "Talk", - "expertise": "", - "audience": "Engineering", + "id": "is-multi-block-mev-a-thing-insights-from-2-years-of-mev-boost-data", + "sourceId": "E3JADX", + "title": "Is multi-block MEV a thing? Insights from 2 years of MEV Boost Data", + "description": "Multi-block MEV describes MEV that arises from one party controlling several consecutive slots. Currently, it is discussed as a potential blocker for several prominent mechanism designs. We analyzed two years of MEV boost data covering more than 5 million slots to investigate historical patterns of it. 
Amongst other findings, we see that fewer multi-slot sequences occur than would be randomly feasible; however, payments for longer sequences are higher than average.", "track": "Cryptoeconomics", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ "Economics", "Tokenomics", "MEV", "data", "analysis", "Economics", "MEV", "Tokenomics" ], "keywords": [ "Multi-block MEV", "Data Analysis" ], "duration": 1085, "language": "en", "sources_swarmHash": "", "sources_youtubeId": "KcEZHQiopTg", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736f4321b0f83434d3ca90a", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736f4321b0f83434d3ca90a.vtt", "transcript_text": " Did quite a few people already make it to the MEV stage this Friday morning? Seems like there are quite some MEV ultras. Looking forward to it. I'll be doing a quick intro on multi-block MEV, just a quick lightning talk, so let's dive right in. Quick recap: what is multi-block MEV? Actually, multi-block MEV means one block builder has multiple slots in a row and can extract more MEV by having the slots in a sequence than by having individual slots. This arises due to the fact that MEV accrues exponentially; if we look at the data, we can see that over time it grows slightly exponentially. For example, one strategy could be to manipulate prices: a block builder can, for example, buy a certain token in slot one, then only include buy transactions on one side and not include the sell transactions, and then in his last slot capture the MEV of selling the token first. This is one potential strategy of manipulating the price. There are other ways to do it. We can do liquidation attacks, forcing the price to go below a certain threshold and thereby liquidating assets and capturing the liquidation gains. So I won't go too deep into it, but there's a whole set of strategies. What did we do? We looked at data since the merge, 4.3 million data points of MEV-Boost payments, and did, on the other side, a Monte Carlo simulation of daily market shares of builders to see what a statistically normal distribution of multi-slots would look like, to have a baseline. And based on this, what we saw is, interestingly, we have fewer multi-slot sequences than expected. On the chart on the right, we see the blue bars being the expected distribution of multi-slots; we see fewer of them. Also interesting, what we saw: the longest sequence was 25 slots in a row by the same builder, Beaver Build. And for the same validator and same builder, it was 11 slots in a row in March of this year. So what we see, just by statistics, is that there are long sequences, which would allow for multi-slot MEV strategies. So in the next step, what we looked at was the payments: the MEV-Boost payments for longer sequences. So this is basically, for a sequence of length 5, for example, what was the average payment. What we see, interestingly, is that it increases for longer sequences. So it seems to be there is a value in longer sequences. However, why this is the case, so far we can only speculate. 
One hypothesis could be that MEV-Boost is a continuous first-price auction, so it basically, from an economics perspective, works like a second-price auction in terms of the value where we end up. And so it could actually be driven not by the highest bidder but by the second-highest bidder, who has an increasing value for the blocks based on private order flow, MEV captured in private order flow. So it could be the case that the intrinsic value of the second-highest bidder increases, and that drives these increasing values. So what we also did is look at the payments per slot. So in longer sequences, what was the bid for the slot at a specific point there? We saw that so far there is an increase, but only a very, very slight increase, so this led us to the conclusion that so far we don't see dedicated multi-slot strategies. We also looked at the top 10 builders, to see if there's a certain pattern for the top 10 builders, if there are builders that are deviating from the mean. We didn't see any very outstanding data. There are a few builders that have more slots, more multi-slot sequences, or that are particularly strongly correlating with the same validators. However, from our perspective, that's probably based on things like latency and co-location, and not based on other types of strategies that are run. The last thing that we looked at was autocorrelation of MEV-Boost payments, to see if we can use historical MEV-Boost payments to predict future ones. There, what we see, based on different correlation metrics, is that it strongly goes down after the second or third slot. So it means there's a very, very low autocorrelation, so a low predictability; we don't have times of high or low MEV, but there's a fast reversion to the mean overall. Yes, that was the quick run-through. If the topic in general is interesting, there's a post on ETH Research going into the details, with a link to the Jupyter notebook with all the data, and I'm also doing a workshop today in the afternoon on execution tickets, an agent-based simulation of execution tickets, so whoever is interested, feel free to join. Otherwise, happy to take on questions. I think I have around one and a half minutes or one minute left for questions. Feel free to scan the QR code or ask them directly. So we already have two questions. We want more. You can just scan the QR code and put them in. And one quick question, Pascal. Where is your talk, so that the audience is aware of the room that the talk is in? The talk is in classroom B, so just down the aisle. So I'll read out the question for those who are joining virtually. Have you studied Arbitrum Time Boost? Do you think DAOs should be capturing MEV revenue? To be honest, I haven't looked deeply into Arbitrum Time Boost. I think there are people in the audience that have much more knowledge of this, so I can't really give an in-depth take. Unlike the proposer, a builder is not guaranteed to win the PBS auction multiple times in a row. Would you advocate for a random proposer allocation, which does not allow the same proposer in a row? In general, I think it makes sense. However, I think there is more to it. So, I mean, that's a bit of an outlook already on the simulation. What we saw is that the secondary market usually reduces decentralization, which means there should be some component of just-in-time auction. However, I think with the current MEV-Boost setup, what we have is the problem of in-flight variation. So I think a combination of both could be interesting. 
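The randomly feasible baseline described earlier can be made concrete with a small Monte Carlo simulation; a minimal sketch, assuming hypothetical daily builder market shares and 7200 slots per day, as an illustration only, not the linked Jupyter notebook.

```typescript
// Minimal Monte Carlo sketch: given daily builder market shares, estimate how
// many runs of k consecutive slots the same builder would win purely by
// chance. The market shares below are hypothetical.
function simulateSequences(shares: number[], slots: number): Map<number, number> {
  const counts = new Map<number, number>(); // run length -> occurrences
  let prev = -1;
  let run = 0;
  for (let i = 0; i < slots; i++) {
    // Sample the winning builder for this slot according to market share.
    let r = Math.random();
    let winner = shares.length - 1;
    for (let b = 0; b < shares.length; b++) {
      r -= shares[b];
      if (r <= 0) { winner = b; break; }
    }
    if (winner === prev) {
      run++;
    } else {
      if (run > 1) counts.set(run, (counts.get(run) ?? 0) + 1);
      prev = winner;
      run = 1;
    }
  }
  if (run > 1) counts.set(run, (counts.get(run) ?? 0) + 1);
  return counts;
}

// Example: two dominant builders plus a long tail, one day of 7200 slots.
console.log(simulateSequences([0.45, 0.35, 0.2], 7200));
```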
But I think some component of randomization does make sense to prevent multi-slot MEV strategies. Thank you so much. That was a great talk and a good start to the day, to MEV Day; I like the name that you gave it. This is the MEV room, you know? Thank you. Thank you. So we have our next talk coming up really quick, with Felix Leupold. The talk is on whether we really even need PBS, and how we can solve MEV at the app versus infrastructure layer. This is going to be a full day of hardcore research, so really glad to see all of you here, engaging and asking questions. Make sure to scan the QR code if you have any questions for our speaker. And with no further ado, let's welcome Felix on stage. Hi, I will have to speed-run this talk, so if you want a more relaxed version of it, you can scan the QR code or go to tinyurl.com slash DAPCon, where I gave this talk in a 25-minute version. Let's start with the problem that I have with proposer-builder separation, why I think it's not a good idea. There are three main arguments, in my point of view. First, the trust and centralization problems that it brings onto the Ethereum base layer for all types of transactions, not even just the ones that actually contain MEV. Ethereum today is proud to have more than one million validators proposing and validating the chain, and we are one of the most decentralized chains, or the most decentralized chain, in the world. But at the end of the day, if you look at it, there's only two very well-known entities, Beaver and Titan, that propose more than 90% of the blocks' contents. And how valuable is it really to have a vast validator network when the most important part, the content of the block, is decided by two entities? Moreover, PBS adds this trusted component to the supply chain, the MEV relay, that builders have to put their faith in. And even if we trust the research that's ongoing, that's trying to propose ways to enshrine the MEV relay into the base layer and get rid of that trusted component, I think there's a couple of issues with that. First, it adds complexity to the core protocol, one thing that we're trying to avoid. And second of all, it's not even clear that, if implemented, it will find adoption, because MEV is a very latency-intense game, and any decentralized peer-to-peer gossip-based solution is always going to have latency disadvantages and also feature disparity. Builders today want to have bid cancellation, as an example. They want to make a bid and then cancel it if they change their mind. And such a feature is simply not possible in an enshrined PBS version, because once you gossip the message, you cannot take it back. And let's not forget that PBS actually maximizes the harm that is inflicted on users. In order to win the PBS auction, you have to create the block that is extracting the most value via sandwiching or other types of MEV attack, and thus we've created a system that really favors the worst possible outcome for the user. Okay, enough of a rant. What do we do about it? Well, let's maybe take a step back and try to find out why MEV exists in the first place. And here, really, the fundamental reason is that in today's system, every token trades at many different prices within the same block. Here I brought you an example block from a couple of days ago, in which ETH-USD trades at least seven different times with seven different prices. And so you'll see CEX-DEX arbitrageurs that are able to access, for instance, the Uniswap pool at the previous block's outdated price. 
And then the rest of the block will trade at the new, kind of fair, equilibrium price. You will see liquidators that are able to get bad collateral at a fixed liquidation penalty, 15-20% below fair market price, and the rest of the block trading at a different price. And last but not least, you have sandwichers that front-run a user to buy a token at price A; the user then trades at price B, and the sandwicher sells at price C. All of these strategies have one thing in common: the same asset trades in the same block at different prices. And the reason for this is that we basically took our well-known, standard, first-come-first-served mechanisms that work in traditional continuous-time markets and deployed them on a blockchain. And this was the key mistake. Ethereum doesn't have continuous time. There's only new information being released every 12 seconds, and then it's released in one go. And the ordering of the information within the block is up to a unilateral party, the proposer, to decide. And so it's unsafe to deploy any kind of mechanism that works on continuous time on a discrete-time blockchain. So what's next? What can we do to fix this? Well, as you might guess, we need to come to a point where trading the same asset in the same block leads to the same execution regardless of trade ordering. Or, put differently, we need to get to one price per token per block. This design requires being able to batch trades together, so it doesn't work on raw Ethereum transactions. Instead, we need trade intents, or just signed limit orders, and to batch those together in a multidimensional auction which can later be cleared at a single price. For details on how this looks, I encourage you to look at Cow Protocol, the trading mechanism that powers CowSwap, which pioneered and implements this mechanism. It's processing many billions of dollars in transaction volume every month and is basically proof that this mechanism can work on-chain. Cow Protocol provides MEV protection for all the cases that I mentioned two slides ago. We protect swappers by having buyers and sellers that trade the same asset receive the same price regardless of their ordering. And we can even match them together in what is known as a coincidence of wants. By making AMMs part of the batch and treating them like swappers, LPs no longer trade at outdated prices, but instead get the fair new equilibrium clearing price. And liquidations are just stop-loss orders, which you can also add to the total liquidity pool, making sure that collateral recovery happens at the most efficient and fair price. I want to leave you with this number, which is a Dune query that you can check for yourself, that shows that more than 97% of MEV today is trading-related, and that is sandwiches, CEX-DEX arbitrage, backrunning, liquidations. And those can be vastly reduced, if not completely eliminated, at the application layer, by not using pseudo-continuous-time mechanisms. So I'd like you to join me on my mission to convince this space that we should fix the mechanism rather than making the chain more complex. And with that, thank you very much. Thanks for that great talk. If any of you have questions, you need to scan the QR code and ask the questions remotely. I'm going to read out the question for those joining virtually. Do you have an estimation of the reduction in MEV if the only DEX used is CoW? Yeah, so right now, CowSwap focuses on swap MEV protection. 
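The one-price-per-token-per-block idea described above can be sketched as follows; a minimal illustration of clearing batched buy and sell limit orders at a single uniform price, not Cow Protocol's actual solver logic.

```typescript
// Sketch of uniform-price batch clearing: collect signed limit orders for one
// token pair, pick the single price that maximizes matched volume, and have
// every matched trade execute at that one price, regardless of ordering.
interface Order {
  side: "buy" | "sell";
  amount: number;     // amount of the token being traded
  limitPrice: number; // worst acceptable price for this order
}

function uniformClearingPrice(orders: Order[]): number | null {
  let best: { price: number; volume: number } | null = null;
  // Try each limit price as a candidate clearing price.
  for (const candidate of orders.map((o) => o.limitPrice)) {
    const demand = orders
      .filter((o) => o.side === "buy" && o.limitPrice >= candidate)
      .reduce((sum, o) => sum + o.amount, 0);
    const supply = orders
      .filter((o) => o.side === "sell" && o.limitPrice <= candidate)
      .reduce((sum, o) => sum + o.amount, 0);
    const volume = Math.min(demand, supply);
    if (best === null || volume > best.volume) best = { price: candidate, volume };
  }
  return best !== null && best.volume > 0 ? best.price : null;
}
```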
I didn't break the number 97% into swap AMM resistance and liquidations. So right now, it would probably be less than 97%. But the mechanism itself, not the product as it exists today, but the mechanism itself would be able to reduce MEV by that number that I showed. The other question is, do you think PBS causes centralization or is it correlation? I think PBS causes centralization mainly because it's a winner-takes-all latency game. I wouldn't say it's correlation. There's also a reason I couldn't get to that slide because I didn't have enough time for why I think cow protocol or an application specific, um, auction is not going to cause the same centralization or even if it would not affect kind of all types of transactions. So if I want to make a transfer, I'm not kind of subject to the censoring that happens today in the, in the builder market. Um, but yeah, for this, I would refer to the longer talk. How would you compare cow with what Sorella is doing to fix MEV on the application layer via Uniswap hooks? Yeah, I mean, cow is working out there and live. I think the mechanism is relatively similar. And yeah, I think that's the gist of it. You can deal with the last question. How does cow deal with multi-block MEV? It doesn't, because basically the batch itself has its own kind of heartbeat, its own time. And so if you were to try to do multi-batch MEV, I think you're running the risk that whoever solver will win the batch is going to execute the trade before you. So there's no real incentive for you to...", "eventId": "devcon-7", - "slot_start": 1731555000000, - "slot_end": 1731556800000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1RcEikB5_ALOwZaJQaAvBqDR_O7aF9ycww9YUXYxXCFA" + "slot_start": 1731639300000, + "slot_end": 1731639900000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1spOihF0kLB_BzD62uWufsHORVgg_JGXZoISZsJris6M", + "resources_slides": "https://drive.google.com/file/d/1qigMecB5gPgxQ_SUfpKx0lRZ1jfQ0OTc/view", + "speakers": [ + "pascal-stichler" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 6, @@ -438718,6 +437490,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -439095,8 +437868,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -439427,7 +438198,9 @@ 0, 0, 0, + 6, 0, + 6, 0, 0, 0, @@ -439458,6 +438231,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -439466,6 +438240,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -439734,6 +438509,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -439981,9 +438757,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 2, @@ -439998,59 +438774,35 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "is-multi-block-mev-a-thing-insights-from-2-years-of-mev-boost-data", - "sourceId": "E3JADX", - "title": "Is multi-block MEV a thing? Insights from 2 years of MEV Boost Data", - "description": "Multi-block MEV describes MEV that arises from one party controlling several consecutive slots. Currently, it is discussed as a potential blocker for several prominent mechanism designs. We analyzed two years of MEV boost data covering more than 5 million slots to investigate historical patterns of it. 
Amongst other findings, we see that fewer multi-slot sequences occur than would be randomly feasible; however, payments for longer sequences are higher than average.", "track": "Cryptoeconomics", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ "Economics", "Tokenomics", "MEV", "data", "analysis", "Economics", "MEV", "Tokenomics" ], "keywords": [ "Multi-block MEV", "Data Analysis" ], "duration": 1085, "language": "en", "sources_swarmHash": "", "sources_youtubeId": "KcEZHQiopTg", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736f4321b0f83434d3ca90a", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736f4321b0f83434d3ca90a.vtt", "transcript_text": " Did quite a few people already make it to the MEV stage this Friday morning? Seems like there are quite some MEV ultras. Looking forward to it. I'll be doing a quick intro on multi-block MEV, just a quick lightning talk, so let's dive right in. Quick recap: what is multi-block MEV? Actually, multi-block MEV means one block builder has multiple slots in a row and can extract more MEV by having the slots in a sequence than by having individual slots. This arises due to the fact that MEV accrues exponentially; if we look at the data, we can see that over time it grows slightly exponentially. For example, one strategy could be to manipulate prices: a block builder can, for example, buy a certain token in slot one, then only include buy transactions on one side and not include the sell transactions, and then in his last slot capture the MEV of selling the token first. This is one potential strategy of manipulating the price. There are other ways to do it. We can do liquidation attacks, forcing the price to go below a certain threshold and thereby liquidating assets and capturing the liquidation gains. So I won't go too deep into it, but there's a whole set of strategies. What did we do? We looked at data since the merge, 4.3 million data points of MEV-Boost payments, and did, on the other side, a Monte Carlo simulation of daily market shares of builders to see what a statistically normal distribution of multi-slots would look like, to have a baseline. And based on this, what we saw is, interestingly, we have fewer multi-slot sequences than expected. On the chart on the right, we see the blue bars being the expected distribution of multi-slots; we see fewer of them. Also interesting, what we saw: the longest sequence was 25 slots in a row by the same builder, Beaver Build. And for the same validator and same builder, it was 11 slots in a row in March of this year. So what we see, just by statistics, is that there are long sequences, which would allow for multi-slot MEV strategies. 
So in the next step, what we looked at was the payments, the MEV boost payments for longer sequences. So this is basically, for a sequence of length 5 for example, what was the average payment. What we see, interestingly, is that it increases for longer sequences. So it seems there is a value in longer sequences. However, why this is the case, so far we can only speculate. One hypothesis could be that MEV-Boost, being a continuous first-price auction, basically works, from an economics perspective, like a second-price auction in terms of the value where we end up, and so it could actually be driven not by the highest bidder but by the second-highest bidder, who has an increasing value for the blocks based on private order flow, MEV captured in private order flow. So it could be the case that the intrinsic value of the second-highest bidder actually increases, and that drives these increasing values. So what we also did is look at the payments per slot. So in longer sequences, what was the bid for the slot at a specific point there? We saw so far there is an increase, but only a very, very slight increase, so this led us to the conclusion that so far we don't see dedicated multi-slot strategies. We also looked at the top 10 builders, to see if there's a certain pattern for the top 10 builders, if there are builders that are deviating from the mean. We didn't see any very outstanding data, just a few builders that have more slots, more multi-slot sequences, or that are particularly strongly correlated with the same validators. However, from our perspective, that's probably based on things like latency and co-location and not based on other types of strategies that are run. The last thing that we looked at was autocorrelation of MEV boost payments, to see if we can use historical MEV boost payments to predict future ones. There what we see, based on different correlation metrics, is that it strongly goes down after the second or third slot, so it means there's a very, very low autocorrelation, so a low predictability: we don't have times of high or low MEV, but there's a fast reversion to the mean overall. Yes, that was the quick run-through. If the topic in general is interesting, there's a post on ETH Research going into the details, also linked to the Jupyter notebook with all the data, and I'm also doing a workshop today in the afternoon on execution tickets, agent-based simulation of execution tickets, so whoever is interested, feel free to join. Otherwise, happy to take on questions. I think I have around one and a half minutes or one minute left for questions. Feel free to scan the QR code or ask them directly. So we already have two questions. We want more. You can just scan the QR code and put them in. And one quick question, Pascal. Where is your talk, so that the audience is aware of the room that the talk is in? The talk is in classroom B, so just down the aisle. So I'll read out the question for those who are joining virtually. Have you studied Arbitrum Time Boost? Do you think DAOs should be capturing MEV revenue? To be honest, I haven't looked deeply into Arbitrum Time Boost. I think there are people in the audience that have much more knowledge of this, so I can't really give an in-depth take. Unlike the proposer, a builder is not guaranteed to win the PBS auction multiple times in a row.
Would you advocate for a random proposer allocation, which does not allow the same proposer in a row? In general, I think it makes sense. However, I think there is a caveat. So, I mean, that's a bit of an outlook already on the simulation. What we saw is that the secondary market usually reduces decentralization, which means there should be some component of just-in-time auction. However, I think with the current MEV-Boost setup, what we have is the problem of in-flight variation. So I think a combination of both could be interesting. But I think some component of randomization does make sense to prevent multi-slot MEV strategies. Thank you so much. That was a great talk and a good start to the day, to MEV Day. I like the name that you gave. This is MEV Room, you know? Thank you. Thank you. So we have our next talk coming up really quick with Felix Leupold. The talk is on whether we really even need PBS and how we can solve MEV at the app versus infrastructure layer. This is going to be a full day of hardcore research, so really glad to see all of you here and engaging and asking questions. Make sure to scan the QR code if you have any questions for our speaker. And with no further ado, let's welcome Felix on stage. Hi, I will have to speed-run this talk, so if you want a more relaxed version of it, you can scan the QR code or go to tinyurl.com slash DappCon, where I gave this talk in a 25-minute version. Let's start with the problem that I have with proposer-builder separation, why I think it's not a good idea. There are three main arguments from my point of view. First, the trust and centralization problems that it brings onto the Ethereum base layer for all types of transactions, not just the ones that actually contain MEV. Ethereum today is proud to have more than one million validators proposing and validating the chain, and we are one of the most decentralized chains, or the most decentralized chain, in the world. But at the end of the day, if you look at it, there are only two very well-known entities, Beaver and Titan, that propose more than 90% of the blocks' contents. And how valuable is it really to have a vast validator network when the most important part, the content of the block, is decided by two entities? Moreover, PBS adds this trusted component to the supply chain, the MEV relay that builders have to put their faith in. And even if we trust the research that's ongoing, that's trying to propose ways to enshrine the MEV relay into the base layer and get rid of that trusted component, I think there's a couple of issues with that. First, it adds complexity to the core protocol, one thing that we're trying to avoid. And second of all, it's not even clear that, if implemented, it will find adoption, because MEV is a very latency-intense game, and any decentralized peer-to-peer gossip-based solution is always going to have latency disadvantages and also feature disparity. Builders today want to have bid cancellation, as an example. They want to make a bid and then cancel it if they change their mind. And such a feature is simply not possible in an enshrined PBS version, because once you gossip the message, you cannot take it back. And let's not forget that PBS actually maximizes the harm that is inflicted on users. In order to win the PBS auction, you have to create the block that is extracting the most value via sandwiching or other types of MEV attacks, and thus we've created a system that really favors the worst possible outcome for the user. Okay, enough of a rant. What do we do about it?
First, let's maybe take a step back and try to find out why MEV exists in the first place. And here, really, the fundamental reason is that in today's system, every token trades at many different prices within the same block. Here I brought you an example block from a couple of days ago in which ETH/USD trades at least seven different times at seven different prices. And so you'll see CEX-DEX arbitrageurs that are able to access, for instance, the Uniswap pool at the previous block's outdated price. And then the rest of the block will trade at the new kind of fair equilibrium price. You will see liquidators that are able to get bad collateral at a fixed liquidation penalty, 15-20% below fair market price, and the rest of the block trading at a different price. And last but not least, you have sandwichers that frontrun a user to buy a token at price A, the user then trades at price B, and the sandwicher sells it at price C. All of these strategies have one thing in common. The same asset trades in the same block at different prices. And the reason for this is that we basically took our very well-known, standard first-come-first-served mechanisms that work in traditional continuous-time markets and deployed them on the blockchain. And this was the key mistake. Ethereum doesn't have continuous time. There's only new information being released every 12 seconds, and then it's released in one go. And the ordering of the information within the block is up to a unilateral party, the proposer, to decide how to order things there. And so it's unsafe to deploy any kind of mechanism that works on continuous time on a discrete-time blockchain. So what's next? What can we do to fix this? Well, as you might guess, we need to come to a point where trading the same asset in the same block should lead to the same execution regardless of trade ordering. Or put differently, we need to get to one price per token per block. This design requires being able to batch trades together, so it doesn't work on raw Ethereum transactions. Instead, we need trade intents, or just signed limit orders, and batch those together in a multidimensional auction which can later be cleared at a single price. For details on how this looks, I encourage you to look at CoW Protocol, the trading mechanism that powers CoW Swap, which pioneered and implements this mechanism. It's processing many billions of dollars in transaction volume every month and is basically proof that this mechanism can work on-chain. CoW Protocol provides MEV protection for all the cases that I mentioned two slides ago. We protect swappers by having buyers and sellers that trade the same asset receive the same price regardless of their ordering. And we can even match them together in what is known as a coincidence of wants. By making AMMs part of the batch and treating them like swappers, LPs no longer trade at outdated prices, but instead get the fair new equilibrium clearing price. And liquidations are just stop-loss orders, which you can also add to the total liquidity pool and make sure that collateral recovery happens at the most efficient and fair price. I want to leave you with this number, which is a Dune query that you can check for yourself, that shows that more than 97% of MEV today is trading-related, and that is sandwiches, CEX-DEX arbitrage, backrunning, liquidations. And those can be vastly reduced, if not completely eliminated, at the application layer, by not using pseudo-continuous-time mechanisms.
So I'd like you to join me on my mission to convince this space that we should fix the mechanism rather than making the chain more complex. And with that, thank you very much. Thanks for that great talk. If any of you have questions, you need to scan the QR code and ask the questions remotely. I'm going to read out the question for those joining virtually. Do you have an estimation of the reduction in MEV if the only DEX used is CoW? Yeah, so right now, CoW Swap focuses on swap MEV protection. I didn't break the 97% number down into swap MEV resistance, AMM resistance, and liquidations. So right now, it would probably be less than 97%. But the mechanism itself, not the product as it exists today, but the mechanism itself would be able to reduce MEV by that number that I showed. The other question is, do you think PBS causes centralization or is it correlation? I think PBS causes centralization mainly because it's a winner-takes-all latency game. I wouldn't say it's correlation. There's also a reason, which I couldn't get to on that slide because I didn't have enough time, why I think CoW Protocol, or an application-specific auction, is not going to cause the same centralization, or even if it did, it would not affect all types of transactions. So if I want to make a transfer, I'm not subject to the censoring that happens today in the builder market. But yeah, for this, I would refer to the longer talk. How would you compare CoW with what Sorella is doing to fix MEV on the application layer via Uniswap hooks? Yeah, I mean, CoW is working out there and live. I think the mechanism is relatively similar. And yeah, I think that's the gist of it. You can deal with the last question. How does CoW deal with multi-block MEV? It doesn't, because basically the batch itself has its own kind of heartbeat, its own time. And so if you were to try to do multi-batch MEV, I think you're running the risk that whichever solver wins the batch is going to execute the trade before you. So there's no real incentive for you to...", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731639300000, - "slot_end": 1731639900000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1spOihF0kLB_BzD62uWufsHORVgg_JGXZoISZsJris6M", - "resources_slides": null, - "speakers": [ - "pascal-stichler" - ] + "slot_start": 1731664800000, + "slot_end": 1731668400000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1TAcraJVlaaRRLhKud8QT_bgLYkfYy-QRJtI2GiS2nd4", + "resources_slides": "" }, "vector": [ 0, 0, - 6, 0, 0, 0, @@ -440058,6 +438810,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -440090,7 +438843,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -440801,9 +439553,7 @@ 0, 0, 0, - 6, 0, - 6, 0, 0, 0, @@ -440834,7 +439584,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -440843,7 +439592,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -441113,7 +439861,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -441362,9 +440109,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 2, @@ -441379,43 +440126,49 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "jackson-the-dev", - "sourceId": "GGHN3U", - "title": "Jackson the Dev", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm.
Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", + "id": "json-rpc-enhancement-in-geth", + "sourceId": "7KZLFF", + "title": "JSON-RPC Enhancement in Geth", + "description": "Introducing trace_* namespace and eth_getTransactionBySenderAndNonce into ethereum execution clients(geth,reth) to enhance the transaction and trace querying capabilities.", + "track": "[CLS] EPF Day", + "type": "Lightning Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Architecture", + "Frameworks", + "User Experience" + ], + "keywords": [ + "execution client", + "json-rpc" + ], + "duration": 801, "language": "en", - "speakers": [], + "sources_swarmHash": "4e61ae38126f26ad651edd1931f371700863c255c80b6960001052dbc4aa16af", + "sources_youtubeId": "ifYFJRoW4m8", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6734298f9dbb7a90e1af087a", "eventId": "devcon-7", - "slot_start": 1731664800000, - "slot_end": 1731668400000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1TAcraJVlaaRRLhKud8QT_bgLYkfYy-QRJtI2GiS2nd4" + "slot_start": 1731469500000, + "slot_end": 1731470400000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1seSZfQPsg8riFizMYXy6BWgpFjxQVJYELPyjZazrxIc", + "resources_slides": "https://drive.google.com/file/d/1jXEZlPn40SQ2oW7F9lxWaQCmKnG2heiB/view", + "speakers": [ + "jsvisa" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, 0, 0, 0, @@ -441431,6 +440184,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -441842,6 +440596,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -442178,6 +440933,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -442223,6 +440979,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -442265,6 +441022,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -442723,6 +441481,7 @@ 2, 0, 0, + 0, 2, 0, 0, @@ -442741,40 +441500,44 @@ }, { "session": { - "id": "json-rpc-enhancement-in-geth", - "sourceId": "7KZLFF", - "title": "JSON-RPC Enhancement in Geth", - "description": "Introducing trace_* namespace and eth_getTransactionBySenderAndNonce into ethereum execution clients(geth,reth) to enhance the transaction and trace querying capabilities.", - "track": "[CLS] EPF Day", - "type": "Lightning Talk", + "id": "keynote-glass-houses-and-tornados", + "sourceId": "K9A8EG", + "title": "Keynote: Glass Houses and Tornados", + "description": "The Tornado Cash sanctions and criminal prosecutions have challenged longstanding assumptions within crypto about the limits of money transmission licensing, money laundering statutes, and sanctions laws. They've also revealed a longstanding assumption from some in policy and law enforcement circles: that blockchains have always been and must remain transparent. Neither assumption has served us well and the time has come for legal certainty. 
This talk is about how we get there.", + "track": "Cypherpunk & Privacy", + "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", - "featured": false, + "audience": "Lobby", + "featured": true, "doNotRecord": false, "tags": [ - "Architecture", - "Frameworks", - "User Experience" + "Governance", + "Mixers", + "Open Source Software", + "Privacy" ], "keywords": [ - "execution client", - "json-rpc" + "Legal", + "Government", + "Regulation" ], - "duration": 801, + "duration": 904, "language": "en", - "sources_swarmHash": "4e61ae38126f26ad651edd1931f371700863c255c80b6960001052dbc4aa16af", - "sources_youtubeId": "ifYFJRoW4m8", + "sources_swarmHash": "", + "sources_youtubeId": "7LRbiZ_FiSg", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734298f9dbb7a90e1af087a", + "sources_streamethId": "6736eb891b0f83434d441c83", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731469500000, - "slot_end": 1731470400000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1seSZfQPsg8riFizMYXy6BWgpFjxQVJYELPyjZazrxIc", - "resources_slides": null, + "slot_start": 1731648600000, + "slot_end": 1731649800000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1Xs3Tvj3iPf9ArWjPRjf3e7zXu_JG8R-eXuI5yEgHV6c", + "resources_slides": "https://drive.google.com/file/d/1ldvlW_g6g6BiOKBxdEhHXr9TprztsMKM/view", "speakers": [ - "jsvisa" + "peter-van-valkenburgh" ] }, "vector": [ @@ -442783,6 +441546,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -442793,7 +441557,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -443545,12 +442308,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -443591,7 +442348,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -443629,13 +442385,13 @@ 0, 0, 0, + 2, 0, + 2, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -443649,6 +442405,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -443924,6 +442681,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -444096,7 +442854,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -444109,49 +442866,53 @@ 0, 0, 0, - 0 + 0, + 2 ] }, { "session": { - "id": "keynote-glass-houses-and-tornados", - "sourceId": "K9A8EG", - "title": "Keynote: Glass Houses and Tornados", - "description": "The Tornado Cash sanctions and criminal prosecutions have challenged longstanding assumptions within crypto about the limits of money transmission licensing, money laundering statutes, and sanctions laws. They've also revealed a longstanding assumption from some in policy and law enforcement circles: that blockchains have always been and must remain transparent. Neither assumption has served us well and the time has come for legal certainty. This talk is about how we get there.", + "id": "keynote-how-to-properly-open-source-software-lessons-learned-from-the-linux-foundation", + "sourceId": "MDHXHK", + "title": "Keynote: How to Properly Open Source Software: Lessons Learned from the Linux Foundation", + "description": "It can be challenging to properly open source software: there are licenses, IP, security reporting, and many other issues that need to be addressed. In this talk, we will discuss the best practices for open source software development learned from almost 25 years of experience at the Linux Foundation. 
Attendees will learn about how to set up their projects for a variety of potential goals, including things like maximizing security and community building.", "track": "Cypherpunk & Privacy", "type": "Talk", "expertise": "Intermediate", - "audience": "Lobby", + "audience": "Developer", "featured": true, "doNotRecord": false, "tags": [ - "Governance", - "Mixers", "Open Source Software", - "Privacy" + "FOSS", + "Best Practices", + "development", + "open", + "Best Practices", + "FOSS", + "Open Source Software" ], "keywords": [ - "Legal", - "Government", - "Regulation" + "Linux Foundation", + "Open Development" ], - "duration": 904, + "duration": 1544, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "e13e672d87c72979ca89c66581ada9c4921fe5e1b70ca785bdf6e4e7dbff67ca", + "sources_youtubeId": "9wDusmibYCg", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736eb891b0f83434d441c83", + "sources_streamethId": "673816891b0f83434ddb4f43", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731648600000, - "slot_end": 1731649800000, + "slot_start": 1731649800000, + "slot_end": 1731651600000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1Xs3Tvj3iPf9ArWjPRjf3e7zXu_JG8R-eXuI5yEgHV6c", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1nEJvDuhtXFhZrplozdiBHSDSlr4Xbzxi2jSrYBCSPL8", + "resources_slides": "https://drive.google.com/file/d/1-bs4LIRrT8WUez0ZXR-bFVjkoU3apZvF/view", "speakers": [ - "peter-van-valkenburgh" + "hart-montgomery" ] }, "vector": [ @@ -444245,6 +443006,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -444584,10 +443346,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -444937,6 +443695,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -445002,7 +443761,6 @@ 0, 0, 0, - 2, 0, 2, 0, @@ -445022,7 +443780,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -445242,6 +443999,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -445467,14 +444225,13 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -445485,54 +444242,41 @@ 0, 0, 0, - 0, - 2 + 0 ] }, { "session": { - "id": "keynote-how-to-properly-open-source-software-lessons-learned-from-the-linux-foundation", - "sourceId": "MDHXHK", - "title": "Keynote: How to Properly Open Source Software: Lessons Learned from the Linux Foundation", - "description": "It can be challenging to properly open source software: there are licenses, IP, security reporting, and many other issues that need to be addressed. In this talk, we will discuss the best practices for open source software development learned from almost 25 years of experience at the Linux Foundation. 
Attendees will learn about how to set up their projects for a variety of potential goals, including things like maximizing security and community building.", - "track": "Cypherpunk & Privacy", + "id": "keynote-infinite-diversity-in-infinite-combinations", + "sourceId": "3MNMHA", + "title": "Keynote: ⿻ Infinite Diversity in Infinite Combinations", + "description": "This talk explores the evolving relationship between freedom, wisdom, and technology, centered on ⿻ Plurality—a philosophy that promotes collaborative diversity.\r\n\r\nDrawing on experiences from Taiwan and beyond, we’ll examine how decentralized governance can scale to bridge divides, empower autonomy, and co-create innovative solutions for the challenges of the 21st century.", + "track": "Real World Ethereum", "type": "Talk", - "expertise": "Intermediate", - "audience": "Developer", + "expertise": "Beginner", + "audience": "Community", "featured": true, "doNotRecord": false, - "tags": [ - "Open Source Software", - "FOSS", - "Best Practices", - "development", - "open", - "Best Practices", - "FOSS", - "Open Source Software" - ], "keywords": [ - "Linux Foundation", - "Open Development" + "Plurality" + ], + "tags": [ + "Decentralization", + "Governance", + "Political systems" ], - "duration": 1544, "language": "en", - "sources_swarmHash": "e13e672d87c72979ca89c66581ada9c4921fe5e1b70ca785bdf6e4e7dbff67ca", - "sources_youtubeId": "9wDusmibYCg", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673816891b0f83434ddb4f43", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "speakers": [ + "audrey-tang" + ], "eventId": "devcon-7", - "slot_start": 1731649800000, - "slot_end": 1731651600000, + "slot_start": 1731389400000, + "slot_end": 1731391200000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1nEJvDuhtXFhZrplozdiBHSDSlr4Xbzxi2jSrYBCSPL8", - "resources_slides": null, - "speakers": [ - "hart-montgomery" - ] + "sources_youtubeId": "n3R4ze2hesk", + "sources_swarmHash": "7b57f594e589cebcc14cb04fcc90c7201ef214a347ba31c146c0fbe984a280ae", + "resources_presentation": "https://docs.google.com/presentation/d/1hyqMQ-ALTG3QKpk5SkiuUcDNN1L0Z_UuyGNml54Xc60", + "resources_slides": "https://drive.google.com/file/d/1jBpgzgzMBMJo3TlLYH3CyBxqiwnVrJ4l/view" }, "vector": [ 0, @@ -445540,6 +444284,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -445629,7 +444374,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -445964,6 +444708,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -446317,7 +445062,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -446379,6 +445123,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -446388,6 +445133,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -446622,10 +445368,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -446680,7 +445422,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -446855,10 +445596,8 @@ 0, 0, 0, - 2, - 0, - 0, 0, + 2, 0, 0, 0, @@ -446871,35 +445610,43 @@ }, { "session": { - "id": "keynote-infinite-diversity-in-infinite-combinations", - "sourceId": "3MNMHA", - "title": "Keynote: ⿻ Infinite Diversity in Infinite Combinations", - "description": "This talk explores the evolving relationship between freedom, wisdom, and technology, centered on ⿻ Plurality—a philosophy that promotes collaborative diversity.\r\n\r\nDrawing on experiences from Taiwan and beyond, we’ll examine how decentralized governance can scale to bridge divides, empower autonomy, and co-create innovative solutions for the challenges of the 21st century.", - "track": "Real World Ethereum", + "id": "keynote-lessons-learned-from-tor", + "sourceId": 
"ZHU7UQ", + "title": "Keynote: Lessons learned from Tor", + "description": "I will share lessons learned during Tor's twenty years as free software fighting for privacy and human rights. We'll talk about distributed trust and privacy by design, how to help people understand the good uses of your tech, getting allies in both cypherpunks and government, why transparency and community-building are so essential to trust, and successes from other spaces. It may seem like the crypto wars never really end, but we all have a part to play in saving the world.", + "track": "Cypherpunk & Privacy", "type": "Talk", - "expertise": "Beginner", - "audience": "Community", + "expertise": "Intermediate", + "audience": "Engineering", "featured": true, "doNotRecord": false, - "keywords": [ - "Plurality" - ], "tags": [ - "Decentralization", - "Governance", - "Political systems" + "Anonymity", + "Privacy", + "Sustainability" ], - "language": "en", - "speakers": [ - "audrey-tang" + "keywords": [ + "Human", + "rights" ], + "duration": 1911, + "language": "en", + "sources_swarmHash": "bc74f4b9d585e354aacc1ead7ee0dec2d4b065e411877d110f120117979ff52f", + "sources_youtubeId": "o302oKXbdK8", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673811671b0f83434dc79ca7", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673811671b0f83434dc79ca7.vtt", + "transcript_text": " The microphone is working. The slides seem like they're working. I hope we'll find out soon if they're... I'm going to press the forward button. Okay, I see them over here. That means that they will appear pretty soon. So, hi, I'm Roger Dingledine from the Tor Project. Once upon a time, I started a non-profit called Tor and wrote the original code, so there's a lot to talk about today. I'm going to break this down into two pieces. The first piece is giving you a crash course on Tor and things to think about, and then the second piece is advice and lessons for the Ethereum and cryptocurrency communities from what we've learned over the past many years. So Tor is a lot of different things. It's a free software project, open source project that you learned about from the last speaker. It's also a bunch of different projects, each trying to provide privacy in various ways. It's a non-profit as I mentioned and the fun part about Tor is the global community of people all around the world who use it and rely on it and help to make it stronger and better. So many people use Tor in the form of Tor Browser which is a Firefox based browser that fixes a bunch of application level issues. So there's the underlying program Tor but the interface that most people see is Tor browser. Okay, so what am I talking about here in terms of privacy? I talk about privacy and security, but really the phrase is communications metadata. It's the idea that I want to browse the web and I want somebody watching my internet connection to not learn what destination I'm going to. And I want the website to be able to not learn where I'm coming from. So how many people here recognize Creepy NSA Dude from back in the Snowden times? So he helped to run the intelligence system for the US government and he has this scary, terrible quote, we kill people based on metadata. And the important thing here is nobody in the attacker world these days is trying to break the encryption anymore. The encryption works well. 
Instead, let's build a social graph of who's talking to who, when they're talking, how much they're talking, and then we'll look for who's in the middle of that graph, and then we break into their house and steal their laptop or whatever attack we have to do from there. And there are a bunch of other current events that are related to all of this. Here's a news story from, I guess, five years ago now. The U.S. Attorney General asks Facebook to stop doing end-to-end encryption for their messenger. And there's this terrible quote from the request. We're writing to request that Facebook not proceed with its plan to implement end-to-end encryption without ensuring there is no reduction to user safety. That drives me nuts. Imagine if they said, we're writing to request that Facebook not implement HTTPS without ensuring that there is no reduction to user safety. That would be an unbelievable thing for them to say, but here they are saying it. And then, fast forward four years, we still have the same arguments going on, where England is saying, but if you allow your users to have control over their own information, society will collapse, everybody will become criminals, we'll never be able to track everything. And another example from earlier this year: apparently in the US, law enforcement sent a YouTube video URL to somebody they were trying to attack, and then they went to YouTube and said, tell us everybody who watched that video. And I don't know if YouTube actually answered them, but that's the future of law enforcement, where you make use of the huge platforms that collaborate with governments, and that's it for your privacy. So I'd like to live in a world instead where people, individuals, have control over who gets to learn about them. And then the final example, which happened a couple of years ago: apparently there was somebody in the US state of Nebraska who wanted an abortion and talked to her mother on Facebook Messenger about whether she should get an abortion, and now she's being prosecuted for conspiracy to want an abortion, and that's the country that I live in now. So that's another case where privacy, the ability to have conversations about things that should be legal, is something that the world needs to keep. Okay, so I actually only use the word anonymity when I'm talking to researchers. When I'm talking to my parents, I tell them I'm working on a privacy system, because anonymity, I don't know who needs that, but privacy is an important value for everybody. And then when I'm talking to Walmart and Google and other companies, I work on communication security, because privacy is dead, says the Oracle guy, and anonymity is confusing. But of course we need to protect somebody from learning who our suppliers are, what we're investigating on the internet. When Goldman Sachs is thinking about stocks to invest in, of course they don't want their competitors watching their internet traffic to learn what research they're doing. And then from the government side, it's the same tools, but for governments I explain that I work on traffic-analysis-resistant communication networks. And again, it's the same security properties, but it's about figuring out how to phrase it for those groups.
And then there's a fourth category, which we can talk about in this country, but maybe not in some countries nearby here, on the human rights side, where there are people who can't reach bbc.com and they want to be able to read the news or share news. Okay, so how do you build one of these? The easy answer is you have some centralized system like a VPN, and all of the users connect and ask for a web page. And that's great, except what about that central point of failure? So imagine that relay in the middle. It always starts off with, we're never going to look at any of your traffic. Don't worry. We promise to keep you safe. Okay, actually, we do look at all of your traffic, but don't worry, we won't write it down. Okay, actually, we do log everything, but don't worry, we won't tell other people. Okay, well, of course, we answer governments, but don't worry, we won't tell other people. And I don't know where the next step after that is. The problem there is you can't tell. It's a centralized system. You don't know what they're going to do with your data. They have your data. They promise not to screw you. That's not good enough. And it's even worse than that. Even if that centralized system is somehow perfectly honest and really does want to keep you safe, it's still one computer somewhere. All the traffic goes in, the same traffic goes out, and if you're good at math, maybe you can match things up: this data flow coming in matches that data flow going out, so this user did that. So even if the VPN provider is totally honest and perfectly secure, for all definitions of that, the fact that it's a centralized service is itself still a danger if there's some sort of network-level adversary that gets to watch things. So the goal of Tor is to distribute the trust, to decentralize over multiple relays, so there's no single point that gets to know this user is going to that destination. So it's no longer about privacy by promise. It's no longer about, I have the data, I promise I will keep you safe. Now it's about privacy by design, where the architecture of the system means there's no place that knows what you're doing on it. So that's the summary of Tor's goals and threat models. In terms of the network over time, we have about 8,000 relays around the world. Here's a graph of the load on the network and the capacity of the network over time. So we're continuing to grow. The fun part of this is that if you look at the Wikipedia bandwidth graph, it looks like this, just five or ten years behind. So we're continuing to go up as Wikipedia did back then. Okay, so how do you actually think about security for Tor? There are two big answers for that. One of them is the diversity of where the relays are around the world. So as we add more relays in different places around the world, it becomes harder for a given attacker to be able to see the traffic going into the Tor network and the corresponding traffic coming out. And then the second one, which is a bit harder to quantify, is diversity of types of users. So we have a lot of users in Iran. They're not all political dissidents. Most of them are using Tor because Iran blocked Facebook and they can't read their pictures of kittens on the blogs that they want to look at. And that ordinariness is really important for security. If everybody using Tor in Iran were trying to tear down their government right now, then the fact that you're using Tor would itself be dangerous.
So we need a whole lot of ordinary people in order to provide protection for all of the higher-value users out there also. Okay, so I talked before about the network-level privacy: hiding your IP address, where you're coming from, where you're going. But there's a second piece that you have to do in Tor, which is the application-level privacy. So we have Tor Browser, and it fixes a bunch of browser-layer things like cookies, what version of JavaScript, what languages you prefer, how many pixels by how many pixels your browser window is, all of these ways that websites can recognize you. Even if Tor is doing its job, they can still say, oh, that's the guy running on Mac OS who prefers Thai, but Korean will be okay. So there are two different pieces to what we need to do for safety. And Tor Browser actually tries to keep you safe in a way that goes beyond all these new private browsing modes and so on out there, which, I mean, sound great. But DuckDuckGo did a study a few years ago looking at what users actually think private browsing mode does. And most of them misunderstand. So private browsing mode protects against somebody looking at your hard drive after you've been browsing for a while. It doesn't protect against somebody on the network watching you. It doesn't protect against the website. It doesn't protect against an advertising company tracking everything you do. So a lot of users thought private browsing mode is basically what Tor Browser provides. It should actually protect you against advertisers, websites, ISPs, telephone companies, intelligence agencies, and so on. So here, I found this fun comic explaining the private browsing mode world. So we've got the huge Chrome saying, which website would you like to see? And we've got our sad user saying, I don't want you to know. And then Chrome puts on a sock puppet and says, how about you tell the hand? And then the user's like, okay, great, yes, now you've reassured me about your privacy. So I want something that's more than just a centralized situation where they promise not to screw you, but actually they're still getting all the data. Okay, so transparency is really important for Tor. Of course we're open source. Of course we're free software. Of course we have public design documents and specifications that describe what we tried to build. And of course we are publicly identified. Hi, I'm Roger. I wrote Tor. And the goal of that is to build a community of people who can trust us. It isn't just about putting the code on some website and magically people will believe us. It's about growing a community of people around the world who have met us and argued with us and thought about whether we should do things differently and collaborated. And part of the challenge there is I always talk to some security person who says, oh, ha, ha, the privacy people are talking about transparency. That's so stupid. It's not a contradiction. You can get both. Privacy is about choice. Privacy is about being able to choose who learns things about you. So because I have privacy, I can choose to be transparent and say, hi, I'm Roger, this is what I built. Okay, so that was the crash course on Tor. Now let me talk about some lessons learned from the Tor world that might apply to projects you're working on. So, the first one: make allies, make allies even among your adversaries.
So, yes, you need to have good uses and you need to publicize those good uses. So this is for the people who were here two talks ago: don't be Tornado Cash. Figure out how to tell the world about your good uses, and maybe that means, in your case, not just having people who agree with you in the law enforcement groups that might be wondering if you should stay legal, but also getting users in those places, so that they understand why your tool is important and needs to keep existing. So we asked people for user stories about Tor a while ago, and here are two. I'm a political activist, part of a semi-criminalized minority. In my younger years, I entered the public debate openly. Only by anonymizing means, among which Tor is key, can I get my message out without having police come to check my papers. Tor allows me freedom to publish my message to the world without being personally persecuted for it. So we have a lot of activists and journalists around the world who have this sort of story. Or: I'm a doctor in a very political town. You can imagine Washington DC or Brussels. I have patients who work in legislation where billions of dollars are being spent. When I have to do research on diseases and treatments for my senators, of course I'm going to use Tor. Because I know that somebody is going to want to learn about the information, the medical information about the senators that I treat. And the same story also happens on the legal side, where, yes, you're supposed to have attorney-client privilege, but the reality is that you need to use technical mechanisms to secure your communication. You can't rely on the U.S. government promising not to listen in on your phone call with your lawyer. Over and over we see cases where it turns out they do listen in. And we've also had some other high-profile users over the years. Happy to talk afterwards about the whistleblowing side of Tor. But the bigger picture there is technology isn't neutral. Looking at this in terms of power imbalances: if you already have power, you don't need Tor as much. You've got a military, you've got law enforcement, you've got the laws, you've got other options. Whereas if you're a minority dissident in Myanmar, you don't have other options. So technology is inherently political. Think through what the changes are that you want to make in the world and make sure that the architecture and the projects that you're working on will steer towards those changes. Otherwise you'll just accidentally recreate the existing power structures of centralization and government control and things like that. And a couple of other fun, weird stories in that direction. I had a friend who was in the hacker community and he went and joined the FBI. And he understood Tor. So I had a person inside the FBI who was a Tor ally. And he was telling me a story of being in a Department of Justice meeting where they said, you know what we should do today? We should make a law outlawing Tor. And he got to stand up, because he was in that meeting, and say, excuse me, are you trying to make my job impossible? I work for the FBI and I need Tor. So having him in that room meant that they didn't try to undermine my project that day. So we need a lot of people, not just who understand Tor, but who use it and rely on it, in each of these cases. Okay, lesson two: how do you build a sustainable community?
So in the Tor case, we approach it with organic growth, and the fundamental building block that we have is altruism. It's people who have extra resources, bandwidth, computer connections; they're at a university or an ISP or a company, and bandwidth is essentially free for them, so they contribute that back to the Tor network. Or you could imagine the financial incentives world, where the building block is capitalism. You want to bring people in who are trying to maximize the revenue that they get out of participating. And there are some challenges on the capitalism side. Let's say that we built an alternative Tor network where everybody's here to maximize their revenue. In that case, shouldn't you minimize your costs? And everybody should go to the same hosting provider in Germany that's cheapest. And that would be a tendency towards re-centralization. Or if you're in it for the money, why don't you sell the user data on the side? Make an extra buck, maximize your revenue even more. So it's not just different ways to build a community that end up in the same destination. You get different properties of your network. Scalability might be better on the capitalism side, but attention to user privacy, I think, is better on the altruism, organic growth side. So it depends what you want out of your system. And I want to be really cognizant here. I don't want to show up and be like, capitalism is inappropriate in every situation, though there is a part of me that does want to say that. But part of the lesson here is: think through what you want out of your network and make sure that you're building a community that has the values that are most important for you. And in my case, the thing that's most important is making sure that the users will remain safe against whatever attackers they might worry about. Okay, so that said, there are a bunch of other incentive approaches which can potentially be helpful. So gamification, badges, leaderboards, building community, making things fun like that, and community building of having meetups. Like at various hacker conferences, we have relay operator meetups, and a bunch of people who do contribute talk to each other, share notes, meet each other. And it turns out that if somebody tries to get them to take down the relay, they feel like they're part of a community, and they feel like they're letting their friends down if they decide to turn off the relay. And another piece of that is there are non-profits all around the world who get together and run Tor exit relays in their community. There's one in France, there's one in the Netherlands, there's one in Sweden, there are like three in Germany, two in Canada, several in the US. And the goal of that is, first of all, you have a non-profit that's actually running the relays, but also you can put all the donations together and get more economies of scale, better bandwidth, better ISPs and so on. And there are some other options that we don't do now but maybe we should. One of them would be: maybe I don't want relay operators who are trying to maximize their revenue, but maybe we could still take donations and subsidize their costs. So they're not profiting, it just costs them less to do the thing that I want them to do anyway.
So that would be an example that doesn't ruin the incentive structure but would still be helpful for growing the community. And there are some other examples, like maybe if you run a relay you get faster Tor, which has some problems that I will talk about afterwards. So the bigger picture here: consider your ground truth. So if you're trying to build this, in the economics world it's called mechanism design, you folks might call it smart contracts, but if you're trying to structure a situation to steer people the way you want to steer them, think about what you can actually verify. How are you going to verify that the operators are doing the thing that you want them to do? So for example, if you're trying to incentivize bandwidth, if you're paying people based on how much bandwidth their relay provides: in the current Tor design, relays basically self-advertise how much bandwidth they've got, and that's fine, because they don't get paid for claiming a larger number. But if we naively just set up a thing saying, oh, you're providing a big number, here's your big check, then there would be new opportunities for gaming and lying, and we would end up with a weird arms race where the community is lying to us about what they're doing. Or another example: let's say we want to pay people more if they're not all running at the same ISP in Germany. So we want to incentivize based on location around the world. How do you actually know where the relays are? So here's a fun example of somebody in, I think, Sweden, who set up a relay and hacked all the GeoIP databases in the world and listed himself as being in Antarctica. And he did it because it was funny. Great. I agree. It's funny. Imagine if we paid him more by saying, whoa, that's a really diverse location. The GeoIP databases in the world are not robust. They're not secure to gaming. They're just people who are trying to help out on locating things. So that's an example where your ground truth doesn't actually enable you to do the things you want. So, lesson three: be critical infrastructure. Have other projects rely on you in a way that they need you to stick around. So Brave bundles Tor. We have a bunch of private messaging tools and whistleblowing tools and operating systems. New York Times has an Onion address, BBC, Deutsche Welle and so on. And there's a whole ecosystem of pluggable transports for transforming your Tor traffic in a way that makes it harder to censor, which are reused and reusable by VPN companies and so on to let their users go through censoring firewalls. And another fun, bizarre example: more than half of the nodes in the Bitcoin network are reachable as Tor onion addresses. So the Bitcoin network relies on the Tor network for their connectivity, reachability, privacy, and so on. And if you know the people in the Bitcoin world who made this choice, please introduce them to me. And not just the Bitcoin world, there's also the Ethereum world. We have been talking a lot with the Funding the Commons people about how to have alternative ways of sustaining the open source community. And there are a bunch of hardware wallets, shout out to Trezor. And we did an NFT a while ago, shout out to PleasrDAO. And here we are at DEVCON in the cypherpunks track, so there's a lot of overlapping values here already in terms of people wanting privacy, people caring about decentralization, so there's a lot of overlap there. And then the fourth lesson: learn from past wins.
So that HTTPS example that I talked about before: just 10 or 15 years ago, HTTPS was not pervasive. I was arguing with Google that they should turn on HTTPS for their websites. And they said, well, we can't allow encryption by default because schools would block us. Schools want to be able to spy on their students and learn what searches they're doing, so we can't allow encryption. And now, 10 years later, it's normal. Of course you would never go to a website without HTTPS. So we won that somehow, and it became pervasive. It became normal around the world. So how do we do that again? So the next phase that we have is this end-to-end messaging, the WhatsApps of the world, the Signals of the world, where there are real adversaries trying to make it so that only criminals would want privacy on the internet. So it's all part of the bigger crypto wars, the cryptography wars, of whether people around the world should have control over their own information and get to decide who gets to learn what they're doing on the internet. And this is especially challenging because the US and England and Australia are the ones yelling, encryption is scary, society will collapse. And then when their foreign ministry goes to China and says, you're a dictator, stop doing that, China is like, look, I'm saying the same thing you're saying: you're scared of encryption, I'm scared of encryption. So there's a real challenge where we, this audience, need to somehow teach our governments and our legal people why encryption should be a basic human right. And there's some weird stuff going on there. Apple is on our side for this round. Facebook is on our side for this round. And that's because they know their customers actually want privacy. So there's something we can leverage here. And then a couple of last thoughts before we get to the Q&A, which I think we're ready for. EFF, the Electronic Frontier Foundation, is running a campaign to get Tor relays running in universities all around the world. And they'll give you an awesome challenge coin if you do it. So if you're connected to a university, please chat with me afterwards. I'd love to get you involved in that. Okay, and then last thoughts as we begin the Q&A. Consider what kind of incentives will lead to the network you want. Make allies, and make sure to teach them about the importance of encryption, privacy, freedom around the world. Please join the Tor network as a relay or a bridge or a Snowflake. If you don't know what those mean, happy to chat with you afterwards. But please help out in providing privacy to people all around the world. And yes, we're a nonprofit, and I'd be happy to get your donations in whatever cryptocurrency you would like to provide. Thank you. Thank you so much for that, Roger. A little reminder that our privacy should be ours to choose. Now to get straight into the questions: what were the most challenging legal obstacles you encountered throughout Tor's history? Have any of them ever caused you to wake up in a cold sweat at night? Yeah. What a great question. So a lot of it, originally: we started out working with some government people, and soon after that, we were funded by EFF, the Electronic Frontier Foundation, and they wanted us to exist as a demonstration of a good example of privacy. So if there were a lawsuit happening, like the Tornado Cash one now, they could say, look, you can't just make all this stuff illegal, because Tor is one of those things and it exists, it already exists, you can't just get rid of it in the world.
So I think in terms of the most challenging legal obstacles, I don't want to say that we've had a smooth ride, but we've worked really hard to try to make sure that everybody knows about the good uses of Tor, and I think that's a really important piece of it, which is challenging because there are governments out there trying to scare people about privacy and say only criminals need to have curtains on their walls, so we need to keep fighting on that one. Have any of them caused you to wake up in a cold sweat at night? Probably not the legal obstacles. How do we scale this thing? How do we keep communities going? How do we actually provide good safety, even in the face of governments trying to attack it or some jerk in Russia trying to tear it apart? So technically, more than legally. All right. Is it possible to track a user's Tor activity by running a significant number of Tor nodes? Does this actually happen in practice? Theoretically, yes. If you run enough of the network, then it should be possible to do the math of matching up flows coming in and flows going out. Does it happen in practice? Not that we know of. We actually know a lot of the people who run the relays around the world. They're part of the relay associations that I talked about earlier. But that said, that's the good news: probably people are not successfully attacking the Tor network by running a lot of relays. If you've heard rumors like, I heard the FBI runs half the Tor network, that's garbage. We know the people running half the Tor network; they're not FBI. That's the good news. The bad news is the internet is too centralized. There are not very many cables going across the oceans. There are not very many web servers, like Cloudflare and Akamai, serving everything. There are not very many telephone companies. Most of the internet runs in Northern Europe. So there's too much centralization there in general. And that limits how much privacy we can actually provide on the internet. If you gave me a more decentralized internet, I could build you a stronger, safer Tor. All right. How can Ethereum help Tor? Yes. So one answer is we're a non-profit. We'd love to have your donations. That's the easy answer. Another answer is we need the world to understand why tools like this can be used for good. So you're in the same fight with the Tornado Cash situation, trying to figure out how to explain the value of these things. And you've got some bad people using it. And similarly, Tor has something like 8 million daily users, including some jerk in Russia trying to do whatever they're doing. So how do we win the next round of crypto wars and help the whole world understand the importance of these things, while at the same time Hollywood is making a new story, a new movie, about some dude in a hoodie, and only bad people need privacy. So I think one of the big answers is we're in the same fight for the next crypto wars. All right, we are out of time, but maybe we can squeeze in this last one. Is it safe to run a Tor exit node? What are the potential security and legal issues on that? Yes, so many people run Tor exit relays all around the world, and in almost every situation it is legal to do so. There are legal safe harbors that put you in the same category as the telephone companies. So legally it's actually pretty straightforward. You're all set.
The challenge is your relationship with your ISP because you run a tour exit relay. Eventually, somebody sends an abuse complaint and your ISP says, oh my God, what's that? And that's your point where you have to communicate with them and have that relationship built so that people, so that they understand what you're doing. So it's not a legal question. It's about finding an ISP that agrees with you that this is important. All right. Thank you so much, Roger. Can we get a round of applause for Roger, please? Yeah. And I will be around. I'll be around to answer your questions for as long as you have them. I think there's nothing happening right here, so I'll do it here. Eventually, we'll go out there, ask your questions as long as you've got them. Thank you.", "eventId": "devcon-7", - "slot_start": 1731389400000, - "slot_end": 1731391200000, + "slot_start": 1731651600000, + "slot_end": 1731653700000, "slot_roomId": "main-stage", - "sources_youtubeId": "n3R4ze2hesk", - "sources_swarmHash": "7b57f594e589cebcc14cb04fcc90c7201ef214a347ba31c146c0fbe984a280ae", - "resources_presentation": "https://docs.google.com/presentation/d/1hyqMQ-ALTG3QKpk5SkiuUcDNN1L0Z_UuyGNml54Xc60" + "resources_presentation": "https://docs.google.com/presentation/d/1kL3YxEdhVaztgX9zv7TsWTOPmhhTZ7zGvjBwWKxc__E", + "resources_slides": "", + "speakers": [ + "roger-dingledine" + ] }, "vector": [ 0, @@ -446907,7 +445654,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -447332,11 +446078,8 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, + 6, 0, 0, 0, @@ -447680,6 +446423,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -447749,17 +446493,14 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, @@ -447772,6 +446513,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -448005,6 +446747,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -448215,6 +446958,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -448225,8 +446969,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -448238,42 +446980,41 @@ }, { "session": { - "id": "keynote-lessons-learned-from-tor", - "sourceId": "ZHU7UQ", - "title": "Keynote: Lessons learned from Tor", - "description": "I will share lessons learned during Tor's twenty years as free software fighting for privacy and human rights. We'll talk about distributed trust and privacy by design, how to help people understand the good uses of your tech, getting allies in both cypherpunks and government, why transparency and community-building are so essential to trust, and successes from other spaces. It may seem like the crypto wars never really end, but we all have a part to play in saving the world.", + "id": "keynote-make-ethereum-cypherpunk-again-why-we-need-privacy", + "sourceId": "NKMLNG", + "title": "Keynote: Make Ethereum Cypherpunk Again: Why we need privacy", + "description": "The Web3 revolution seeks to address the sins of Web2. However, in doing so, it’s created an even worse outcome for users - users’ data is publicly available and makes them vulnerable to state-level censorship and adverse actions.\r\n\r\nThis talk will address the philosophical as well as practical considerations of privacy in Web3. \r\nPrivacy is an industry-wide issue and sits at the heart of all that is Web3. 
Understanding why privacy matters involves recognizing that it is not an isolated concept bu", "track": "Cypherpunk & Privacy", "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "expertise": "Beginner", + "audience": "Developer", "featured": true, "doNotRecord": false, "tags": [ - "Anonymity", + "Zk Rollups", "Privacy", - "Sustainability" + "cypherpunk", + "Privacy", + "Zk Rollups" ], "keywords": [ - "Human", - "rights" + "cypherpunk" ], - "duration": 1911, + "duration": 1572, "language": "en", - "sources_swarmHash": "bc74f4b9d585e354aacc1ead7ee0dec2d4b065e411877d110f120117979ff52f", - "sources_youtubeId": "o302oKXbdK8", + "sources_swarmHash": "20a3cf340f6c43d5173a021c482ae5b81d70ebc0be6456fad506132345e86310", + "sources_youtubeId": "Sod3t2JdmOg", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673811671b0f83434dc79ca7", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673811671b0f83434dc79ca7.vtt", - "transcript_text": " The microphone is working. The slides seem like they're working. I hope we'll find out soon if they're... I'm going to press the forward button. Okay, I see them over here. That means that they will appear pretty soon. So, hi, I'm Roger Dingledine from the Tor Project. Once upon a time, I started a non-profit called Tor and wrote the original code, so there's a lot to talk about today. I'm going to break this down into two pieces. The first piece is giving you a crash course on Tor and things to think about, and then the second piece is advice and lessons for the Ethereum and cryptocurrency communities from what we've learned over the past many years. So Tor is a lot of different things. It's a free software project, open source project that you learned about from the last speaker. It's also a bunch of different projects, each trying to provide privacy in various ways. It's a non-profit as I mentioned and the fun part about Tor is the global community of people all around the world who use it and rely on it and help to make it stronger and better. So many people use Tor in the form of Tor Browser which is a Firefox based browser that fixes a bunch of application level issues. So there's the underlying program Tor but the interface that most people see is Tor browser. Okay, so what am I talking about here in terms of privacy? I talk about privacy and security, but really the phrase is communications metadata. It's the idea that I want to browse the web and I want somebody watching my internet connection to not learn what destination I'm going to. And I want the website to be able to not learn where I'm coming from. So how many people here recognize Creepy NSA Dude from back in the Snowden times? So he helped to run the intelligence system for the US government and he has this scary, terrible quote, we kill people based on metadata. And the important thing here is nobody in the attacker world these days is trying to break the encryption anymore. The encryption works well. Instead, let's build a social graph of who's talking to who, when they're talking, how much they're talking, and then we'll look for who's talking to who, when they're talking, how much they're talking, and then we'll look for who's in the middle of that graph, and then we break into their house and steal their laptop or whatever attack we have to do from there. And there are a bunch of other current events that are related to all of this. Here's a news story from, I guess, five years ago now. The U.S. 
Attorney General asks Facebook to stop doing end-to-end encryption for their messenger. And there's this terrible quote from the request: we're writing to request that Facebook not proceed with its plan to implement end-to-end encryption without ensuring there is no reduction to user safety. That drives me nuts. Imagine if they said, we're writing to request that Facebook not implement HTTPS without ensuring that there is no reduction to user safety. That would be an unbelievable thing for them to say, but here they are saying it. And then fast forward four years, we still have the same arguments going on, where England is saying, but if you allow your users to have control over their own information, society will collapse, everybody will become criminals, we'll never be able to track everything. And another example from earlier this year: apparently in the US, law enforcement sent a YouTube video URL to somebody they were trying to attack, and then they went to YouTube and said, tell us everybody who watched that video. And I don't know if YouTube actually answered them, but that's the future of law enforcement, where you make use of the huge platforms that collaborate with governments, and that's it for your privacy. So I'd like to live in a world instead where people, individuals, have control over who gets to learn about them. And then the final example, which happened a couple of years ago: apparently there was somebody in the US state of Nebraska who wanted an abortion and talked to her mother on Facebook Messenger about whether she should get an abortion, and now she's being prosecuted for conspiracy to want an abortion, and that's the country that I live in now. So that's another case where privacy, the ability to have conversations about things that should be legal, is something that the world needs to keep. Okay, so I actually only use the word anonymity when I'm talking to researchers. When I'm talking to my parents, I tell them I'm working on a privacy system, because anonymity, I don't know who needs that, but privacy is an important value for everybody. And then when I'm talking to Walmart and Google and other companies, I work on communication security, because privacy is dead, says the Oracle guy, and anonymity is confusing. But of course we need to protect somebody from learning who our suppliers are, what we're investigating on the internet. When Goldman Sachs is thinking about stocks to invest in, of course they don't want their competitors watching their internet traffic to learn what research they're doing. And then from the government side, it's the same tools, but for governments I explain that I work on traffic analysis resistant communication networks. And again, it's the same security properties, but it's about figuring out how to phrase it for those groups. And then there's a fourth category, which we can talk about in this country, but maybe not some countries nearby here, on the human rights side, where there are people who can't reach bbc.com and they want to be able to read the news or share news. Okay, so how do you build one of these? The easy answer is you have some centralized system like a VPN, and all of the users connect and ask for a web page. And that's great, except what about that central point of failure? So imagine that relay in the middle. It always starts off with, we're never going to look at any of your traffic. Don't worry. We promise to keep you safe. Okay, actually, we do look at all of your traffic, but don't worry, we won't write it down.
Okay, actually, we do log everything, but don't worry, we won't tell other people. Okay, well, of course, we answer governments, but don't worry, we won't tell other people. And I don't know where the next step after that is. The problem there is you can't tell. It's a centralized system. You don't know what they're going to do with your data. They have your data. They promise not to screw you. That's not good enough. And it's even worse than that. Even if that centralized system is somehow perfectly honest and really does want to keep you safe, it's still one computer somewhere. All the traffic goes in, the same traffic goes out, and if you're good at math, maybe you can see that this data flow coming in matches that data flow going out, so this user did that. So even if the VPN provider is totally honest and perfectly secure for all definitions of that, the fact that it's a centralized service is itself still a danger if there's some sort of network level adversary that gets to watch things. So the goal of Tor is to distribute the trust, to decentralize over multiple relays, so there's no single point that gets to know this user is going to that destination. So it's no longer about privacy by promise. It's no longer about, I have the data, I promise I will keep you safe. Now it's about privacy by design, where the architecture of the system means there's no place that knows what you're doing on it. So that's the summary of Tor's goals and threat models. In terms of the network over time, we have about 8,000 relays around the world. Here's a graph of the load on the network and the capacity of the network over time. So we're continuing to grow. The fun part of this is that if you look at the Wikipedia bandwidth graph, it looks like this, just five or ten years behind. So we're continuing to go up as Wikipedia did back then. Okay, so how do you actually think about security for Tor? There are two big answers for that. One of them is the diversity of where the relays are around the world. So as we add more relays in different places around the world, it becomes harder for a given attacker to be able to see the traffic going into the Tor network and the corresponding traffic coming out. And then the second one, which is a bit harder to quantify, is diversity of types of users. So we have a lot of users in Iran. They're not all political dissidents. Most of them are using Tor because Iran blocked Facebook and they can't read their pictures of kittens on the blogs that they want to look at. And that ordinariness is really important for security. If everybody using Tor in Iran were trying to tear down their government right now, then the fact that you're using Tor would itself be dangerous. So we need a whole lot of ordinary people in order to provide protection for all of the higher value users out there also. Okay, so I talked before about the network level privacy, hiding your IP address, where you're coming from, where you're going. But there's a second piece that you have to do in Tor, which is the application level privacy. So we have Tor Browser, and it fixes a bunch of browser layer things like cookies, what version of JavaScript, what languages you prefer, how many pixels by how many pixels your browser window is, all of these ways that websites can recognize you. Even if Tor is doing its job, they can still say, oh, that's the guy running on Mac OS who prefers Thai, but Korean will be okay.
So there are two different pieces to what we need to do for safety. And Tor Browser actually tries to keep you safe, in a way that all these new private browsing modes and so on out there, which, I mean, they sound great, do not. DuckDuckGo did a study a few years ago looking at what users actually think private browsing mode does. And most of them misunderstand. So private browsing mode protects against somebody looking at your hard drive after you've been browsing for a while. It doesn't protect against somebody on the network watching you. It doesn't protect against the website. It doesn't protect against an advertising company tracking everything you do. So a lot of users thought private browsing mode is basically what Tor Browser provides. It should actually protect you against advertisers, websites, ISPs, telephone companies, intelligence agencies, and so on. So here, I found this fun comic explaining the private browsing mode world. So we've got the huge Chrome saying, which website would you like to see? And we've got our sad user saying, I don't want you to know. And then Chrome puts on a sock puppet and says, how about you tell the hand? And then the user's like, okay, great, yes, now you've reassured me about your privacy. So I want something that's more than just a centralized situation where they promise not to screw you, but actually they're still getting all the data. Okay, so transparency is really important for Tor. Of course we're open source. Of course we're free software. Of course we have public design documents and specifications that describe what we tried to build. And of course we are publicly identified. Hi, I'm Roger. I wrote Tor. And the goal of that is to build a community of people who can trust us. It isn't just about putting the code on some website and magically people will believe us. It's about growing a community of people around the world who have met us and argued with us and thought about whether we should do things differently and collaborated. And part of the challenge there is I always talk to some security person who says, oh, ha, ha, the privacy people are talking about transparency. That's so stupid. It's not a contradiction. You can get both. Privacy is about choice. Privacy is about being able to choose who learns things about you. So because I have privacy, I can choose to be transparent and say, hi, I'm Roger, this is what I built. Okay, so that was the crash course on Tor. Now let me talk about some lessons learned from the Tor world that might apply to projects you're working on. So, the first one: make allies, make allies even among your adversaries. So, yes, you need to have good uses and you need to publicize those good uses. So this is for the people who were here two talks ago: don't be Tornado Cash, figure out how to tell the world about your good uses, and maybe that means in your case not just having people who agree with you in the law enforcement groups that might be wondering if you should stay legal, but also getting users in those places so that they understand why your tool is important and needs to keep existing. So we asked people for user stories about Tor a while ago, and here are two. I'm a political activist, part of a semi-criminalized minority.
In my younger years, I entered the public debate openly. Only by anonymizing means, among which Tor is key, can I get my message out without having police come to check my papers. Tor allows me the freedom to publish my message to the world without being personally persecuted for it. So we have a lot of activists and journalists around the world who have this sort of story. Or: I'm a doctor in a very political town. You can imagine Washington DC or Brussels. I have patients who work in legislation where billions of dollars are being spent. When I have to do research on diseases and treatments for my senators, of course I'm going to use Tor. Because I know that somebody is going to want to learn about the information, the medical information about the senators that I treat. And the same story also happens on the legal side, where, yes, you're supposed to have attorney-client privilege, but the reality is that you need to use technical mechanisms to secure your communication. You can't rely on the U.S. government promising not to listen in on your phone call with your lawyer. Over and over we see cases where it turns out they do listen in. And we've also had some other high-profile users over the years. Happy to talk afterwards about the whistleblowing side of Tor. But the bigger picture there is technology isn't neutral. Looking at this in terms of power imbalances: if you already have power, you don't need Tor as much. You've got a military, you've got law enforcement, you've got the laws, you've got other options. Whereas if you're a minority dissident in Myanmar, you don't have other options. So technology is inherently political. Think through what the changes are that you want to make in the world, and make sure that the architecture and the projects that you're working on will steer towards those changes. Otherwise you'll just accidentally recreate the existing power structures of centralization and government control and things like that. And a couple of other fun, weird stories in that direction. I had a friend who was in the hacker community, and he went and joined the FBI. And he understood Tor. So I had a person inside the FBI who was a Tor ally. And he was telling me a story of being in a Department of Justice meeting where they said, you know what we should do today? We should make a law outlawing Tor. And he got to stand up, because he was in that meeting, and say, excuse me, are you trying to make my job impossible? I work for the FBI and I need Tor. So having him in that room meant that they didn't try to undermine my project that day. So we need a lot of people, not just who understand Tor, but who use it and rely on it in each of these cases. Okay, lesson two: how do you build a sustainable community? So in the Tor case, we approach it with organic growth, and the fundamental building block that we have is altruism. It's people who have extra resources, bandwidth, computer connections, they're at a university or an ISP or a company, and bandwidth is essentially free for them, so they contribute that back to the Tor network. Or you could imagine the financial incentives world, where the building block is capitalism. You want to bring people in who are trying to maximize the revenue that they get out of participating. And there are some challenges on the capitalism side. Let's say that we built an alternative Tor network where everybody's here to maximize their revenue.
In that case, shouldn't you minimize your costs? And everybody should go to the same hosting provider in Germany that's cheapest. And that would be a tendency towards re-centralization. Or if you're in it for the money, why don't you sell the user data on the side? Make an extra buck, maximize your revenue even more. So these are not just different ways to build a community that end up at the same destination. You get different properties of your network. Scalability might be better on the capitalism side, but attention to user privacy, I think, is better on the altruism, organic growth side. So it depends what you want out of your system. And I want to be really cognizant here. I don't want to show up and be like, capitalism is inappropriate in every situation, though there is a part of me that does want to say that. But part of the lesson here is: think through what you want out of your network, and make sure that you're building a community that has the values that are most important for you. And in my case, the thing that's most important is making sure that the users will remain safe against whatever attackers they might worry about. Okay, so that said, there are a bunch of other incentive approaches which can potentially be helpful. So gamification, badges, making things fun like that, leaderboards, and community building, like having meetups. At various hacker conferences, we have relay operator meetups, and a bunch of people who do contribute talk to each other, share notes, meet each other. And it turns out that if somebody tries to get them to take down the relay, they feel like they're part of a community, and they feel like they're letting their friends down if they decide to turn off the relay. And another piece of that is there are non-profits all around the world who get together and run Tor exit relays in their community. There's one in France, there's one in the Netherlands, there's one in Sweden, there are like three in Germany, two in Canada, several in the US. And the goal of that is, first of all, you have a non-profit that's actually running the relays, but also you can put all the donations together and get more economies of scale, better bandwidth, better ISPs and so on. And there are some other options that we don't do now but maybe we should. One of them would be: maybe I don't want relay operators who are trying to maximize their revenue, but maybe we could still take donations and subsidize their costs. So they're not profiting, it just costs them less to do the thing that I want them to do anyway. So that would be an example that doesn't ruin the incentive structure but would still be helpful for growing the community. And there are some other examples, like maybe if you run a relay you get faster Tor, which has some problems that I will talk about afterwards. So the bigger picture here: consider your ground truth. In the economics world it's called mechanism design, you folks might call it smart contracts, but if you're trying to structure a situation to steer people the way you want to steer them, think about what you can actually verify. How are you going to verify that the operators are doing the thing that you want them to do?
So for example, if you're trying to incentivize bandwidth, if you're paying people based on how much bandwidth their relay provides: in the current Tor design, relays basically self-advertise how much bandwidth they've got, and that's fine, because they don't get paid for claiming a larger number. But if we naively just set up a thing saying, oh, you're providing a big number, here's your big check, then there would be new opportunities for gaming and lying, and we would end up with a weird arms race where the community is lying to us about what they're doing. Or another example: let's say we want to pay people more if they're not all running at the same ISP in Germany. So we want to incentivize based on location around the world. How do you actually know where the relays are? So here's a fun example of somebody in, I think, Sweden who set up a relay, and he hacked all the GeoIP databases in the world and listed himself as being in Antarctica. And he did it because it was funny. Great. I agree. It's funny. Imagine if we paid him more by saying, whoa, that's a really diverse location. The GeoIP databases in the world are not robust. They're not secure against gaming. They're just people who are trying to help out on locating things. So that's an example where your ground truth doesn't actually enable you to do the things you want. So, lesson three: be critical infrastructure. Have other projects rely on you in a way that they need you to stick around. So Brave bundles Tor. We have a bunch of private messaging tools and whistleblowing tools and operating systems. The New York Times has an onion address, BBC, Deutsche Welle and so on. And there's a whole ecosystem of pluggable transports for transforming your Tor traffic in a way that makes it harder to censor, which are reused and reusable by VPN companies and so on to let their users go through censoring firewalls. And another fun, bizarre example: more than half of the nodes in the Bitcoin network are reachable as Tor onion addresses. So the Bitcoin network relies on the Tor network for their connectivity, reachability, privacy, and so on. And if you know the people in the Bitcoin world who made this choice, please introduce them to me. And not just the Bitcoin world, there's also the Ethereum world. We have been talking a lot with the Funding the Commons people about how to have alternative ways of sustaining the open source community. And there are a bunch of hardware wallets, shout out to Trezor. And we did an NFT a while ago, shout out to PleasrDAO. And here we are at Devcon in the cypherpunk track, so there's a lot of overlapping values here already, in terms of people wanting privacy, people caring about decentralization, so there's a lot of overlap there. And then the fourth lesson: learn from past wins. So that HTTPS example that I talked about before, just 10 or 15 years ago, HTTPS was not pervasive. I was arguing with Google that they should turn on HTTPS for their websites. And they said, well, we can't allow encryption by default because schools would block us. Schools want to be able to spy on their students and learn what searches they're doing, so we can't allow encryption. And now, 10 years later, it's normal. Of course you would never go to a website without HTTPS. So we won that somehow, and it became pervasive. It became normal around the world. So how do we do that again?
So the next phase that we have is end-to-end messaging, the WhatsApps of the world, the Signals of the world, where there are real adversaries trying to make it so that only criminals would want privacy on the internet. So it's all part of the bigger cryptography wars over whether people around the world should have control over their own information and get to decide who gets to learn what they're doing on the internet. And this is especially challenging because the US and England and Australia are the ones yelling, encryption is scary, society will collapse. And then when their foreign ministry goes to China and says, you're a dictator, stop doing that, China is like, look, I'm saying the same thing you're saying, you're scared of encryption, I'm scared of encryption. So there's a real challenge where we, this audience, need to somehow teach our governments and our legal people why encryption should be a basic human right. And there's some weird stuff going on there. Apple is on our side for this round. Facebook is on our side for this round. And that's because they know their customers actually want privacy. So there's something we can leverage here. And then a couple of last thoughts before we get to the Q&A, which I think we're ready for. EFF, the Electronic Frontier Foundation, is running a campaign to get Tor relays running in universities all around the world. And they'll give you an awesome challenge coin if you do it. So if you're connected to a university, please chat with me afterwards. I'd love to get you involved in that. Okay, and then last thoughts as we begin the Q&A. Consider what kind of incentives will lead to the network you want. Make allies, and make sure to teach them about the importance of encryption, privacy, and freedom around the world. Please join the Tor network as a relay or a bridge or a Snowflake. If you don't know what those mean, I'm happy to chat with you afterwards. But please help out in providing privacy to people all around the world. And yes, we're a nonprofit, and I'd be happy to get your donations in whatever cryptocurrency you would like to provide. Thank you. Thank you so much for that, Roger. A little reminder that our privacy should be ours to choose. Now to get straight into the questions: what were the most challenging legal obstacles you encountered throughout Tor's history? Have any of them ever caused you to wake up in a cold sweat at night? Yeah. What a great question. So a lot of it, originally, we started out working with some government people, and soon after that, we were funded by EFF, the Electronic Frontier Foundation, and they wanted us to exist as a demonstration of a good example of privacy. So if there were a lawsuit happening, like the Tornado Cash one now, they could say, look, you can't just make all this stuff illegal, because Tor is one of those things and it exists, it already exists, you can't just get rid of it in the world. So I think in terms of the most challenging legal obstacles, I don't want to say that we've had a smooth ride, but we've worked really hard to try to make sure that everybody knows about the good uses of Tor, and I think that's a really important piece of it, which is challenging because there are governments out there trying to scare people about privacy and say only criminals need to have curtains on their walls, so we need to keep fighting on that one. Have any of them caused you to wake up in a cold sweat at night? Probably not the legal obstacles. How do we scale this thing? How do we keep communities going?
How do we actually provide good safety, even in the face of governments trying to attack it or some jerk in Russia trying to tear it apart? So technically, more than legally. All right. Is it possible to track a user's Tor activity by running a significant number of Tor nodes? Does this actually happen in practice? Theoretically, yes. If you run enough of the network, then it should be possible to do the math and match up flows coming in with flows going out. Does it happen in practice? Not that we know of. We actually know a lot of the people who run the relays around the world. They're part of the relay associations that I talked about earlier. So that's the good news. Probably people are not successfully attacking the Tor network by running a lot of relays. If you've heard rumors like, I heard the FBI runs half the Tor network, that's garbage. We know the people running half the Tor network, and they're not FBI. That's the good news. The bad news is the internet is too centralized. There are not very many cables going across the oceans. There are not very many web servers like Cloudflare and Akamai. There are not very many telephone companies. Most of the internet runs in Northern Europe. So there's too much centralization there in general. And that limits how much privacy we can actually provide on the internet. If you gave me a more decentralized internet, I could build you a stronger, safer Tor. All right. How can Ethereum help Tor? Yes. So one answer is we're a non-profit. We'd love to have your donations. That's the easy answer. Another answer is we need the world to understand why tools like this can be used for good. So you're in the same fight with the Tornado Cash situation, trying to figure out how do I explain the value of these things. And you've got some bad people using it. And similarly, Tor has something like 8 million daily users, including some jerk in Russia trying to do whatever they're doing. So how do we win the next round of crypto wars and help the whole world understand the importance of these things, while at the same time Hollywood is making a new movie about some dude in a hoodie and how only bad people need privacy? So I think one of the big answers is we're in the same fight for the next crypto wars. All right, we are out of time, but maybe we can squeeze in this last one. Is it safe to run a Tor exit node? What are the potential security and legal issues with that? Yes, so many people run Tor exit relays all around the world, and in almost every situation it is legal to do so. There are legal safe harbors that put you in the same category as the telephone companies. So legally it's actually pretty straightforward. You're all set. The challenge is your relationship with your ISP, because you run a Tor exit relay, and eventually somebody sends an abuse complaint, and your ISP says, oh my God, what's that? And that's the point where you have to communicate with them and have that relationship built, so that they understand what you're doing. So it's not a legal question. It's about finding an ISP that agrees with you that this is important. All right. Thank you so much, Roger. Can we get a round of applause for Roger, please? Yeah. And I'll be around to answer your questions for as long as you have them.
I think there's nothing happening right here, so I'll do it here. Eventually, we'll go out there, ask your questions as long as you've got them. Thank you.", + "sources_streamethId": "673581319dbb7a90e145ffc9", "eventId": "devcon-7", - "slot_start": 1731651600000, - "slot_end": 1731653700000, + "slot_start": 1731556800000, + "slot_end": 1731558600000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1kL3YxEdhVaztgX9zv7TsWTOPmhhTZ7zGvjBwWKxc__E", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1ReFBU_bsCAkpa9iAfYEJf0LER_SIpmsSyIlr2UIGBVw", + "resources_slides": "https://drive.google.com/file/d/13AKv6FlPxWv_JfmZ5AAsdwdcscSQMrbr/view", "speakers": [ - "roger-dingledine" + "zac-williamson" ] }, "vector": [ @@ -449054,7 +447795,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -449095,6 +447835,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -449138,13 +447882,37 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -449561,36 +448329,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -449606,63 +448344,57 @@ 0, 0, 0, - 0, - 0, 0 ] }, { "session": { - "id": "keynote-make-ethereum-cypherpunk-again-why-we-need-privacy", - "sourceId": "NKMLNG", - "title": "Keynote: Make Ethereum Cypherpunk Again: Why we need privacy", - "description": "The Web3 revolution seeks to address the sins of Web2. However, in doing so, it’s created an even worse outcome for users - users’ data is publicly available and makes them vulnerable to state-level censorship and adverse actions.\r\n\r\nThis talk will address the philosophical as well as practical considerations of privacy in Web3. \r\nPrivacy is an industry-wide issue and sits at the heart of all that is Web3. Understanding why privacy matters involves recognizing that it is not an isolated concept bu", - "track": "Cypherpunk & Privacy", + "id": "keynote-making-sense-of-stablecoins", + "sourceId": "TDHR79", + "title": "Keynote: Making Sense of Stablecoins", + "description": "Everyone is talking about stablecoins now! In this talk I'll share what I learned about Tether on Tron in addition to stablecoins more broadly. Why are so many USDT transactions on Tron? Why did Bridge get acquired for $1.1B? What do L2s have to do with stablecoins? 
Are stablecoins a threat to Ethereum or an accelerant?", + "track": "Real World Ethereum", "type": "Talk", "expertise": "Beginner", - "audience": "Developer", + "audience": "Product", "featured": true, "doNotRecord": false, - "tags": [ - "Zk Rollups", - "Privacy", - "cypherpunk", - "Privacy", - "Zk Rollups" - ], "keywords": [ - "cypherpunk" + "Stablecoins", + "Layer 2", + "RWA" + ], + "tags": [ + "Ethereum for Good", + "Payment", + "RWA" ], - "duration": 1572, "language": "en", - "sources_swarmHash": "20a3cf340f6c43d5173a021c482ae5b81d70ebc0be6456fad506132345e86310", - "sources_youtubeId": "Sod3t2JdmOg", + "sources_swarmHash": "4b5b1851c7f19fc2d903b32c4df01cd2e7e01bb4c81cddf2419d59e845b353e1", + "sources_youtubeId": "_TDh8kCiwXw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673581319dbb7a90e145ffc9", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "liam-horne" + ], "eventId": "devcon-7", - "slot_start": 1731556800000, - "slot_end": 1731558600000, + "slot_start": 1731470400000, + "slot_end": 1731472200000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1ReFBU_bsCAkpa9iAfYEJf0LER_SIpmsSyIlr2UIGBVw", - "resources_slides": null, - "speakers": [ - "zac-williamson" - ] + "resources_presentation": "https://docs.google.com/presentation/d/1246DZFHYl7mJ0u_o2WRFQUGA1oxze-pQVaEpjC7wjPI", + "resources_slides": "https://drive.google.com/file/d/1B8eIA8flhTpnkq-MXcWHxaLMgHS5IoWX/view" }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -449985,6 +448717,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -450083,7 +448816,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -450471,7 +449203,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -450518,7 +449249,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -450532,6 +449262,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -450546,6 +449277,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -450558,6 +449290,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -450784,7 +449517,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -450971,9 +449703,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -450987,40 +449719,44 @@ }, { "session": { - "id": "keynote-making-sense-of-stablecoins", - "sourceId": "TDHR79", - "title": "Keynote: Making Sense of Stablecoins", - "description": "Everyone is talking about stablecoins now! In this talk I'll share what I learned about Tether on Tron in addition to stablecoins more broadly. Why are so many USDT transactions on Tron? Why did Bridge get acquired for $1.1B? What do L2s have to do with stablecoins? Are stablecoins a threat to Ethereum or an accelerant?", - "track": "Real World Ethereum", + "id": "keynote-nomic-foundations-vision-for-ethereums-tooling-ecosystem", + "sourceId": "VQKXUH", + "title": "Keynote: Nomic Foundation’s vision for Ethereum’s tooling ecosystem", + "description": "Nomic Foundation is the nonprofit behind Hardhat. 
Nomic’s co-founder and CTO will walk you through Nomic’s long-term vision for a community-driven developer tooling ecosystem for Ethereum.", "track": "Developer Experience", "type": "Talk", "expertise": "Intermediate", "audience": "Developer", "featured": true, "doNotRecord": false, "tags": [ "Developer Infrastructure", "DevEx", "Tooling" ], "keywords": [ "ecosystem" ], "duration": 1055, "language": "en", "sources_swarmHash": "ecaa24cd9ab18856aeb56a49704f9ed634d71821a77c890de54f9cc929267bd9", "sources_youtubeId": "w1ObXCY1n-o", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736c0839dbb7a90e1bcdc3d", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736c0c59dbb7a90e1cc98e5.vtt", "transcript_text": " ...region in Latin America. So, two years ago, we had Devcon 6 in Bogota. It was an event that brought thousands of minds to the city to get together and start improving the ways that we did things before Devcon. So in Devcon, it was a fantastic gathering in one venue, a big venue that we had in the city there in Bogota.
And right now, there has been great improvement in the ways to do and to bring the education to the people through online programs and putting together developers and students from all the region together, developers and non-developers who are starting to build in this program. Have we had some impact? I'm just going to mention a few cases of the students that we have had that, I don't know, 18 months ago they had no idea about the industry, and now there's the case of Neil, who at 17 became a Deaf Connect 2023 scholar, and he was able to travel to Turkey and share his experience as a new developer with developers from all over the world and also after this time Christian and Juan Diego who are here today they after taking this these courses and joining to these processes and giving time to it they have been able to find jobs in the web3 industry and go to... They used to work at a call center before that, and now they are part of the industry, and they were able to travel here to DEF CON. So what needs to go next? We need regional coordination. We need to nurture an environment of trust between community builders and community users, so we can take all these to the next steps after the education and create spaces in common between the different actors of the whole region. So we need to push to encourage frequent blockchain interaction within the different actors of the whole region. So we need to push to encourage frequent blockchain interaction within the different communities and find places where we can share hackathons, opportunities to work open source and bring this to a next level. Before the end, I just want to mention between into this great impact that has been in the region there's have been also very difficult times he was Emerson David Silva he was part of the Ethiopian Colombia leadership and he was assassinated by the war this terrible war we have in Colombia so we lost him this year in March 27 and I asked myself why can't I do Emerson to preserve and honor your legacy he's present every day in my thoughts and the community and the leadership dots we want to make his legacy to go further so we have we need to support Emerson's family so if if you scan this QR code we have, we need to support Emerson's family. So if you scan this QR code, we have a Givet project. So we are trying to get funds to help his family and to keep on helping on the operations on education in Colombia and Arauca. So thank you very much. It's been a pleasure. And thank you very much. It's been a pleasure, and thank you. Thank you. We're going to do one quick question of Q&A. You mentioned you onboarded 2,000 developers to the ecosystem. How did you estimate this number? This number is estimated between data that we have from the different organizational processes that has been taken into the region. So we have more than 15 organizations onboarding new developers. So this number comes from what has been done in the last 18 months. And what do you think are the best next steps for the students that just learned about Ethereum? The next step is the most important thing because we need them to take what they learn to the different educational processes and apply that into hackathons, working in open source, working together, meeting other people. And this is something that we are pushing right now. Thank you very much. Thank you. 
That concludes the session, everyone, on the ripple effect of DevCon.", "eventId": "devcon-7", - "slot_start": 1731470400000, - "slot_end": 1731472200000, + "slot_start": 1731567600000, + "slot_end": 1731569400000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1246DZFHYl7mJ0u_o2WRFQUGA1oxze-pQVaEpjC7wjPI" + "resources_presentation": "https://docs.google.com/presentation/d/1kH4iHwoLEeXM3eu44ZJv-USuH2XZbecC-mTN78JbaFE", + "resources_slides": "https://drive.google.com/file/d/1PfbexOSxKAevYkHs9Zco-j-XI2nR5Onw/view", + "speakers": [ + "patricio-palladino" + ] }, "vector": [ - 0, - 0, - 0, 0, 0, 0, @@ -451348,11 +450084,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -451408,6 +450139,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -451794,6 +450526,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -451802,6 +450535,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -451825,6 +450559,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -451895,7 +450630,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -451910,7 +450644,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -451923,7 +450656,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -452349,53 +451081,52 @@ 0, 0, 0, + 0, + 0, 0 ] }, { "session": { - "id": "keynote-nomic-foundations-vision-for-ethereums-tooling-ecosystem", - "sourceId": "VQKXUH", - "title": "Keynote: Nomic Foundation’s vision for Ethereum’s tooling ecosystem", - "description": "Nomic Foundation is the nonprofit behind Hardhat. Nomic’s co-founder and CTO will walk you through Nomic’s long-term vision for a community-driven developer tooling ecosystem for Ethereum.", - "track": "Developer Experience", + "id": "keynote-programmable-cryptography-and-ethereum", + "sourceId": "MQ8T8Z", + "title": "Keynote: Programmable Cryptography and Ethereum", + "description": "Programmable Cryptography is a \"second generation\" of cryptographic primitives - primitives that allow arbitrary programs to be executed \"inside of\" or \"on top of\" cryptographic objects. Programmable cryptography provides three key affordances that complement and amplify the affordances of Ethereum--verifiability, confidentiality, and non-interactivity. We'll discuss how these technologies can reshape the Internet over the next 50 years.", + "track": "Applied Cryptography", "type": "Talk", - "expertise": "Intermediate", - "audience": "Developer", + "expertise": "Beginner", + "audience": "Engineering", "featured": true, "doNotRecord": false, "tags": [ - "Developer Infrastructure", - "DevEx", - "Tooling" + "Cryptography", + "Use cases of cryptography" ], "keywords": [ - "ecosystem" + "Programmable", + "Cryptography" ], - "duration": 1055, + "duration": 1517, "language": "en", - "sources_swarmHash": "ecaa24cd9ab18856aeb56a49704f9ed634d71821a77c890de54f9cc929267bd9", - "sources_youtubeId": "w1ObXCY1n-o", + "sources_swarmHash": "e13e6bd7be8fffa7336eb9daa88cf857ddb07345077867d9a45fa4fda0586ac9", + "sources_youtubeId": "UWPg_AmWtlw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736c0839dbb7a90e1bcdc3d", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736c0c59dbb7a90e1cc98e5.vtt", - "transcript_text": " Leonardo Silva Reviewer:\" Elisabeth Buffard ...region in Latin America. So, two years ago, we had DEFCON 6 in Bogota. It was an event that brought thousands of minds to the city to get together and start improving the ways that we did things before DEF CON. So in DEF CON, it was a fantastic gathering in one venue, a big venue that we had in the city there in Bogota. 
It was a place where we were able to meet in between and start working together. A lot of things started at that moment in the whole region with leaders from every country of the region. And it sparkled, it started something that was starting joining together more community in the different cities of the region and having community events that brought a lot of people inside the different countries. There were a lot of new steps on how to bring blockchain education to the region through different methods all over the place, all over the different countries, starting using new programs to involve developers in the Ethereum ecosystem. And it started growing through alliances with universities and networks of universities. This was something that wasn't happening a lot before the DEF CON, but afterwards it started to pop up in different places all over the region, and it happened more to hackathons and these alliances with universities. And it just came all over the place. So right now, between all the different communities around Ethereum in the region, we have onboarded over 2,000 developers, mainly Web2 developers into the Web3 ecosystem and the Ethereum ecosystem itself to different programs and it's people who before DEF CON they had no idea of what was going on in the industry, but now we have this new sense of consciousness of what are the possibilities and mainly taking the message to the people in the region that it's not an industry only to be a user, but you can be a builder. And it's something that it's been made to attending three main points, that is community, education, and development. Two, community, education, and development. Two, alliances, events, and production. And it is growing and growing. There's more and more alliances happening all over the Latin region, between the communities and universities, all the different communities, and universities, all the different communities, social places, and in general, the actors of the ecosystem, like putting everything together to make things happen. And right now, there has been great improvement in the ways to do and to bring the education to the people through online programs and putting together developers and students from all the region together, developers and non-developers who are starting to build in this program. Have we had some impact? I'm just going to mention a few cases of the students that we have had that, I don't know, 18 months ago they had no idea about the industry, and now there's the case of Neil, who at 17 became a Deaf Connect 2023 scholar, and he was able to travel to Turkey and share his experience as a new developer with developers from all over the world and also after this time Christian and Juan Diego who are here today they after taking this these courses and joining to these processes and giving time to it they have been able to find jobs in the web3 industry and go to... They used to work at a call center before that, and now they are part of the industry, and they were able to travel here to DEF CON. So what needs to go next? We need regional coordination. We need to nurture an environment of trust between community builders and community users, so we can take all these to the next steps after the education and create spaces in common between the different actors of the whole region. So we need to push to encourage frequent blockchain interaction within the different actors of the whole region. 
So we need to push to encourage frequent blockchain interaction within the different communities and find places where we can share hackathons, opportunities to work open source and bring this to a next level. Before the end, I just want to mention between into this great impact that has been in the region there's have been also very difficult times he was Emerson David Silva he was part of the Ethiopian Colombia leadership and he was assassinated by the war this terrible war we have in Colombia so we lost him this year in March 27 and I asked myself why can't I do Emerson to preserve and honor your legacy he's present every day in my thoughts and the community and the leadership dots we want to make his legacy to go further so we have we need to support Emerson's family so if if you scan this QR code we have, we need to support Emerson's family. So if you scan this QR code, we have a Givet project. So we are trying to get funds to help his family and to keep on helping on the operations on education in Colombia and Arauca. So thank you very much. It's been a pleasure. And thank you very much. It's been a pleasure, and thank you. Thank you. We're going to do one quick question of Q&A. You mentioned you onboarded 2,000 developers to the ecosystem. How did you estimate this number? This number is estimated between data that we have from the different organizational processes that has been taken into the region. So we have more than 15 organizations onboarding new developers. So this number comes from what has been done in the last 18 months. And what do you think are the best next steps for the students that just learned about Ethereum? The next step is the most important thing because we need them to take what they learn to the different educational processes and apply that into hackathons, working in open source, working together, meeting other people. And this is something that we are pushing right now. Thank you very much. Thank you. 
That concludes the session, everyone, on the ripple effect of DevCon.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731567600000, - "slot_end": 1731569400000, + "slot_start": 1731398400000, + "slot_end": 1731400200000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1kH4iHwoLEeXM3eu44ZJv-USuH2XZbecC-mTN78JbaFE", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1xCnHIn3N6_CE75tyV-Jo2eMU07wZIBXFedFxwrk7xf4", + "resources_slides": "https://drive.google.com/file/d/1FsDYkbfv0MstTDS-NlxxkvtPyiR-Fq2V/view", "speakers": [ - "patricio-palladino" + "gubsheep" ] }, "vector": [ 0, 0, 0, - 6, 0, 0, 0, @@ -452403,6 +451134,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -452575,6 +451307,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -452775,7 +451508,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -453153,6 +451885,10 @@ 0, 0, 0, + 6, + 0, + 0, + 0, 0, 0, 0, @@ -453173,68 +451909,61 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -453708,13 +452437,11 @@ 0, 2, 0, + 2, 0, 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -453728,39 +452455,47 @@ }, { "session": { - "id": "keynote-programmable-cryptography-and-ethereum", - "sourceId": "MQ8T8Z", - "title": "Keynote: Programmable Cryptography and Ethereum", - "description": "Programmable Cryptography is a \"second generation\" of cryptographic primitives - primitives that allow arbitrary programs to be executed \"inside of\" or \"on top of\" cryptographic objects. Programmable cryptography provides three key affordances that complement and amplify the affordances of Ethereum--verifiability, confidentiality, and non-interactivity. We'll discuss how these technologies can reshape the Internet over the next 50 years.", - "track": "Applied Cryptography", + "id": "keynote-the-next-10-years-of-web3-in-africa", + "sourceId": "GSNQLC", + "title": "Keynote: The next 10 years of Web3 in Africa", + "description": "When Africa reaches 2 billion people, what are the profound ways Web3 shapes its economy? Historically, millions of Africans repurposed and stitched together crypto tools for real-world utility. Now, a new generation of builders is developing tailored solutions. In the next 10 years, what can we expect to be built that redefines trust and finance in Africa? 
And what needs to be true for more than half of African economies to be powered by decentralized technologies?", + "track": "Real World Ethereum", "type": "Talk", - "expertise": "Beginner", - "audience": "Engineering", + "expertise": "Intermediate", + "audience": "Product", "featured": true, "doNotRecord": false, "tags": [ - "Cryptography", - "Use cases of cryptography" + "Ethereum Roadmap", + "Use Cases", + "macro/micro economics", + "adoption", + "africa", + "mass", + "Ethereum Roadmap", + "macro/micro economics", + "Use Cases" ], "keywords": [ - "Programmable", - "Cryptography" + "Africa", + "Mass adoption", + "" ], - "duration": 1517, + "duration": 1531, "language": "en", - "sources_swarmHash": "e13e6bd7be8fffa7336eb9daa88cf857ddb07345077867d9a45fa4fda0586ac9", - "sources_youtubeId": "UWPg_AmWtlw", + "sources_swarmHash": "2415d9c9f111fd9ab297194c0cc7b8de5a938accb641bfdb907c2ca01e0958d3", + "sources_youtubeId": "DRAs6Yh7g4I", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6733f64b3a168eb53542528d", "eventId": "devcon-7", - "slot_start": 1731398400000, - "slot_end": 1731400200000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1xCnHIn3N6_CE75tyV-Jo2eMU07wZIBXFedFxwrk7xf4", - "resources_slides": null, + "slot_start": 1731407400000, + "slot_end": 1731409200000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1IAQR41JAk7FPn24OGhprL4uyoP17OlBMG8dv6oyQ_n8", + "resources_slides": "https://drive.google.com/file/d/1SIHPEYzOTtYz9BIoMkgVFbQFlxH_My1Q/view", "speakers": [ - "gubsheep" + "yoseph-ayele" ] }, "vector": [ @@ -453770,10 +452505,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -453945,7 +452676,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -454154,6 +452884,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -454528,7 +453259,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -454543,7 +453273,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -454597,6 +453326,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -454675,6 +453405,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -454707,6 +453439,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -454727,6 +453460,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -454907,6 +453641,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -455073,6 +453808,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -455082,12 +453818,6 @@ 0, 2, 0, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -455100,47 +453830,45 @@ }, { "session": { - "id": "keynote-the-next-10-years-of-web3-in-africa", - "sourceId": "GSNQLC", - "title": "Keynote: The next 10 years of Web3 in Africa", - "description": "When Africa reaches 2 billion people, what are the profound ways Web3 shapes its economy? Historically, millions of Africans repurposed and stitched together crypto tools for real-world utility. Now, a new generation of builders is developing tailored solutions. In the next 10 years, what can we expect to be built that redefines trust and finance in Africa? And what needs to be true for more than half of African economies to be powered by decentralized technologies?", - "track": "Real World Ethereum", + "id": "keynote-the-real-state-of-l2s", + "sourceId": "HCXUU8", + "title": "Keynote: The REAL state of L2s", + "description": "The evolution of Layer 2 solutions has been pivotal in scaling blockchain technologies. This talk, led by L2BEAT founder Bartek Kiepuszewski, delves into the current landscape, recent advancements, and future potential of L2 ecosystems. It will try to address some myths and current challenges of the space. 
Some important changes to L2BEAT risk framework will also be announced.", + "track": "Layer 2", "type": "Talk", "expertise": "Intermediate", - "audience": "Product", + "audience": "Community", "featured": true, "doNotRecord": false, "tags": [ - "Ethereum Roadmap", - "Use Cases", - "macro/micro economics", - "adoption", - "africa", - "mass", - "Ethereum Roadmap", - "macro/micro economics", - "Use Cases" + "Architecture", + "Layer 2s", + "Best Practices", + "myths", + "reality", + "Architecture", + "Best Practices", + "Layer 2s" ], "keywords": [ - "Africa", - "Mass adoption", - "" + "L2Risks", + "Myths&Reality" ], - "duration": 1531, + "duration": 1530, "language": "en", - "sources_swarmHash": "2415d9c9f111fd9ab297194c0cc7b8de5a938accb641bfdb907c2ca01e0958d3", - "sources_youtubeId": "DRAs6Yh7g4I", + "sources_swarmHash": "ce47bad7c88511a2fe90b963746e3839e63587ad5b80c0becc0f6fb546dba05c", + "sources_youtubeId": "Khur38hvCs0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6733f64b3a168eb53542528d", + "sources_streamethId": "673437a69dbb7a90e129cb7d", "eventId": "devcon-7", - "slot_start": 1731407400000, - "slot_end": 1731409200000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1IAQR41JAk7FPn24OGhprL4uyoP17OlBMG8dv6oyQ_n8", - "resources_slides": null, + "slot_start": 1731472200000, + "slot_end": 1731474000000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1NxPv65UP8MJMX2f8NWmiAL-GETRNifiDtkZS5evBvV0", + "resources_slides": "https://drive.google.com/file/d/1DcThD-G-5SlT4Tx2WDp3IENxGIGFf-LL/view", "speakers": [ - "yoseph-ayele" + "bartek-kiepuszewski" ] }, "vector": [ @@ -455150,10 +453878,8 @@ 0, 0, 0, - 6, - 0, - 0, 0, + 6, 0, 0, 0, @@ -455533,7 +454259,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -455579,6 +454304,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -455925,6 +454651,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -455955,9 +454682,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -455974,7 +454703,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -456053,8 +454781,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -456087,7 +454813,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -456108,7 +454833,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -456144,6 +454868,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -456456,8 +455181,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -456466,8 +455189,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -456480,45 +455203,39 @@ }, { "session": { - "id": "keynote-the-real-state-of-l2s", - "sourceId": "HCXUU8", - "title": "Keynote: The REAL state of L2s", - "description": "The evolution of Layer 2 solutions has been pivotal in scaling blockchain technologies. This talk, led by L2BEAT founder Bartek Kiepuszewski, delves into the current landscape, recent advancements, and future potential of L2 ecosystems. It will try to address some myths and current challenges of the space. Some important changes to L2BEAT risk framework will also be announced.", - "track": "Layer 2", + "id": "keynote-the-universal-cryptographic-adapter", + "sourceId": "R9X9ZG", + "title": "Keynote: The Universal Cryptographic Adapter", + "description": "The \"secret\" third affordance of Zero-Knowledge proof after 1) Privacy and 2) Succinctness is Interoperability. ZK enables us to continuously refactor data, aggregate it from different sources, and transforming it without loosing its integrity.\r\nStarting with the Zupass project, and now with the broader adoption of the POD and GPC format, 0xPARC has been exploring using ZK for data sovereignty and creating more interoperable data ecosystem. 
We will cover our learnings and progress in this talk.", + "track": "Applied Cryptography", "type": "Talk", - "expertise": "Intermediate", - "audience": "Community", + "expertise": "Expert", + "audience": "Engineering", "featured": true, "doNotRecord": false, "tags": [ - "Architecture", - "Layer 2s", - "Best Practices", - "myths", - "reality", - "Architecture", - "Best Practices", - "Layer 2s" + "Not financial", + "Permissionless", + "ZKP" ], "keywords": [ - "L2Risks", - "Myths&Reality" + "None" ], - "duration": 1530, + "duration": 1186, "language": "en", - "sources_swarmHash": "ce47bad7c88511a2fe90b963746e3839e63587ad5b80c0becc0f6fb546dba05c", - "sources_youtubeId": "Khur38hvCs0", + "sources_swarmHash": "0fed334b4f3cf12a05223fe8bfbc21fcefcf7147b55c4d9a16be9d98a8f7200c", + "sources_youtubeId": "Qob-AsX0mxY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673437a69dbb7a90e129cb7d", + "sources_streamethId": "67345eef9dbb7a90e15752b1", "eventId": "devcon-7", - "slot_start": 1731472200000, - "slot_end": 1731474000000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1NxPv65UP8MJMX2f8NWmiAL-GETRNifiDtkZS5evBvV0", - "resources_slides": null, + "slot_start": 1731483000000, + "slot_end": 1731484800000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1DIuykDDTe3d5hT9NzR3bnBAg1TQAoLS7n9JoGbIFyAg", + "resources_slides": "https://drive.google.com/file/d/1xh1CIkzROhhehPBlIRdudtMvCmaaz7SP/view", "speakers": [ - "bartek-kiepuszewski" + "justin-glibert" ] }, "vector": [ @@ -456529,6 +455246,9 @@ 0, 0, 0, + 0, + 0, + 0, 6, 0, 0, @@ -456701,6 +455421,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -456955,7 +455676,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -457304,7 +456024,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -457335,18 +456054,15 @@ 0, 0, 0, - 2, - 0, 0, 0, - 2, - 0, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -457508,6 +456224,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -457521,7 +456238,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -457538,6 +456254,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -457669,8 +456386,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -457837,6 +456552,7 @@ 0, 0, 2, + 2, 0, 0, 0, @@ -457845,10 +456561,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -457858,48 +456570,47 @@ }, { "session": { - "id": "keynote-the-universal-cryptographic-adapter", - "sourceId": "R9X9ZG", - "title": "Keynote: The Universal Cryptographic Adapter", - "description": "The \"secret\" third affordance of Zero-Knowledge proof after 1) Privacy and 2) Succinctness is Interoperability. ZK enables us to continuously refactor data, aggregate it from different sources, and transforming it without loosing its integrity.\r\nStarting with the Zupass project, and now with the broader adoption of the POD and GPC format, 0xPARC has been exploring using ZK for data sovereignty and creating more interoperable data ecosystem. 
We will cover our learnings and progress in this talk.", - "track": "Applied Cryptography", + "id": "keynote-title-redacted", + "sourceId": "8GH8TR", + "title": "Keynote: [title redacted]", + "description": "[description redacted]", + "track": "Core Protocol", "type": "Talk", - "expertise": "Expert", - "audience": "Engineering", + "expertise": "Intermediate", + "audience": "Community", "featured": true, "doNotRecord": false, "tags": [ - "Not financial", - "Permissionless", - "ZKP" + "Consensus", + "Ethereum Roadmap", + "cryptoeconomy", + "Consensus", + "Core Protocol", + "Ethereum Roadmap" ], "keywords": [ - "None" + "beacon chain", + "research", + "cryptoeconomics" ], - "duration": 1186, + "duration": 1582, "language": "en", - "sources_swarmHash": "0fed334b4f3cf12a05223fe8bfbc21fcefcf7147b55c4d9a16be9d98a8f7200c", - "sources_youtubeId": "Qob-AsX0mxY", + "sources_swarmHash": "93e32548a38d598d71fdd21d2c49f3012ba3a510cc1214362a4ebbda8529763e", + "sources_youtubeId": "lRqnFrqpq4k", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67345eef9dbb7a90e15752b1", + "sources_streamethId": "67338ac53a168eb53526cd0d", "eventId": "devcon-7", - "slot_start": 1731483000000, - "slot_end": 1731484800000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1DIuykDDTe3d5hT9NzR3bnBAg1TQAoLS7n9JoGbIFyAg", - "resources_slides": null, + "slot_start": 1731405600000, + "slot_end": 1731407400000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1hcsmjIHu5W9-usVg_e3DGrH4QnmLER-OPOZ_0ccXjKU", + "resources_slides": "https://drive.google.com/file/d/1qTsFVcfzJoIbybFV-_c6WhA77ExxnGNH/view", "speakers": [ - "justin-glibert" + "justin-drake" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -458076,7 +456787,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -458334,6 +457044,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -458653,6 +457364,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -458666,6 +457378,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -458720,7 +457433,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -458839,6 +457551,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -458882,7 +457595,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -458913,7 +457625,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -459044,6 +457755,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -459208,16 +457920,16 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, - 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -459230,44 +457942,45 @@ }, { "session": { - "id": "keynote-title-redacted", - "sourceId": "8GH8TR", - "title": "Keynote: [title redacted]", - "description": "[description redacted]", - "track": "Core Protocol", + "id": "keynote-unifying-ethereum-through-intents-and-erc-7683", + "sourceId": "WHYZCD", + "title": "Keynote: Unifying Ethereum Through Intents and ERC-7683", + "description": "Ethereum has scaled with a diverse ecosystem of L2s—but this created a new challenge: how can this fragmented landscape of potentially millions of rollups feel like a **unified Ethereum**? 
In this talk, I’ll discuss how intent-based architectures—and new standards like ERC-7683—can help unify Ethereum while maintaining the benefits of Ethereum’s rollup centric architecture.", + "track": "Layer 2", "type": "Talk", "expertise": "Intermediate", - "audience": "Community", + "audience": "Product", "featured": true, "doNotRecord": false, "tags": [ - "Consensus", - "Ethereum Roadmap", - "cryptoeconomy", - "Consensus", - "Core Protocol", - "Ethereum Roadmap" + "Cross-L2", + "UI/UX", + "Intents", + "interoperability", + "erc-7683", + "Cross-L2", + "Intents", + "UI/UX" ], "keywords": [ - "beacon chain", - "research", - "cryptoeconomics" + "ERC-7683", + "Interoperability" ], - "duration": 1582, + "duration": 1543, "language": "en", - "sources_swarmHash": "93e32548a38d598d71fdd21d2c49f3012ba3a510cc1214362a4ebbda8529763e", - "sources_youtubeId": "lRqnFrqpq4k", + "sources_swarmHash": "2191e709cbfc256b9d47ddae48814a876c2d304b09cbc7c61b9fe25875504fda", + "sources_youtubeId": "jjBxfIsTrLE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67338ac53a168eb53526cd0d", + "sources_streamethId": "67345cf19dbb7a90e14fb5ef", "eventId": "devcon-7", - "slot_start": 1731405600000, - "slot_end": 1731407400000, + "slot_start": 1731483000000, + "slot_end": 1731484800000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1hcsmjIHu5W9-usVg_e3DGrH4QnmLER-OPOZ_0ccXjKU", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1HFUKCdHq2CnGEM-2BvyaHipzeUf2aeP32TKRHPxKnWY", + "resources_slides": "https://drive.google.com/file/d/1GcTGXXeQJC-xSRme_K5zxzlB7te95zzO/view", "speakers": [ - "justin-drake" + "hart-lambur" ] }, "vector": [ @@ -459275,10 +457988,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -460027,10 +458740,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -460041,10 +458750,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -460082,9 +458787,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -460198,6 +458905,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -460214,7 +458922,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -460225,6 +458932,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -460412,6 +459120,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -460419,7 +459128,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -460593,7 +459301,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -460602,50 +459309,45 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "keynote-unifying-ethereum-through-intents-and-erc-7683", - "sourceId": "WHYZCD", - "title": "Keynote: Unifying Ethereum Through Intents and ERC-7683", - "description": "Ethereum has scaled with a diverse ecosystem of L2s—but this created a new challenge: how can this fragmented landscape of potentially millions of rollups feel like a **unified Ethereum**? In this talk, I’ll discuss how intent-based architectures—and new standards like ERC-7683—can help unify Ethereum while maintaining the benefits of Ethereum’s rollup centric architecture.", - "track": "Layer 2", + "id": "keynote-world-politics-world-building", + "sourceId": "ERQKUX", + "title": "Keynote: World Politics, World Building", + "description": "World politics has changed. Geopolitics is no longer simply a contest to control territory: in this age of advanced technology, it has become a contest to create the territory. Great powers seek to build a world for other states to inhabit, while keeping the ability to change the rules or the state of the world when necessary. At a moment when the old concepts no longer work, this book aims to introduce a radically new theory of world politics and technology. 
The end goal: god mode", + "track": "Real World Ethereum", "type": "Talk", - "expertise": "Intermediate", - "audience": "Product", + "expertise": "Beginner", + "audience": "Academic", "featured": true, "doNotRecord": false, - "tags": [ - "Cross-L2", - "UI/UX", - "Intents", - "interoperability", - "erc-7683", - "Cross-L2", - "Intents", - "UI/UX" - ], + "tags": [], "keywords": [ - "ERC-7683", - "Interoperability" + "World Building", + "Technology", + "Geopolitics" ], - "duration": 1543, + "duration": 1540, "language": "en", - "sources_swarmHash": "2191e709cbfc256b9d47ddae48814a876c2d304b09cbc7c61b9fe25875504fda", - "sources_youtubeId": "jjBxfIsTrLE", + "sources_swarmHash": "814ca0945d9b0b5cc40d580f6819e1a7b896f35b60c8c32954f4e6a92ca7e40c", + "sources_youtubeId": "uz0GN4Jcywk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67345cf19dbb7a90e14fb5ef", + "sources_streamethId": "673701f81b0f83434dc99e4d", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673701f81b0f83434dc99e4d.vtt", + "transcript_text": " . Starting speaking. Thank you for being here. I know at DEF CON you have many, many options. I'm glad you chose to be here. Two years ago in Bogota, I spoke about money as virtual and as necessarily virtual. And today I want to take that a little bit farther and talk about geopolitics as virtual. Let's see how that works. It's the topic of my new book that is coming out in February, but this is actually the first time that I test these ideas in public. Here's a nice little video by Calder Moore, an artist that I recommend you check on Twitter or Instagram, and I hope he doesn't mind that I promote him a little bit here. This is really the idea behind my book, that the world as we know it is always in the process of being built from scratch, from nothing. And the process is naturally contested. Great powers tend to build on top of the work of the building done by other great powers. You know that geopolitics as a word was invented at the beginning of the 20th century, and it was meant to mean the fight or the struggle to control territory. Over the next hundred years, the word lost a lot of its meaning. Why? Because we suddenly started to realize that physical territory might not be that important. There were things that were more important. Communication networks, the economy, energy systems, communication systems, currency systems. And so we moved to something that people like to call international relations. But then something quite odd happened, and I think in the last 10 years. We realized that even if the world is artificial, even if it is built, that doesn't remove competition. Geopolitics moves from the control of physical territory to the control of artificial territories. In fact, I would argue that geopolitics becomes more intense and more existential, because if you are building an artificial world, others have to inhabit that world. Others are in a way captured in the world you built. This is a diagram from my previous book that really set up the problem but didn't quite solve it. We have states, self-contained entities of some kind, that then open up and start developing very intense relations between themselves and so create a kind of space of interaction between different states but then a question arises obviously if there is this space of interaction well who builds it who regulates it who creates the rules for this space of interaction? Is it the states? Well, the states are part of it. 
Is there something above the states? What is that something above the states? Now, many people would say that's nature. That's the physical world, and states compete in the physical world. And I left the question at that point in my previous book, but let's see what my current answer is. There's a sentence, kind of a motto, a proverb, very common in the tech world, particularly in Silicon Valley: truth seekers take feedback from nature. And you could see a certain theory of geopolitics where the feedback would come from nature. Is nature the background for world politics? Well, you could say that the states that do better in state competition are the states that succeed in mastering nature, in controlling nature, in operating in nature. We all know this from engineering. The companies that do better at building aircraft are the companies that understand the laws of physics and understand the processes of engineering. And there is something to this. This view is still popular. You could say, for example, that the West has risen to global dominance because it was better at mastering and controlling nature through processes of science, engineering, discovery, and so on. There's even a certain theory of the Cold War: you say the West came out on top because, in the end, it was more successful at these processes, either of mastering nature or competition in the marketplace. But you see, in all these cases, there is a background for state competition, and that background is, in a way, natural. But now, imagine if a country becomes so powerful that it acquires the ability to create a virtual world. And this virtual world is so much more attractive than the physical world that other states, other countries eventually migrate to it. What then determines state competition? Well, the interesting thing is that at this point, the world is no longer natural. The world has been built by a powerful state actor, by, I would call it, a superpower. In practice, this means that the world might become an American world or a Chinese world, which are expressions that we hear a lot. And I would suggest that we take those expressions as literally as possible. In fact, the world can become Chinese. The world can become American. Why is that the case? Well, in a virtual world, essentially made up of communication networks, essentially made up of systems that are purely artificial and virtual or tending towards becoming virtual, they are necessarily built by one state actor, and they acquire the characteristics that that state actor has impressed on them. What has changed is that we no longer believe in a neutral playing field, and the reason we no longer believe in a neutral playing field is that we believe the game environment is built by the players. This is the point where it's quite interesting to start talking about game environments or a world game. It's not the natural world, it's not the world of nature, it's a game that has been built. We live after nature, and that must necessarily change the terms of geopolitical rivalry. When your opponent is building a fully artificial or technological world that could redefine your own reality, then geopolitics is not merely existential, not merely a question of survival or disappearance. It becomes ontological. The superpowers are competing to build reality, to determine reality. And others that are not as successful as that will live in a reality created by others. This is the premise or the gambit that I'm trying to explore.
In fact, at this point, you start to understand what geopolitics is all about. It's about the building of these virtual worlds. And suddenly there are two levels. There is the level of those who build the world, and the level of those that live or inhabit that world. The level of those that are playing the video game, and the level of those that are programming the video game. And this, in my view, is the nature of geopolitical power, this break between two separate levels. A superpower is a system administrator. Now, I'm quite excited to present these ideas here, because I think this kind of language would sound very strange to my usual colleagues in geopolitics, but it will sound a lot more familiar to you. I'm interested in the reactions to that, both today and later; feel free to send me a message on Twitter or by email. You can crack down on offenders and pursue your aims by manipulating the system itself. I would say this approach characterizes the Western response to the war in Ukraine. We didn't go to war. Western democracies adopted a set of targeted economic tools that were designed to reduce the Russian threat to the existing system. In this kind of cybernetic model that I'm talking about, you could even compare our reaction to antivirus software, or even to the agents in the Matrix. What did the United States in particular do? It didn't go to war with Russia, but it changed the game environment for Russia in very detrimental ways. And the most obvious way it changed the game environment was to transform Ukraine into something quite different from what it was at the beginning, quite different from what it was in February 2022. And you know how the agents in the Matrix occupy the physical bodies of people on the street. If you are a global system administrator, if you have control over the system panel of global politics, you are able to introduce sudden and rather dramatic changes in the game environment. Suddenly, this player, Ukraine, is no longer the same player that it was at the beginning. You can transform the game environment for other players that are not operating at the same level that you are operating. It's a way to transform the ground world that lower-level users face on the battlefield. There is the system level, there is the ground world, and the system administrator can change the reality that the lower-level users face. So in one sense, geopolitics is a struggle not to control territory, but to create the territory. Think of it as a clash between two versions of the world. Imagine a simulated landscape. Go back to the little video at the beginning. Two or more computer programmers are fighting to redesign what appears on the monitor. This is, I think, evident in the case of the Ukraine war. It's perhaps even more evident in the Cold War between the United States and China. What you see is not a direct clash, and I very much doubt we will see a direct clash. What you see is an attempt to penetrate deeper and deeper into reality as we experience it and try to take control over it. There is intense competition between the United States and China to redesign the communication networks of the future, and these will be communication networks that will apply not only to people but to objects. The world will come alive with 5G, 6G, 7G communication networks that increasingly replace the physical world with a virtual world. Who will build this world?
And then, of course, energy as well. We enter a new energy system. Who will build this energy system? Will it be the United States? Will it be China? And what form will this energy system take? We already know well enough that it won't be purely an energy system, but this has happened in history many times. The second energy revolution, at the end of the 19th century and the beginning of the 20th century, wasn't purely about energy. It was a kind of a complex where electricity and the modern factory formed a single complex. Henry Ford started his career working for Edison, and many people speculate that he was able to come up with the idea of the assembly line because he had seen how electricity worked as a constant flow, and he adapted the logic of electricity to the modern factory. The modern factory together with electricity created a single complex that was dominated by the United States. Now, we already know enough about the third industrial revolution. The first was, of course, coal. Even then, there was a complex: coal and the steam engine. The third industrial revolution will also be a complex, and it's already pretty clear that it will be a complex of AI together with an energy source. There seems to be a difference of opinion about what the fundamental energy source will be, whether it will be nuclear or solar. In my book, I take a clear choice for solar, in part based on costs, but not only on costs. But we'll see what the final answer is. In the United States, clearly there's a certain preference for a complex of nuclear power and AI. I see the geopolitical competition as this kind of struggle between two programmers to see who is able to redesign the world. Now, an interesting question that is raised here is: does the world have to be designed by a single player, or can we have something like modularity, composability, I think terms that will make sense to you, where parts of the system may be designed by different administrators, by different builders? And this is perhaps a way that one could find balance in world politics: China, the United States, and other superpowers will be able to divide the task of building these virtual worlds, and we could even perhaps have a kind of a system of percentages where the world system would be built 80% by the United States and 15% by China. Some rules would be predominantly Chinese, but others would be American. In a computer program, this works because there's a principle of modularity. And one suspects that these principles could be applied to world politics as well. What I'm arguing essentially is that just as new technologies like nuclear weapons raised the destructive potential of direct conflict, there was another avenue open for power. You didn't have to win a nuclear war against a determined opponent. You could build an artificial world that others would be forced to inhabit. Call it a form of indirect government or indirect power. Perhaps your opponent will even assume that these rules are natural. Go back to the beginning. Perhaps from his point of view, these rules are not really your rules. They are just the way the world works. That is the highest possible form of power. When you design the world in such a way that people take that world as natural rather than constructed, you have moved one level up in the great game. Your opponent is playing a video game. You are coding it.
This is the highest aspiration of geopolitical power: to build a world that others have to inhabit. Now, something directly addressed to you. Builders tend to think that this idea of a world game is more than a metaphor. I think you have the intuition that you're building something much bigger than just a gadget or an app. It's particularly clear in the case of Ethereum, where many times it seems to me your aspiration is to build an artificial world, with principles of decentralization that go back to my comments about composability and modularity. Builders are building a global platform that eventually will set the terms for global power competition. But many times when we talk about technology, we fail to ask whether technology exists as a kind of background for our actions or whether something else is the background. I am arguing very strongly that technology is the background, and is the ultimate background. Sometimes we assume technology is a set of gadgets. The idea that it could become our environment, that it could become our only environment, is still, I think, for many people, too bold. We already talk about smart cities, which seems to suggest that technology, rather than the physical city, is the environment. But that is still not enough; we're moving in that direction, but still hesitantly. Sorry, just tell me if the sign language is confusing, or if I'm getting in front of the people. I think crypto is even a step farther than smart cities, because it already contains the idea of an artificial world, of world building, of an autonomous world that slowly, gradually, in ways that are not entirely clear, is supposed to replace or subsume, to be built over, the physical world as we know it. Now, the problem is, in this human-made world that I'm describing, there is no external authority. The engineering power has set the rules in advance and enjoys root access to those rules. In my book, I'm very attracted by the idea that the language of computer programming is more helpful for understanding geopolitics than any other kind of language I know. And one should wonder why that is the case. Obviously, the answer is that we increasingly live in virtual programmable worlds. Other actors may take the external environment as natural or inescapable. They have no way to access the most important levers and switches of power. The world, in this case, becomes American or Chinese. But in a Chinese world, the outcomes are naturally predetermined to favor China. China has God mode. And conversely, in an American world. The great game, an expression many times used about geopolitics, is a game, but it's a metagame. It's a game whose purpose is to create the rules of the lower-level game. To conclude, I reserve the term superpower for those states that are engaged in a battle to shape the global system; everyone else is competing under those rules. For the superpowers, or the system administrators, the power to create the game environment, the game world, has two main advantages, which have been quite obvious during the Ukraine war. It allows the system administrators, the superpowers, to set their preferred rules, or the rules that best fit their capacities. If you look at the global system today, you know, have no illusions: these rules have been set by the West, have been set by the United States, and they are not neutral. They are rules that favor the capacities that the United States has, and in a way, of course, predetermine the outcomes.
You should have no illusions about that, but in a different system, that would be different. And more dramatically, and less obvious perhaps, although it's become very obvious in the last two years, it allows the superpowers to change the rules or the game state when exceptional circumstances demand it. In computer programming language, God mode is the ability to access the control panel settings of a system. The best example of this is when the United States and Europe decided to freeze Russia's reserves, Russia's central bank reserves. They were, of course, not physical money; they were just an entry in a computer in the Federal Reserve or the European Central Bank. They could be frozen or even deleted with just the stroke of a button, and that's what the United States and Europe did two or three days after the invasion. How would you understand this? You know, political science struggles to understand what happened, but in computer programming language it's very easy: you access the control panels of the system and you change the game environment. At least I would suggest this is what's happening. I'm sure you'd be able to have an even more sophisticated understanding of this. Other countries, what can they do? And I'll conclude with this. They can play by the existing rules, not worry about shaping the rules. You even have countries that are so good at playing by the rules that sometimes they seem to be better at playing by the rules than the countries that shaped and created the rules, which is a funny thing. I think Switzerland and Singapore are countries that are better at playing by the existing rules than the United States is. They can specialize in a certain element or elements of the global system and attempt to shape the rules in that limited area. Russia, I think, has done this for energy, particularly before the invasion, and the European Union has done this to some extent regarding trade. So I'm not suggesting that these systems are built entirely by one player. Again, we go back to the idea of modularity. Or finally, they can align with one of the superpowers and just go along with those plans for world building. The United Kingdom, I would argue, is just entirely aligned with the United States when it comes to the project of world building. Well, remarkably, I finished exactly on time. Questions? Thank you so much for this interesting presentation. In fact, we have already received a number of questions, so I'll start with the first one: why is it a big deal to be first to create a virtual world? Isn't it feasible that a state with more resources improves on a world that a more innovative state created, and creates its own version? Yes, it's always, the game is never fully determined. We see with China that there's an attempt to, in a way, create a world that is more virtual than the existing one, and therefore more powerful and capable of replacing the existing world. So I'm not saying that the game is finished, but there is a lot of power in being there first. After all, those who want to replace the existing world with another one still have to operate under the existing rules to a considerable extent. I think China is a country that understands this dynamic very well, because you play by the existing rules up to the point where you have enough resources to slowly try to change them, and change them from the inside. Thank you. Then we have, yes, thanks to the audience for lots of questions coming in.
So the next one being: do you believe something beyond the nation state will emerge online, for example, a network state? Yeah, but you know, what I'm arguing here is that we kind of already have a network state. I'm arguing that, contrary to what we might think, we already live in a purely virtual world made up of connections. And this is built by either one dominant player or two dominant players. So I'm still unconvinced that, you know, what Balaji talks about, I think we are already living it; it's not entirely in the future. Then we have another question: is there anything that cannot be virtualized and will always remain outside of the control of the game master? I wanted the book to be very ambitious. So in the book, I argue there is nothing. And the effort of the book, why it took more than a year to write, is that sometimes it gets quite difficult to argue, particularly when it comes to energy. But I try to argue that solar energy in particular is a virtual form of energy. It has no supplies, has no waste. It's pure flow, no component parts. Pure flow from the photon to the chip that operates an AI system. So you'll have to read it there to see if I was successful. But my gambit is that, yes, everything can be virtualized. So the next one is a spicy one: USA or China, who will win? You know, the great thing, I think, about my framework is that it's a game. So the outcome is open and depends on the plays that the players make, right? You know, I say that right at the beginning: there's no way to know the outcome in advance. You can know the structures of the game, that's what I try to do, particularly this metagame that I talked about, but for the time being, there's no way to know who's going to win in the end. Can a superpower be decentralized? What I think can be decentralized is the world game. So I see no better solution to the problem of war and conflict than this idea that the rules of the game can be built by different players. I don't think it can be fully decentralized in the sense of, let us say, something like proof of work, because that doesn't exist in geopolitics, but a kind of proof of stake, where the global system is so decentralized that no power has full control over it, and that perhaps is something we could aim for. We have another question asking: who's the referee in the world game? Can Ethereum or Web3 take this role? No, I don't see any referee. I see just a permanent struggle between different programmers. The good thing, perhaps, is that this is a world subject to rules, right? I never argue in my book that this is the jungle. States are not competing directly. They are competing subject to rules, and they are competing to change the rules. But there is a certain stability given by the rules. So if you want to say the referee is the rules, yes, but it's a referee that is simultaneously being controlled and shaped by the players. And with that, we unfortunately already have to come to the final question: do you see any room for independent communities to remain, should an actor like the US complete a fully pervasive virtual world, or will all be forced to contribute to it in some way? If it's a virtual world and we live in it, there is no way to escape outside. You know, the book is also a call to join the game or join the metagame.
There's no other way to live in freedom than to join the competition or join the metagame. There's no other way to live in freedom than to join the competition, to shape the rules, because you're always going to be subject to those rules. Sometimes it might not seem. It might seem like you're just living in the natural world, but no, it's a world shaped by someone somewhere. Thanks so much. Thanks again to Bruno Massas.", "eventId": "devcon-7", - "slot_start": 1731483000000, - "slot_end": 1731484800000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1HFUKCdHq2CnGEM-2BvyaHipzeUf2aeP32TKRHPxKnWY", - "resources_slides": null, + "slot_start": 1731652200000, + "slot_end": 1731654000000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/171MvUF1M-7FvPkuWLfzY3WGZzA0pW2lZXE-foWeOt4Q", + "resources_slides": "https://drive.google.com/file/d/1vbPdcrS1cu1U46bIY3zDP8kRxiUs_y2P/view", "speakers": [ - "hart-lambur" + "bruno-macaes" ] }, "vector": [ @@ -460655,7 +459357,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -461084,6 +459785,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -461455,11 +460157,9 @@ 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, @@ -461573,7 +460273,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -461600,7 +460299,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -461789,7 +460487,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -461963,20 +460660,20 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -461985,39 +460682,47 @@ }, { "session": { - "id": "keynote-world-politics-world-building", - "sourceId": "ERQKUX", - "title": "Keynote: World Politics, World Building", - "description": "World politics has changed. Geopolitics is no longer simply a contest to control territory: in this age of advanced technology, it has become a contest to create the territory. Great powers seek to build a world for other states to inhabit, while keeping the ability to change the rules or the state of the world when necessary. At a moment when the old concepts no longer work, this book aims to introduce a radically new theory of world politics and technology. The end goal: god mode", + "id": "kickstarting-impact-funding-with-hypercerts", + "sourceId": "VGZ7PP", + "title": "Kickstarting impact funding with hypercerts", + "description": "Create hypercerts, evaluate their content and fund what matters by building on top of the hypercerts ecosystem. Building on top of a decentralised registry of impactful work, the hypercerts ecosystem empowers impact creators to explore novel forms of impact funding and resource coordination. 
\r\n\r\nDuring this workshop we'll explore the hypercerts stack and help you mint, evaluate and trade your first on-chain impact certificates.", "track": "Real World Ethereum", "type": "Workshop", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "DevEx", "RPGF", "Best Practices", "funding", "Best Practices", "DevEx", "RPGF" ], "keywords": [ "Impact", "Funding" ], "duration": 4872, "language": "en", "sources_swarmHash": "68b95037f727bd07f5ee9fb3035c4efc4b0cd90bf2ba621b902764ea826d33ef", "sources_youtubeId": "Ozf8X4UeDY0", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673701f81b0f83434dc99e4d", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673701f81b0f83434dc99e4d.vtt", "transcript_text": "Thank you for being here. I know at Devcon you have many, many options. I'm glad you chose to be here. Two years ago in Bogota, I spoke about money as virtual and as necessarily virtual. And today I want to take that a little bit farther and talk about geopolitics as virtual. Let's see how that works. It's the topic of my new book that is coming out in February, but this is actually the first time that I test these ideas in public. Here's a nice little video by Calder Moore, an artist that I recommend you check out on Twitter or Instagram, and I hope he doesn't mind that I promote him a little bit here. This is really the idea behind my book, that the world as we know it is always in the process of being built from scratch, from nothing. And the process is naturally contested. Great powers tend to build on top of the building work done by other great powers. You know that geopolitics as a word was invented at the beginning of the 20th century, and it was meant to mean the fight or the struggle to control territory. Over the next hundred years, the word lost a lot of its meaning. Why? Because we suddenly started to realize that physical territory might not be that important. There were things that were more important: communication networks, the economy, energy systems, communication systems, currency systems. And so we moved to something that people like to call international relations. But then something quite odd happened, I think in the last 10 years. We realized that even if the world is artificial, even if it is built, that doesn't remove competition. Geopolitics moves from the control of physical territory to the control of artificial territories. In fact, I would argue that geopolitics becomes more intense and more existential, because if you are building an artificial world, others have to inhabit that world. Others are in a way captured in the world you built. This is a diagram from my previous book that really set up the problem but didn't quite solve it.
We have states, self-contained entities of some kind, that then open up and start developing very intense relations between themselves, and so create a kind of space of interaction between different states. But then a question arises, obviously: if there is this space of interaction, well, who builds it? Who regulates it? Who creates the rules for this space of interaction? Is it the states? Well, the states are part of it. Is there something above the states? What is that something above the states? Now, many people would say that's nature. That's the physical world, and states compete in the physical world. And I left the question at that point in my previous book, but let's see what my current answer is. There's a sentence, kind of a motto, a proverb, very common in the tech world, particularly in Silicon Valley: truth seekers take feedback from nature. And you could see a certain theory of geopolitics where the feedback would come from nature. Is nature the background for world politics? Well, you could say that the states that do better in state competition are the states that succeed in mastering nature, in controlling nature, in operating in nature. We all know this from engineering. The companies that do better at building aircraft are the companies that understand the laws of physics and understand the processes of engineering. And there is something to this. This view is still popular. You could say, for example, that the West has risen to global dominance because it was better at mastering and controlling nature through processes of science, engineering, discovery, and so on. There's even a certain theory of the Cold War: you say the West came out on top because, in the end, it was more successful at these processes, either of mastering nature or competition in the marketplace. But you see, in all these cases, there is a background for state competition, and that background is, in a way, natural. But now, imagine if a country becomes so powerful that it acquires the ability to create a virtual world. And this virtual world is so much more attractive than the physical world that other states, other countries eventually migrate to it. What then determines state competition? Well, the interesting thing is that at this point, the world is no longer natural. The world has been built by a powerful state actor, by, I would call it, a superpower. In practice, this means that the world might become an American world or a Chinese world, which are expressions that we hear a lot. And I would suggest that we take those expressions as literally as possible. In fact, the world can become Chinese. The world can become American. Why is that the case? Well, in a virtual world, essentially made up of communication networks, essentially made up of systems that are purely artificial and virtual or tending towards becoming virtual, they are necessarily built by one state actor, and they acquire the characteristics that that state actor has impressed on them. What has changed is that we no longer believe in a neutral playing field, and the reason we no longer believe in a neutral playing field is that we believe the game environment is built by the players. This is the point where it's quite interesting to start talking about game environments or a world game. It's not the natural world, it's not the world of nature, it's a game that has been built. We live after nature, and that must necessarily change the terms of geopolitical rivalry.
When your opponent is building a fully artificial or technological world that could redefine your own reality, then geopolitics is not merely existential, not merely a question of survival or disappearance. It becomes ontological. The superpowers are competing to build reality, to determine reality. And others that are not as successful as that will live in a reality created by others. This is the premise or the gambit that I'm trying to explore. In fact, at this point, you start to understand what geopolitics is all about. It's about the building of these virtual worlds. And suddenly there are two levels. There is the level of those who build the world, and the level of those that live or inhabit that world. The level of those that are playing the video game, and the level of those that are programming the video game. And this, in my view, is the nature of geopolitical power, this break between two separate levels. A superpower is a system administrator. Now, I'm quite excited to present these ideas here, because I think this kind of language would sound very strange to my usual colleagues in geopolitics, but it will sound a lot more familiar to you. I'm interested in the reactions to that, both today and later; feel free to send me a message on Twitter or by email. You can crack down on offenders and pursue your aims by manipulating the system itself. I would say this approach characterizes the Western response to the war in Ukraine. We didn't go to war. Western democracies adopted a set of targeted economic tools that were designed to reduce the Russian threat to the existing system. In this kind of cybernetic model that I'm talking about, you could even compare our reaction to antivirus software, or even to the agents in the Matrix. What did the United States in particular do? It didn't go to war with Russia, but it changed the game environment for Russia in very detrimental ways. And the most obvious way it changed the game environment was to transform Ukraine into something quite different from what it was at the beginning, quite different from what it was in February 2022. And you know how the agents in the Matrix occupy the physical bodies of people on the street. If you are a global system administrator, if you have control over the system panel of global politics, you are able to introduce sudden and rather dramatic changes in the game environment. Suddenly, this player, Ukraine, is no longer the same player that it was at the beginning. You can transform the game environment for other players that are not operating at the same level that you are operating. It's a way to transform the ground world that lower-level users face on the battlefield. There is the system level, there is the ground world, and the system administrator can change the reality that the lower-level users face. So in one sense, geopolitics is a struggle not to control territory, but to create the territory. Think of it as a clash between two versions of the world. Imagine a simulated landscape. Go back to the little video at the beginning. Two or more computer programmers are fighting to redesign what appears on the monitor. This is, I think, evident in the case of the Ukraine war. It's perhaps even more evident in the Cold War between the United States and China. What you see is not a direct clash, and I very much doubt we will see a direct clash.
What you see is an attempt to penetrate deeper and deeper into reality as we experience it and try to take control over it. There is intense competition between the United States and China to redesign the communication networks of the future, and these will be communication networks that will apply not only to people but to objects. The world will come alive with 5G, 6G, 7G communication networks that increasingly replace the physical world with a virtual world. Who will build this world? And then, of course, energy as well. We enter a new energy system. Who will build this energy system? Will it be the United States? Will it be China? And what form will this energy system take? We already know well enough that it won't be purely an energy system, but this has happened in history many times. The second energy revolution, at the end of the 19th century and the beginning of the 20th century, wasn't purely about energy. It was a kind of a complex where electricity and the modern factory formed a single complex. Henry Ford started his career working for Edison, and many people speculate that he was able to come up with the idea of the assembly line because he had seen how electricity worked as a constant flow, and he adapted the logic of electricity to the modern factory. The modern factory together with electricity created a single complex that was dominated by the United States. Now, we already know enough about the third industrial revolution. The first was, of course, coal. Even then, there was a complex: coal and the steam engine. The third industrial revolution will also be a complex, and it's already pretty clear that it will be a complex of AI together with an energy source. There seems to be a difference of opinion about what the fundamental energy source will be, whether it will be nuclear or solar. In my book, I take a clear choice for solar, in part based on costs, but not only on costs. But we'll see what the final answer is. In the United States, clearly there's a certain preference for a complex of nuclear power and AI. I see the geopolitical competition as this kind of struggle between two programmers to see who is able to redesign the world. Now, an interesting question that is raised here is: does the world have to be designed by a single player, or can we have something like modularity, composability, I think terms that will make sense to you, where parts of the system may be designed by different administrators, by different builders? And this is perhaps a way that one could find balance in world politics: China, the United States, and other superpowers will be able to divide the task of building these virtual worlds, and we could even perhaps have a kind of a system of percentages where the world system would be built 80% by the United States and 15% by China. Some rules would be predominantly Chinese, but others would be American. In a computer program, this works because there's a principle of modularity. And one suspects that these principles could be applied to world politics as well. What I'm arguing essentially is that just as new technologies like nuclear weapons raised the destructive potential of direct conflict, there was another avenue open for power. You didn't have to win a nuclear war against a determined opponent. You could build an artificial world that others would be forced to inhabit. Call it a form of indirect government or indirect power.
Perhaps your opponent will even assume that these rules are natural. Go back to the beginning. Perhaps from his point of view, these rules are not really your rules. They are just the way the world works. That is the highest possible form of power. When you design the world in such a way that people take that world as natural rather than constructed, you have moved one level up in the great game. Your opponent is playing a video game. You are coding it. This is the highest aspiration of geopolitical power: to build a world that others have to inhabit. Now, something directly addressed to you. Builders tend to think that this idea of a world game is more than a metaphor. I think you have the intuition that you're building something much bigger than just a gadget or an app. It's particularly clear in the case of Ethereum, where many times it seems to me your aspiration is to build an artificial world, with principles of decentralization that go back to my comments about composability and modularity. Builders are building a global platform that eventually will set the terms for global power competition. But many times when we talk about technology, we fail to ask whether technology exists as a kind of background for our actions or whether something else is the background. I am arguing very strongly that technology is the background, and is the ultimate background. Sometimes we assume technology is a set of gadgets. The idea that it could become our environment, that it could become our only environment, is still, I think, for many people, too bold. We already talk about smart cities, which seems to suggest that technology, rather than the physical city, is the environment. But that is still not enough; we're moving in that direction, but still hesitantly. Sorry, just tell me if the sign language is confusing, if I'm getting in front of the people. I think crypto networks are even a step farther than smart cities, because they already contain the idea of an artificial world, of world building, of an autonomous world that slowly, gradually, in ways that are not entirely clear, is supposed to replace or subsume, to be built over, the physical world as we know it. Now, the problem is, in this human-made world that I'm describing, there is no external authority. The engineering power has set the rules in advance and enjoys root access to those rules. In my book, I'm very attracted by this idea that the language of computer programming is more helpful for understanding geopolitics than any other kind of language I know. And one should wonder why that is the case. Obviously, the answer is that we increasingly live in virtual, programmable worlds. Other actors may take the external environment as natural or inescapable. They have no way to access the most important levers and switches of power. The world, in this case, becomes American or Chinese. But in a Chinese world, the outcomes are naturally predetermined to favor China. China has God mode. And conversely in an American world. The great game, an expression many times used about geopolitics, is a game, but it's a metagame. It's a game whose purpose is to create the rules of the lower-level game. 
To conclude: I reserve the term superpower for those states that are engaged in a battle to shape the global system; everyone else is competing under those rules. For the superpowers, or the system administrators, the power to create the game environment, the game world, has two main advantages, which have been quite obvious during the Ukraine war. It allows the system administrators, the superpowers, to set their preferred rules, or the rules that best fit their capacities. If you look at the global system today, you know, have no illusions. These rules have been set by the West, have been set by the United States, and they are not neutral. They are rules that favor the capacities that the United States has, and in a way, of course, predetermine the outcomes. You should have no illusions about that, but in a different system, that would be different. And more dramatically, and less obviously perhaps, although it's become very obvious in the last two years, it allows the superpowers to change the rules, or the game state, when exceptional circumstances demand it. In computer programming language, God mode is the ability to access the control panel settings of a system. The best example of this is when the United States and Europe decided to freeze Russia's reserves, Russia's central bank reserves. They were, of course, not physical money; they were just an entry in a computer in the Federal Reserve or the European Central Bank. They could be frozen or even deleted with just the stroke of a button, and that's what the United States and Europe did two or three days after the invasion. How would you understand this? You know, political science struggles to understand what happened, but in computer programming language it's very easy. You access the control panels of the system and you change the game environment. At least I would suggest this is what's happening. I'm sure you'd be able to have an even more sophisticated understanding of this. Other countries, what can they do? And I'll conclude with this. They can play by the existing rules, not worry about shaping the rules. You even have countries that are so good at playing by the rules that sometimes they seem to be better at playing by the rules than the countries that shaped and created the rules, which is a funny thing. I think Switzerland and Singapore are countries that are better at playing by the existing rules than the United States is. They can specialize in a certain element or elements of the global system and attempt to shape the rules in that limited area. Russia, I think, has done this for energy, particularly before the invasion, and the European Union has done this to some extent regarding trade. So I'm not suggesting that these systems are built entirely by one player. Again, we go back to the idea of modularity. Or finally, they can align with one of the superpowers and just go along with those plans for world building. The United Kingdom, I would argue, is just entirely aligned with the United States when it comes to the project of world building. Well, remarkably, I finished exactly on time. Questions? Thank you so much for this interesting presentation. In fact, we have already received a number of questions, so I'll start with the first one: why is it a big deal to be first to create a virtual world? Isn't it feasible that a state with more resources improves on a world that a more innovative state created and creates its own version? Yes, it's always... the game is never fully determined. 
We see with China that there's an attempt to, in a way, create a world that is more virtual than the existing one, and therefore more powerful and capable of replacing the existing world. So I'm not saying that the game is finished, but there is a lot of power in being there first. After all, those who want to replace the existing world with another one still have to operate under the existing rules to a considerable extent. I think China is a country that understands this dynamic very well, because you play by the existing rules up to the point where you have enough resources to slowly try to change them, and change them from the inside. Thank you. Then we have, yes, thanks to the audience for lots of questions coming in. So the next one being: do you believe something beyond the nation state will emerge online, for example, a network state? Yeah, but you know, what I'm arguing here is that we kind of already have a network state. I'm arguing that, contrary to what we might think, we already live in a purely virtual world made up of connections. And this is built by either one dominant player or two dominant players. So I'm still unconvinced that what Balaji talks about is entirely in the future; I think we are already living it. Then we have another question: is there anything that cannot be virtualized and will always remain outside of the control of the game master? I wanted the book to be very ambitious. So in the book, I argue there is nothing. And the effort of the book, why it took more than a year to write, is that sometimes it gets quite difficult to argue, particularly when it comes to energy. But I try to argue that solar energy in particular is a virtual form of energy. It has no supplies, no waste. It's pure flow, no component parts. Pure flow from the photon to the chip that operates an AI system. So you'll have to read it there to see if I was successful. But my gambit is that, yes, everything can be virtualized. So the next one is a spicy one: USA or China, who will win? You know, the great thing, I think, about my framework is that it's a game. So the outcome is open and depends on the plays that the players make, right? You know, I say that right at the beginning: there's no way to know the outcome in advance. You can know the structures of the game, and that's what I try to do, particularly for this metagame that I talked about, but for the time being, there's no way to know who's going to win in the end. Can a superpower be decentralized? What I think can be decentralized is the world game. So I see no better solution to the problem of war and conflict than this idea that the rules of the game can be built by different players. I don't think it can be fully decentralized in the sense of, let us say, something like proof of work, because that doesn't exist in geopolitics, but a kind of proof of stake, where the global system is so decentralized that no power has full control over it; and that, perhaps, is something we could aim for. We have another question asking: who's the referee in the world game? Can Ethereum or Web3 take this role? No, I don't see any referee. I see just a permanent struggle between different programmers. The good thing, perhaps, is that this is a world subject to rules, right? I never argue in my book that this is the jungle. States are not competing directly. They are competing subject to rules, and they are competing to change the rules. 
But there is a certain stability given by the rules. So if you want to say the referee is the rules, yes, but it's a referee that is simultaneously being controlled and shaped by the players. And with that, we unfortunately already have to come to the final question: do you see any room for independent communities to remain, should an actor like the US complete a fully pervasive virtual world, or will all be forced to contribute to it in some way? If it's a virtual world and we live in it, there is no way to escape outside. You know, the book is also a call to join the game, or join the metagame. There's no other way to live in freedom than to join the competition, to shape the rules, because you're always going to be subject to those rules. Sometimes it might not seem that way. It might seem like you're just living in the natural world, but no, it's a world shaped by someone somewhere. Thanks so much. Thanks again to Bruno Maçães.", + "sources_streamethId": "6735e4169dbb7a90e176e775", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735e0479dbb7a90e1e78f69.vtt", + "transcript_text": " Session of the day. Well done. I think you guys should all give yourselves a round of applause for making it to the day three, last session. Well done. Awesome. My name's Mark, an Englishman living in New Zealand. Any Kiwis here? No Kiwis? No, they've all gone home. Okay, last session. We're going to talk about corruption, KYC, and the cost of compliance. So Jarrad Hope, co-founder of Status and Logos, welcome, Jarrad. Cool. All right. Well, yeah, thanks for making it to the end. I'm sure you're hungry. I'm sure you're tired. I am too. So I'm here to talk to you about corruption, KYC, and the cost of compliance. And, I mean, I think a lot of people intuitively don't like KYC when it's applied to them, but have never really looked into it, or into some alternate narratives. So I'm here to give you another narrative. So let's rewind the clock, back to September the 10th, 2001. Pay attention to this date. It might look familiar to you, right? Donald Rumsfeld, the Defense Secretary of the United States, held a press conference in which he admitted that $2.3 trillion of the Pentagon's budget was completely unaccounted for; they had no idea what it was spent on. In fact, in that press conference, he went on to say that the adversary wasn't terrorists. In fact, it was closer to home. It was the Pentagon bureaucracy. Now, the following year, Jim Minnery took it upon himself to track down just $300 million of this, right? He traveled all over the United States, and unfortunately, couldn't find it. He's quoted as saying: we know it's gone, but we don't know what they spent it on. Kind of a problem, right? Now, fast forward to 2015, right? Catherine Austin Fitts, who was the Assistant Secretary for Housing at the Department of Housing and Urban Development, looked into her own department as well as the Department of Defense, and found that for the year of 2015, $6.5 trillion was booked in what are known as unsupported adjustments, right? This was 54 times the actual approved spend for the DoD authorized by Congress, which was $122 billion. Now, Mark Skidmore, not sure why he's not appearing there, but he has a face, thought that this might have been some kind of typo, right? 
Surely it can't be $6.5 trillion. Surely Catherine meant $6.5 billion. So he got together with Catherine, and they really dove into it. What they found through their research is that there was actually $21 trillion unaccounted for between 1998 and 2015. Oh, okay. There he is. Now, none of this would have been... this is old news for Franklin Spinney, right? He was a 1983 military whistleblower who had already come out in Time magazine trying to alert the public to this kind of spending, right? He's quoted as saying the books are cooked routinely year after year. So this isn't something that happened just in that period. It's been happening for a long time. In fact, it's still happening, right? So here is something from this year, where $8.2 billion of improperly valued material went to Ukraine, right? Again, no idea what that material was; the money just got spent. And here is Senator Chuck Grassley in 2024, quoted as saying that when it comes to catching fraud, the Department of Defense's internal controls are a complete failure; we've known it for decades. And he's looking quite grumpy about it. And the reason for that is because he's basically spent most of his term tracking down this kind of spending. So he found a gas station built in Afghanistan that should have cost half a million dollars but actually went for 43 million dollars, right? He's found soap dishes that cost $117. Hammers, $400. Pliers, $1,000. Forks and spoons, $57 each. I hope they were plated in gold. But the most comical one is probably toilet seat lid covers. Maybe you remember this, maybe you heard about it. It's happened a few times. Once in the military, where they were going for roughly $640 per toilet seat, just the lid, right? Like not the entire thing, just the top part, right? In the Air Force recently, it's gone for $10,000 for each lid. It's a hell of a lid. So how does this happen? How on earth can you spend 54 times your allocated budget in a single year, right? Well, it's actually because of the oldest living program that's still in operation today. This is called MOCAS, right? It's written in COBOL. I think it was deployed in 1958. Now, its purpose is tendering, procurement and so on. But what it also happens to do is find unspent money, or funds in other programs that have kind of gone stale. This is completely illegal to do for any other government department, but here apparently everyone seems to look the other way. Now, just to give you a sense of proportion going into this: on the left-hand side here we've got the unaccounted adjustments I've just been talking about, this $1.2 trillion on average that's being spent per year over that time span, right? Adjusted for inflation, that might be around $2.5 trillion, let's say, right? On the other side, we've got the United Nations estimates for illicit financial flows in a given year, right? It's somewhere between 2% and 5% of global GDP. So those are the $1.6 trillion and $4 trillion bounds. 
Now, the $3.1 trillion that's on there, this sort of middle bracket, is an estimate of illicit financial flows for last year according to NASDAQ's global financial crime report that came out this year, right? Just to give you a sense of proportion: this is just one government agency versus the entire planet's illicit financial flows. Okay, so going back to that press conference. If you recall from the beginning of this, the day after it, 9/11 happened, right? Tragedy, absolute tragedy. And as those towers fell and hit the ground, the world changed, right? Not only in terms of the surveillance, in terms of our communications, but also the deployment of financial surveillance around the entire planet, right? In just one month, the Financial Action Task Force issued eight of their nine special recommendations, and actually, with the enactment of the Patriot Act, there are basically a whole bunch of provisions for KYC and AML, specifically around Sections 311, 326, 351, and 352. Section 311 is about giving the Secretary of the Treasury the authority to target specific money laundering and terrorist financing risks. 326 is about establishing minimum standards for financial institutions. So this is why you have to hold your passport up and take a photo of yourself whenever you sign up for an exchange or so on. 351 is immunity for anyone who likes to squeal, right? So reporting: if there are any issues around reporting, they're kind of immune if it's in the name of this particular goal. And 352 is basically the mandate for financial institutions to implement the program, right? Now, I say this goes around the globe because it was felt in Asian banks. According to the LexisNexis Risk Solutions True Cost of Anti-Money Laundering Compliance survey in 2015, even though local regulations were cited most frequently as having the greatest impact, it was the US regulation embodied by the Patriot Act that actually enforced these particular implementations of policy. Now, we've established agency and illicit financial flows, just to give you a sense of proportion, right? Now, we've also seen that it was implemented in the name of terrorism. Can you see on here the amount that has gone to terrorism? Yet? Maybe? Oh, there we go, right? $11.5 billion has gone to terrorism financing according to the latest NASDAQ global financial crime report. That's what we sold out our civil liberties for, just to give you an idea of that, right? Now, of course, you might look at that and go: well, you know, it's been two and a half decades since then. Surely we've solved terrorism. Well, actually, if you roll back the clock and look at some of the estimates of what that terrorism financing looks like, the World Bank and the IMF basically put the financing of terrorist organizations for the entire year of 2001 at less than half a billion dollars, right? Just to give you a sense of proportion here. In 2005, RAND found that al-Qaeda's annual budget was between $30 and $50 million. In 2015, the Department of the Treasury estimated that ISIS generated roughly a billion dollars. And in 2016, Europol estimated that that had doubled to $2 billion. Now, in contrast to that, we have this great report from the Stimson Study Group on counterterrorism spending, right? And according to that report, between 2002 and 2017 this came to a total spend of $2.8 trillion, or $186 billion on average per year. 
So this spending on counterterrorism is 16 times larger than what our estimates are for spending that goes towards terrorism, right? That's a pretty good margin if you're playing both sides. Anyway. So, I mentioned this notion of illicit financial flows, right? What are they? Well, there isn't really an agreed consensus on the definition. But generally speaking, it comprises tax evasion, multinational tax avoidance, the theft of state assets, and the laundering of proceeds of crime, and they cover a broad range of market and regulatory abuses, such as corruption, drug smuggling, and human trafficking, right? And it is important; like, I'm not trying to dismiss this at all, right? So don't get that from me, I'm just trying to give you a sense of proportion on this, right? It is important because Donato believes that every dollar that goes towards crime gets reinvested in crime, and therefore you get more crime, right? Perhaps a better analysis of this is actually Bjorn Lomborg, who got some of the top economists in the world together and tried to rank the United Nations Sustainable Development Goals on a dollar-for-dollar spend, right? And they found that, you know, addressing illicit financial flows is a top priority. So why is this important? Why is addressing IFFs important? Well, they're basically associated with ineffective state functioning and illegitimate use of state power, and they're kind of an international problem, because even though you might solve it within your own state or within your own country, that capital will probably flow to other countries that support it in some way, right? At the end of the day, it results in lost GDP, which means there's lost tax revenue, and the argumentation is that that leads to poor governance. And higher quality governance is obviously desirable. So what exactly are IFFs? And, well, that depends, just like the definition. So there's a myriad of different methodologies, and it's unclear exactly what comprises IFFs. And this is kind of part of the problem, because it's actually hard to understand what they are, due to their nature. So Raymond Baker here basically views that 65% goes towards trade misinvoicing. Trade misinvoicing is like when a multinational corporation will do, say, for example, natural resource extraction in one country, but misprice the cost of extraction and the value of the resource that's extracted, and then, when moving it to another country, change those numbers, right? 30% towards criminal activity and 5% towards corruption. The Cardamon view is that 80% goes towards trade misinvoicing, so this same problem, and 20% towards corruption. And the NASDAQ global financial crime report focuses more on the humanitarian aspects of illicit financial flows: 25% towards drug trafficking and 12% towards human trafficking, with 63% just towards other, which is corruption, crime, and so on. So even though we had the Financial Action Task Force and the Patriot Act implementing these regulations, it wasn't really until 2015... sorry, 2005, that illicit financial flows as an argumentation came onto the scene as a public policy issue, right? And this was actually through Raymond Baker's book, Capitalism's Achilles' Heel. The issue is that he's pushing a model and an idea that isn't well substantiated, and Peter Reuter was quite critical of it in 2015, when we didn't really know much more about the issue 10 years on, right? And in fact, there are more people who feel the same way, right? 
Even though we use different measures and models to estimate money laundering, there is actually no globally accepted model that can be used to quantify it. Even the Financial Action Task Force's own methodology for assessing compliance with the FATF recommendations and the effectiveness of AML systems is completely vague. It relies on the general use of crime statistics, anecdotal evidence, and subjective conclusions. And this is part of the nature of what makes IFFs so difficult to deal with. Now, to their credit, there is an ongoing pilot program by the United Nations that is trying to provide the highest quality data around IFFs. But, yeah, I mean, it's largely focused on drug trafficking in developing countries, and it's roughly $1 billion we're talking about for some of the bigger ones. So, are follow-the-money methods effective? Well, according to the Financial Action Task Force's own admission last year, less than 1% of global illicit financial flows is recovered. Ronald F. Pol believes that less than 0.1% of criminal finances is impacted by follow-the-money methods. And according to the UN, this is roughly 0.2%. In other words, 99.8% of criminal proceeds are completely unaffected by all of the KYC that we've implemented. So what can we measure? We can measure the cost of implementing compliance, right? And according to the LexisNexis Risk Solutions report, for United States banks alone, this is roughly $26.4 billion annually. That's double the amount that's going towards terrorism financing, and that's the US alone. In fact, Ronald F. Pol believes that compliance costs exceed recovered criminal funds more than 100 times over, and that the banks, the taxpayers, and ordinary citizens are penalized more than criminal enterprises. That's you and that's me. In fact, this has been known at the OECD and B20 for several years. And all of those costs get passed on to us. In particular, when it comes to traditional trade finance, 50 to 60% of these costs, or more than half the price of your trade, is charged to clients. And a large portion of that is associated with compliance. Now, why am I talking about this? Well, there's another, inverse way to think about this. And there's a great insight that we understand in institutional economics: that if we can reduce transaction costs, in the abstract sense, by 0.1%, this enables a country to quadruple its wealth. That's the difference between Argentina and Switzerland. So we can actually create better and more efficient institutions that lower transaction costs. I mean, when you line up at the DMV or when you're filling out a form, that's associated with transaction costs in an institutional sense. If you can reduce that by using, say, smart contracts or smart paper or whatever, we can enable more wealth, which will probably lift a lot more people out of... no, it will just make for a higher quality of life. In fact, Michael Levi basically says that no one could rationally think that AML controls in general, or financial investigation in particular, will solve organized crime completely or eliminate high-level offending. For there even to be a chance of achieving that, there would need to be a step change in transparency and effective action against high-level corruption along all possible supply chains. It's a huge problem. Why is that a huge problem? Well, before I get into that: Amnesty International created a report called Weaponizing Counterterrorism. I highly recommend you read it. 
But they basically showed how the Indian government has been exploiting the 2010 and 2013 FATF assessments to target human rights activists, journalists, students, academics, and so on, right? And so this is one of the issues with these kinds of recommendations. What happens is that if you don't implement them on time, you can be excluded from the economic system, right? You can be excluded from member states. And so this leads to rushing bad law through parliaments. And when those laws are in place, politicians can then use them for political gain. They've been used to suppress political opponents. It's not just Amnesty International talking about that. It's also the Royal United Services Institute in the UK. This is a 100-year-old think tank, very prestigious and well-respected. They focus on authoritarian regimes, but as we've just seen, it doesn't apply just to authoritarian regimes. So, who is perpetrating it? Are we really the ones to blame? Alex Cobham doesn't believe so. In fact, he says that IFFs are likely to be, by and large, an elite phenomenon; so, the financial and political elite. And the reason for this is because that's where the systems of abuse are likely to be strongest and where the capital is concentrated. They're the ones who make the rules, and therefore they're the ones who can break them. And to give an example of this, you might recall that Jamie Dimon, CEO of JPMorgan Chase, is on record basically saying crypto is a tool for criminal drug traffickers and money laundering and tax avoidance, and that if he was the government, he would shut us down. There's a certain irony in that: basically the following year... no, the following weeks, I think, after that statement, he was hit for... well, not him, but JPMorgan Chase was hit with fines totaling roughly $39 billion for doing exactly those things. This was part of the FinCEN Files, which basically show that it's not exclusively JPMorgan Chase, but many major banks. Now, when it comes to financial secrecy, you might think of tax havens and nice, lush, exotic Caribbean islands. You might even think of Switzerland. But I wanted to point out the Financial Secrecy Index, and particularly to look at the global scale weight here. The United States stands out at roughly 25%, in contrast to the second position of Switzerland, which has less than 4%. Their policies are literally, as Rumsfeld said, closer to home. Peter Reuter goes on to say: asking a kleptocratic state to create an effective AML system is like asking the fox to create a better hen house. It is the governing political elite that benefits the most substantially from the weakness of existing systems and controls. And this is a problem, because this kleptocracy issue is real. According to this Bloomberg report, trust in most US institutions has been withering across the board. It doesn't matter where you look; you know, civil society, the public, is losing confidence. This is not just happening in the US; it's happening globally. Whether you look at the United Kingdom or at France, you'll see that trust is declining. In fact, if you look at the OECD reports, average public trust in governments hovers around just 41%. We can do a lot better in terms of providing quality of governance. In fact, to corroborate the point, Transparency International came out in 2023 saying that over two-thirds of countries score below 50 out of 100, strongly indicating that they have serious corruption problems. Now, why is this important? 
Well, another way to view this problem of kleptocracy is that government debt is growing. In fact, it's reached unsustainable levels in many of the advanced and emerging economies. And this is from the Peterson Institute for International Economics in 2011. Even though they were talking about a 25-year horizon, we're kind of already quite far into that. And this is also evidenced by foreign sovereigns not buying US debt. In fact, since the 1800s, 52 countries have seen a debt-to-GDP ratio above 130%. 51 of those have defaulted, and today the United States is at 120%. By 2033, they're going to be above 130%. And that's according to the Congressional Budget Office in 2018. Now, those challenges are also not unique to the United States. The National Intelligence Council projected that many governments of the OECD countries are likely to experience the same economic difficulties. I'm running a little bit over time, but the Financial Action Task Force is also quite interesting in and of itself, right? Because it has a unique form of sovereignty as a non-governmental organization. It has no legal basis. It enjoys a sovereign immunity granted to it by its member states. It has no oversight. Even though it is based in Paris in the OECD building, it is not under the jurisdiction of the French government, nor of the OECD countries. So they can do whatever they want, and they have no real pressure to reform unless all member states start participating in that. As I mentioned, if you don't follow them, it results in economic lockout from the global financial system, and there's no way to opt out. So why does this matter? Because if we can start fixing governing institutions, we can start unlocking trillions of dollars worth of value, rather than letting it just disappear into the ether. There's this great report from the World Bank called Where is the Wealth of Nations? And what they found is that human capital and the value of institutions, as measured by the rule of law, actually constitute the largest share of wealth in virtually all countries. This is beyond natural resource extraction. So at the end of the day, what I'm trying to convey to you is that we should stop thinking from a scarcity mindset when it comes to follow-the-money methods. And instead, what we can actually do is improve our institutions. If we can make them faster, more efficient, and maybe even develop them in parallel, then we can actually create a much better and thriving world and a higher quality of life for all. 
Thank you.", "eventId": "devcon-7", - "slot_start": 1731652200000, - "slot_end": 1731654000000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/171MvUF1M-7FvPkuWLfzY3WGZzA0pW2lZXE-foWeOt4Q", - "resources_slides": null, + "slot_start": 1731576600000, + "slot_end": 1731582000000, + "slot_roomId": "classroom-c", + "resources_presentation": "https://docs.google.com/presentation/d/1-2n2zwPdIpfxkXDYIJI5vN-Bz4JCM93vP20YXjSCQ4I", + "resources_slides": "", "speakers": [ - "bruno-macaes" + "holke-brammer", + "bitbeckers" ] }, "vector": [ @@ -462458,6 +461163,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -462798,6 +461504,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -463035,6 +461743,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -463162,6 +461871,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -463325,15 +462035,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -463348,8 +462050,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0 @@ -463357,48 +462057,26 @@ }, { "session": { - "id": "kickstarting-impact-funding-with-hypercerts", - "sourceId": "VGZ7PP", - "title": "Kickstarting impact funding with hypercerts", - "description": "Create hypercerts, evaluate their content and fund what matters by building on top of the hypercerts ecosystem. Building on top of a decentralised registry of impactful work, the hypercerts ecosystem empowers impact creators to explore novel forms of impact funding and resource coordination. \r\n\r\nDuring this workshop we'll explore the hypercerts stack and help you mint, evaluate and trade your first on-chain impact certificates.", - "track": "Real World Ethereum", - "type": "Workshop", - "expertise": "Intermediate", + "id": "ktv-attestation-winners", + "sourceId": "MP9UQV", + "title": "KTV Attestation Winners", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "DevEx", - "RPGF", - "Best Practices", - "funding", - "Best Practices", - "DevEx", - "RPGF" - ], - "keywords": [ - "Impact", - "Funding" - ], - "duration": 4872, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "68b95037f727bd07f5ee9fb3035c4efc4b0cd90bf2ba621b902764ea826d33ef", - "sources_youtubeId": "Ozf8X4UeDY0", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6735e4169dbb7a90e176e775", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735e0479dbb7a90e1e78f69.vtt", - "transcript_text": " Session of the day. Well done. I think you guys should all give yourself a round of applause for making it to the day three, last session. Well done. Awesome. My name's Mark, Englishman living in New Zealand. Any Kiwis here? No Kiwis? No, they've all gone home. Okay, last session. We're going to talk about corruption, KYC, and the cost of compliance. So Jared Hope, co-founder of Status, and Lagos, welcome, Jared. Cool. All right. Well, yeah, thanks for making it to the end. I'm sure you're hungry. I'm sure you're tired. I am too. So I'm here to talk to you about corruption, KYC, and the cost of compliance. 
[... duplicate of the transcript above, trimmed ...] 
And instead, what we can actually do is improve our institutions. If we can make them faster, more efficient, and instead what we can actually do is improve our institutions. If we can make them faster, more efficient, and maybe even develop them in parallel, then we can actually create a much better and thriving world and a higher quality of life for all. Thank you.", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731576600000, - "slot_end": 1731582000000, - "slot_roomId": "classroom-c", - "resources_presentation": "https://docs.google.com/presentation/d/1-2n2zwPdIpfxkXDYIJI5vN-Bz4JCM93vP20YXjSCQ4I", - "resources_slides": null, - "speakers": [ - "bitbeckers", - "holke-brammer" - ] + "slot_start": 1731486600000, + "slot_end": 1731488400000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1r7I3zIxHezXZS2fH5PPvuIsIk0QSyDWrsnAqeEl-U6o", + "resources_slides": "" }, "vector": [ 0, @@ -463407,10 +462085,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -463838,8 +462516,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -464182,8 +462858,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -464422,7 +463096,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -464550,7 +463223,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -464715,9 +463387,10 @@ 0, 0, 0, - 2, 0, 0, + 2, + 0, 0, 2, 0, @@ -464737,9 +463410,9 @@ }, { "session": { - "id": "ktv-attestation-winners", - "sourceId": "MP9UQV", - "title": "KTV Attestation Winners", + "id": "ktv-winners", + "sourceId": "UYQFMA", + "title": "KTV Winners", "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", "track": "Entertainment", "type": "Music", @@ -464752,10 +463425,11 @@ "language": "en", "speakers": [], "eventId": "devcon-7", - "slot_start": 1731486600000, - "slot_end": 1731488400000, + "slot_start": 1731501000000, + "slot_end": 1731501900000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1r7I3zIxHezXZS2fH5PPvuIsIk0QSyDWrsnAqeEl-U6o" + "resources_presentation": "https://docs.google.com/presentation/d/1cuZ-hN8gOGCEQohCTOeJVPCVT4HAWbG9sWbvDGpwg_Y", + "resources_slides": "" }, "vector": [ 0, @@ -466068,11 +464742,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -466094,25 +464763,43 @@ }, { "session": { - "id": "ktv-winners", - "sourceId": "UYQFMA", - "title": "KTV Winners", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", + "id": "l1sload-in-action-write-l2-dapps-that-read-from-l1-state", + "sourceId": "ERQ7N3", + "title": "L1SLOAD in Action: Write L2 Dapps that Read from L1 State", + "description": "In this workshop we will explore some interesting new use cases unlocked by the newly proposed L1SLOAD precompile (RIP-7728). 
We will develop and deploy L2 dapps that read from L1 state using this precompile.", + "track": "Layer 2", + "type": "Workshop", + "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Developer Infrastructure", + "DevEx", + "Rollups" + ], + "keywords": [ + "RIP", + "L1SLOAD", + "Precompile" + ], + "duration": 5050, "language": "en", - "speakers": [], + "sources_swarmHash": "4eb65f6c60b1496a7551b229ed23560c241cd3229d0222453f48e4d1c7d143fc", + "sources_youtubeId": "FhvJPUcD6go", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6733123b3a168eb535f7b817", "eventId": "devcon-7", - "slot_start": 1731501000000, - "slot_end": 1731501900000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1cuZ-hN8gOGCEQohCTOeJVPCVT4HAWbG9sWbvDGpwg_Y" + "slot_start": 1731394800000, + "slot_end": 1731400200000, + "slot_roomId": "classroom-e", + "resources_presentation": "https://docs.google.com/presentation/d/1bocSfX9_K930B6knXp5J9HUDPwUP0hIP5UeWRegKZ_E", + "resources_slides": "", + "speakers": [ + "peter-garamvolgyi", + "rh" + ] }, "vector": [ 0, @@ -466122,8 +464809,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -466555,6 +465240,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -466893,6 +465580,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -466904,6 +465592,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -466915,13 +465604,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -467432,7 +466115,6 @@ 0, 2, 0, - 0, 2, 0, 0, @@ -467451,42 +466133,47 @@ }, { "session": { - "id": "l1sload-in-action-write-l2-dapps-that-read-from-l1-state", - "sourceId": "ERQ7N3", - "title": "L1SLOAD in Action: Write L2 Dapps that Read from L1 State", - "description": "In this workshop we will explore some interesting new use cases unlocked by the newly proposed L1SLOAD precompile (RIP-7728). We will develop and deploy L2 dapps that read from L1 state using this precompile.", + "id": "l1sload-precompile-read-l1-state-from-your-l2-contract", + "sourceId": "VRXWFH", + "title": "L1SLOAD Precompile: Read L1 State from your L2 Contract", + "description": "We recently introduced [RIP 7728: L1SLOAD Precompile](https://github.com/ethereum/RIPs/pull/27). 
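For context, a minimal sketch of what querying such a precompile could look like from off-chain tooling, written in Python with web3.py. The precompile address and the `l1_contract ++ storage_key` calldata layout follow the RIP-7728 draft and should be treated as assumptions until the RIP is finalized:

```python
# Hypothetical sketch: read an L1 storage slot via the proposed L1SLOAD
# precompile (RIP-7728) on an L2 node that implements it.
# Assumptions: the precompile lives at address 0x...0101 and expects
# `l1_contract (20 bytes) ++ storage_key (32 bytes)` as calldata,
# returning the 32-byte storage value. Check the current draft before use.
from web3 import Web3

L1SLOAD = "0x0000000000000000000000000000000000000101"  # assumed draft address

def read_l1_slot(w3: Web3, l1_contract: str, slot: int) -> bytes:
    calldata = bytes.fromhex(l1_contract.removeprefix("0x")) + slot.to_bytes(32, "big")
    # A plain eth_call against the precompile address; nothing is written.
    return w3.eth.call({"to": L1SLOAD, "data": "0x" + calldata.hex()})

# Usage (assuming an L2 RPC endpoint with RIP-7728 support):
# w3 = Web3(Web3.HTTPProvider("https://your-l2-rpc.example"))
# value = read_l1_slot(w3, "0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984", 0)
```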
This is a new L2 precompile that allows dapps on L2s to read from the L1 state.\r\n\r\nIn this talk, we will explain how L1SLOAD works, and we will highlight some of the most exciting use cases that this precompile will unlock.", "track": "Layer 2", - "type": "Workshop", + "type": "Talk", "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ + "Cross-L2", "Developer Infrastructure", "DevEx", - "Rollups" + "precompile", + "Cross-L2", + "Developer Infrastructure", + "DevEx" ], "keywords": [ - "RIP", + "RIPs", "L1SLOAD", "Precompile" ], - "duration": 5050, + "duration": 918, "language": "en", - "sources_swarmHash": "4eb65f6c60b1496a7551b229ed23560c241cd3229d0222453f48e4d1c7d143fc", - "sources_youtubeId": "FhvJPUcD6go", + "sources_swarmHash": "21e05ca0fa617894020bb4f2b894ffa706dd5f0c8e2d6218c7f93aba7ed68a9e", + "sources_youtubeId": "Lbl1-fwunKc", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6733123b3a168eb535f7b817", + "sources_streamethId": "6735dceb9dbb7a90e18e42a1", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731394800000, - "slot_end": 1731400200000, - "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/1bocSfX9_K930B6knXp5J9HUDPwUP0hIP5UeWRegKZ_E", - "resources_slides": null, + "slot_start": 1731581400000, + "slot_end": 1731582600000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1nkzZ5Gin2GWcgGhvYhOmVQywSYCjYFlNu3xeFIu8YLs", + "resources_slides": "https://drive.google.com/file/d/1u5sBEvgQbpjasap_01jW_uOxR4Ltz49l/view", "speakers": [ - "peter-garamvolgyi", - "rh" + "peter-garamvolgyi" ] }, "vector": [ @@ -467928,10 +466615,7 @@ 0, 0, 0, - 0, 6, - 6, - 0, 0, 0, 0, @@ -468283,10 +466967,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -468295,11 +466975,11 @@ 0, 0, 0, - 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -468445,6 +467125,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -468642,6 +467323,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -468826,47 +467508,46 @@ }, { "session": { - "id": "l1sload-precompile-read-l1-state-from-your-l2-contract", - "sourceId": "VRXWFH", - "title": "L1SLOAD Precompile: Read L1 State from your L2 Contract", - "description": "We recently introduced [RIP 7728: L1SLOAD Precompile](https://github.com/ethereum/RIPs/pull/27). This is a new L2 precompile that allows dapps on L2s to read from the L1 state.\r\n\r\nIn this talk, we will explain how L1SLOAD works, and we will highlight some of the most exciting use cases that this precompile will unlock.", - "track": "Layer 2", + "id": "l2-daos-biggest-challenges-we-face-to-make-l2s-sustainable-long-term", + "sourceId": "BF8EWR", + "title": "L2 DAOs - biggest challenges we face to make L2s sustainable long term", + "description": "Today L2 DAOs are mostly focused on growth and supporting their ecosystem builders. But long-term they will be responsible for the management and maintenance of their chains from all perspectives - ecosystem growth, software development, security, chain economic parameters management, and others. 
In this talk, I will explore what DAOs need to figure out and fix before they will be able to take this responsibility in the coming years and why we should be addressing those issues already today.", + "track": "Coordination", "type": "Talk", - "expertise": "Beginner", - "audience": "Engineering", + "expertise": "Intermediate", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Cross-L2", - "Developer Infrastructure", - "DevEx", - "precompile", - "Cross-L2", - "Developer Infrastructure", - "DevEx" + "Coordination", + "DAO", + "Governance", + "processes", + "Coordination", + "DAO", + "Governance" ], "keywords": [ - "RIPs", - "L1SLOAD", - "Precompile" + "structures", + "processes" ], - "duration": 918, + "duration": 1500, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "59a57914473f149d366580f8bc98b834d15b56a2bf4f9eb3b68020593761e8c2", + "sources_youtubeId": "rG3Zkuo08SM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735dceb9dbb7a90e18e42a1", + "sources_streamethId": "673d8fd117a97b4f4d216e34", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731581400000, - "slot_end": 1731582600000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1nkzZ5Gin2GWcgGhvYhOmVQywSYCjYFlNu3xeFIu8YLs", - "resources_slides": null, + "slot_start": 1731638700000, + "slot_end": 1731640500000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1vQuKk5kYywWP8c4RZ3Xv_lV6TMmiWy4s6jRMdeFV9MU", + "resources_slides": "https://drive.google.com/file/d/170c4q2SoSO8VZ9kclQSueu-A3qZZF60P/view", "speakers": [ - "peter-garamvolgyi" + "krzysztof-urbanski" ] }, "vector": [ @@ -468877,11 +467558,12 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -469651,7 +468333,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -469675,7 +468356,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -469715,10 +468395,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -469772,6 +468454,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -469821,8 +468504,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -469909,6 +468590,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -470020,7 +468702,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -470179,6 +468860,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -470186,7 +468868,6 @@ 0, 0, 0, - 2, 0, 2, 0, @@ -470196,56 +468877,43 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "l2-daos-biggest-challenges-we-face-to-make-l2s-sustainable-long-term", - "sourceId": "BF8EWR", - "title": "L2 DAOs - biggest challenges we face to make L2s sustainable long term", - "description": "Today L2 DAOs are mostly focused on growth and supporting their ecosystem builders. But long-term they will be responsible for the management and maintenance of their chains from all perspectives - ecosystem growth, software development, security, chain economic parameters management, and others. In this talk, I will explore what DAOs need to figure out and fix before they will be able to take this responsibility in the coming years and why we should be addressing those issues already today.", - "track": "Coordination", + "id": "l2-evm-common-core-a-path-beyond-evm-equivalence", + "sourceId": "9RJ3MA", + "title": "L2 EVM Common Core: A Path Beyond EVM Equivalence", + "description": "Network effects of the EVM have locked many of the L2s into equivalence with the L1 EVM. 
L1 is optimized for moderate throughput and maximal decentralization, but L2s need higher throughput and can rely on heavier full nodes.\r\n\r\nThe talk will present a vision for an L2 EVM Common Core as a new base VM for participating L2s. It aims to offer a way to ship more ambitious EVM changes without increasing L2 fragmentation. It is a result of our work as leads of the RollCall L2 coordination process.", + "track": "Layer 2", "type": "Talk", "expertise": "Intermediate", - "audience": "Community", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Coordination", - "DAO", - "Governance", - "processes", - "Coordination", - "DAO", - "Governance" - ], - "keywords": [ - "structures", - "processes" + "EVM-equivalent", + "Rollups" ], - "duration": 1500, + "keywords": [], + "duration": 1552, "language": "en", - "sources_swarmHash": "59a57914473f149d366580f8bc98b834d15b56a2bf4f9eb3b68020593761e8c2", - "sources_youtubeId": "rG3Zkuo08SM", + "sources_swarmHash": "be5774410c333f9038b5b9f1d8f38553237468d423f216e7da33a56b045bf8a7", + "sources_youtubeId": "eLO6N-99CZE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673d8fd117a97b4f4d216e34", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6736d1881b0f83434d4b82fe", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d1881b0f83434d4b82fe.vtt", + "transcript_text": " . Hey, everyone. Yeah, I'm here to talk about the L2 EVM Common Core. It's basically this initiative, that's a recent initiative that came out of the roll call process. If you've never heard of the roll call process, that's something we started earlier in the year. It's basically a L2 coordination effort. We have a monthly call where many of the major L2s on Ethereum send representatives, and it's basically for now mostly used as both coordination across them, also trying to be a bit of a connective tissue to the L1. I'm personally spending most of my time in L1 core development. And then we also have what we call RIPs, roll-up improvement proposals. And the idea there is basically that they are similar to EIPs. EIPs, you might be familiar that they are proposals to add new features to the EVM on L1. And so the IPs are the equivalent for the L2 side of things. And yeah, the process was originally kind of started, the roll call by Karl, who can't be here today, unfortunately, Joaf and me. And then more recently, Nico also started joining us more actively. And yeah, I want to basically talk a little bit about EVM equivalence in general and then kind of the specific kind of direction that we see the EVM equivalence go in the future. And I wanted to start a little bit with history. It's going to be a bit more of like an easy kind of beginner-friendly talk, at least in the beginning. I like pictures, so I have some pictures. In principle, the way I always think about scalability for blockchains is basically as this fundamental trade-off between the cost to run a full node and the throughput you get, right? And so Bitcoin, Ethereum, two early examples. Ethereum has more throughput than Bitcoin, but it's also a bit harder to run an Ethereum full node than it is to run a Bitcoin full node, right? And I call these kind of trustless chains because the important thing is everyone on earth who wants to can verify the chain. At least that is the goal. And we are sticking pretty close to that. 
And then there is the approach of just going further up the diagonal, and just picking Solana as an example here of chains that basically say: we want higher throughput, and how do we get there? Well, we basically go up, and we accept that now it is harder to run a full node, but we get more throughput. And of course, the trade-off is that now you actually need a quite beefy machine to be able to still trustlessly participate; otherwise you have to trust the majority of nodes. Now, ideally, we want to go there, right? The ideal chains have a very low cost to run a full node, so everyone can verify, but they have high throughput. So how do we do this? This is the key rollup trick, right? We realize that what actually matters is the cost to verify the chain. And if there's a way to verify the chain cheaper than running a full node, that's sufficient. And that's basically how we get to rollups. Because rollups are basically a way to compress verification of a chain. And so what you can do now, once you get to that place: the thing you could immediately think of is just, hey, take the high-performance chain and just run it as a rollup. There are some projects now on Ethereum that do this, that are Solana-VM-based. It's a bit of a challenge, though, to adapt them for L2s. Then, of course, what you can do as well is just take the EVM and really scale it to a high throughput. People sometimes have this mistaken conception that the EVM is fundamentally not able to reach similar throughput levels as the SVM. That's really not the case. The reason why Ethereum runs at lower throughput is specifically because of the trade-off we want: we want to be verifiable by everyone on earth in a trustless way, right? It's a choice, it's not a technical constraint. So once you go into the rollup, you can actually push the EVM to its limits. But the variant of this that we've seen the most is actually what I would call, I don't know, the multi-rollup cluster, which is just many smaller, lower-throughput individual rollups that together form a very high-throughput system. So that's kind of like the background of how the ecosystem evolved as Ethereum went through its roadmap. And if you zoom into the chain itself and then eventually the EVM, I have some more pictures. I call basically this, this is just like the execution chain. It's just a symbol representing chains where actual activity happens, right? Like chains that actually offer execution, where you can build apps on top. And the most basic version of a chain is just a self-contained chain. It's like an L1 with just normal apps on top. But now you want to turn it into a rollup. How do you turn it into a rollup? You need settlement and data availability. And those two things can be provided in a combined way by a settlement chain. So then once you have a settlement chain, you can take your chain there and just turn it into a rollup, and the nice thing there is that now you inherit the security of the settlement chain, but you can still yourself run with much higher throughput, right? Because you have that security that you inherit from that low-throughput but high-security chain. That's kind of like how this best-of-both-worlds solution works. 
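The economics of that "compress verification" trick can be sanity-checked with a toy cost model; all numbers below are illustrative assumptions, not measurements from any talk or client:

```python
# Toy cost model for verifying a chain directly vs. via a succinct proof.
# All figures are made up for illustration only.
full_node_cost_per_tx = 1.0   # re-execute every transaction yourself
proof_verify_cost = 50.0      # cost of verifying one succinct proof
txs_per_batch = 10_000        # transactions covered by a single proof

rollup_verify_cost_per_tx = proof_verify_cost / txs_per_batch
print(f"full node:       {full_node_cost_per_tx:.4f} units/tx")
print(f"rollup verifier: {rollup_verify_cost_per_tx:.4f} units/tx")
# The per-transaction cost of verifying shrinks with batch size, which is
# why a rollup can raise throughput without raising the cost to verify.
```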
And that's kind of how you get to this world, right? Where you have a settlement chain and you have a bunch of rollups on top. Now, this was basically the original vision for what people back then called ETH2. You might remember that before the merge, people used to call the beacon chain ETH2. And we moved away from that name, and that has a specific reason. Why? Because ETH2 was this vision that we would basically give this new chain some fancy new execution and settlement system, and you would take the old ETH1 chain and the old EVM on top, and you would retire it. Back in the day, there were a bunch of different thought experiments. You could turn it basically into some sort of rollup on top of the new ETH2 chain. You could just have it parked somewhere on the side, you know, either on proof of work forever with some bridges to the new system, or just deprecated over time or something. But then at some point, as we moved closer to the merge and to these ideas, people realized that, hey, actually, the ETH1 chain is kind of good enough for what we need for the settlement chain. And so that's why the plan instead became: let's merge the existing system into this new ETH2 chain. And that's why we kind of stopped calling it ETH1 and ETH2 and instead just call it Ethereum again. And so that's why today what we have on Ethereum is this hybrid settlement and execution chain. So down there, that's what Ethereum is supposed to be. That's both the settlement part, settlement and data availability, but we also still have execution. You can have your EOA on L1, you can use Uniswap on L1, all of that still works on L1. And it's all still powered by the original EVM that we used to have on Ethereum from the early days. Right. So that's basically how the role of the EVM evolved, and how for a long time the EVM was supposed to have an expiry date, and then it turns out it actually didn't. Now, how do L2s and their execution systems fit into this? Well, actually, when we first pivoted from sharding to rollups, the idea was: well, this is going to be amazing, because now all the L2s can go on this wild exploration and see what type of new fancy execution system works best for them. But then, it turns out, we have seen only a little bit of that. As I said earlier, we now see experimentation with the SVM and Move VM and more fancy high-throughput EVMs and whatnot. That happens a little bit, but the vast majority of L2s basically followed the network effects, and they were like: well, there's already a lot of dApp developers for the existing EVM today, so what we are going to do is just copy the EVM. Right? We're going to copy the EVM, and we're specifically not going to make changes to it, because otherwise apps would not be compatible. And it doesn't mean that the L2s were not innovative; I would say quite the opposite. I think we've had three, four years of pretty rapid innovation in this space, but that was very much focused on the specific new challenges for us. So that was focused on new functionality: fault proofs, ZK proofs. Of course, in the ZK space, the innovation has been amazing to see. And then when it touched the EVM, it was more like extension. I'm not sure how many of you are familiar with Arbitrum Stylus. That's an example of an extension to the EVM that left the core of the EVM unchanged and just added something on top. 
And then one more wrinkle to this, the consequence of the EVM equivalence was also that most L2s ended up just going with the dominant L1 EVM client, which at the time was Gath. Of course, Gath is still", "eventId": "devcon-7", - "slot_start": 1731638700000, - "slot_end": 1731640500000, + "slot_start": 1731580800000, + "slot_end": 1731582600000, "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1vQuKk5kYywWP8c4RZ3Xv_lV6TMmiWy4s6jRMdeFV9MU", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/12XdvKPNbvuPDHnrej4p-WzreCiZV7ATA5gFRxh1Vejk", + "resources_slides": "https://drive.google.com/file/d/1Tfnis6TUHKD6mGUcncuZmmXYYCNB8ygt/view", "speakers": [ - "krzysztof-urbanski" + "ansgar-dietrichs" ] }, "vector": [ @@ -470256,10 +468924,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -470311,6 +468975,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -470690,7 +469355,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -471043,6 +469707,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -471096,12 +469761,10 @@ 0, 0, 0, - 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -471155,7 +469818,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -471292,7 +469954,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -471329,6 +469990,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -471567,12 +470230,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -471585,38 +470248,45 @@ }, { "session": { - "id": "l2-evm-common-core-a-path-beyond-evm-equivalence", - "sourceId": "9RJ3MA", - "title": "L2 EVM Common Core: A Path Beyond EVM Equivalence", - "description": "Network effects of the EVM have locked many of the L2s into equivalence with the L1 EVM. L1 is optimized for moderate throughput and maximal decentralization, but L2s need higher throughput and can rely on heavier full nodes.\r\n\r\nThe talk will present a vision for an L2 EVM Common Core as a new base VM for participating L2s. It aims to offer a way to ship more ambitious EVM changes without increasing L2 fragmentation. It is a result of our work as leads of the RollCall L2 coordination process.", + "id": "l2-interoperability-via-collaborative-snarks", + "sourceId": "JPGEPU", + "title": "L2 Interoperability via Collaborative SNARKs", + "description": "Can contracts across rollups interact synchronously while maintaining horizontal scalability? The L2 interoperability problem can be viewed through the lens of collaborative SNARKs, where a coordinator splits a witness over N provers who collectively generate a proof, and the work each prover does should decrease linearly in N (horizonal scaling). This talk presents a solution for the special case of L2 interoperability and motivates new design constraints for SNARKs.", "track": "Layer 2", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "EVM-equivalent", - "Rollups" + "Fragmentation", + "Zk Rollups", + "Cryptography", + "interoperability", + "Cryptography", + "Fragmentation", + "Zk Rollups" ], - "keywords": [], - "duration": 1552, + "keywords": [ + "Interoperability" + ], + "duration": 1530, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "145060245426c6488d4e7abb9c124c221025d0620cf3cefc8df683e61eaacd97", + "sources_youtubeId": "Et58V-0FYsE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736d1881b0f83434d4b82fe", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d1881b0f83434d4b82fe.vtt", - "transcript_text": " . Hey, everyone. 
Yeah, I'm here to talk about the L2 EVM Common Core. It's basically a recent initiative that came out of the RollCall process. If you've never heard of the RollCall process, that's something we started earlier in the year. It's basically an L2 coordination effort. We have a monthly call where many of the major L2s on Ethereum send representatives, and for now it's mostly used both for coordination across them and to be a bit of a connective tissue to the L1. I'm personally spending most of my time in L1 core development. And then we also have what we call RIPs, rollup improvement proposals. And the idea there is basically that they are similar to EIPs. EIPs, you might be familiar, are proposals to add new features to the EVM on L1. And so the RIPs are the equivalent for the L2 side of things. And yeah, the RollCall process was originally started by Karl, who can't be here today, unfortunately, Yoav, and me. And then more recently, Nico also started joining us more actively. And yeah, I want to basically talk a little bit about EVM equivalence in general, and then the specific direction in which we see EVM equivalence going in the future. And I wanted to start a little bit with history. It's going to be a bit more of an easy, beginner-friendly talk, at least in the beginning. I like pictures, so I have some pictures. In principle, the way I always think about scalability for blockchains is basically as this fundamental trade-off between the cost to run a full node and the throughput you get, right? And so Bitcoin, Ethereum, two early examples. Ethereum has more throughput than Bitcoin, but it's also a bit harder to run an Ethereum full node than it is to run a Bitcoin full node, right? And I call these trustless chains, because the important thing is that everyone on earth who wants to can verify the chain. At least that is the goal. And we are sticking pretty close to that. And then there is the approach of just going further up the diagonal, and just picking Solana as an example here of chains that basically say: we want higher throughput, and how do we get there? Well, we basically go up, and we accept that now it is harder to run a full node, but we get more throughput. And of course, the trade-off is that now you actually need a quite beefy machine to be able to still trustlessly participate; otherwise you have to trust the majority of nodes. Now, ideally, we want to go there, right? The ideal chains have a very low cost to run a full node, so everyone can verify, but they have high throughput. So how do we do this? This is the key rollup trick, right? We realize that what actually matters is the cost to verify the chain. And if there's a way to verify the chain cheaper than running a full node, that's sufficient. And that's basically how we get to rollups. Because rollups are basically a way to compress verification of a chain. And so what you can do now, once you get to that place: the thing you could immediately think of is just, hey, take the high-performance chain and just run it as a rollup. There are some projects now on Ethereum that do this, that are Solana-VM-based. It's a bit of a challenge, though, to adapt them for L2s. Then, of course, what you can do as well is just take the EVM and really scale it to a high throughput. 
People sometimes have this mistaken conception that the EVM is fundamentally not able to reach similar throughput levels as the SVM. That's really not the case. The reason why Ethereum runs at lower throughput is specifically because of the trade-off we want: we want to be verifiable by everyone on earth in a trustless way, right? It's a choice, it's not a technical constraint. So once you go into the rollup, you can actually push the EVM to its limits. But the variant of this that we've seen the most is actually what I would call, I don't know, the multi-rollup cluster, which is just many smaller, lower-throughput individual rollups that together form a very high-throughput system. So that's kind of like the background of how the ecosystem evolved as Ethereum went through its roadmap. And if you zoom into the chain itself and then eventually the EVM, I have some more pictures. I call basically this, this is just like the execution chain. It's just a symbol representing chains where actual activity happens, right? Like chains that actually offer execution, where you can build apps on top. And the most basic version of a chain is just a self-contained chain. It's like an L1 with just normal apps on top. But now you want to turn it into a rollup. How do you turn it into a rollup? You need settlement and data availability. And those two things can be provided in a combined way by a settlement chain. So then once you have a settlement chain, you can take your chain there and just turn it into a rollup, and the nice thing there is that now you inherit the security of the settlement chain, but you can still yourself run with much higher throughput, right? Because you have that security that you inherit from that low-throughput but high-security chain. That's kind of like how this best-of-both-worlds solution works. And that's kind of how you get to this world, right? Where you have a settlement chain and you have a bunch of rollups on top. Now, this was basically the original vision for what people back then called ETH2. You might remember that before the merge, people used to call the beacon chain ETH2. And we moved away from that name, and that has a specific reason. Why? Because ETH2 was this vision that we would basically give this new chain some fancy new execution and settlement system, and you would take the old ETH1 chain and the old EVM on top, and you would retire it. Back in the day, there were a bunch of different thought experiments. You could turn it basically into some sort of rollup on top of the new ETH2 chain. You could just have it parked somewhere on the side, you know, either on proof of work forever with some bridges to the new system, or just deprecated over time or something. But then at some point, as we moved closer to the merge and to these ideas, people realized that, hey, actually, the ETH1 chain is kind of good enough for what we need for the settlement chain. And so that's why the plan instead became: let's merge the existing system into this new ETH2 chain. And that's why we kind of stopped calling it ETH1 and ETH2 and instead just call it Ethereum again. And so that's why today what we have on Ethereum is this hybrid settlement and execution chain. So down there, that's what Ethereum is supposed to be. 
That's both the settlement part, settlement and data availability, but we also still have execution. You can have your EOA on L1, you can use Uniswap on L1, all of that still works on L1. And it's all still powered by the original EVM that we used to have on Ethereum from the early days. Right. So that's basically how the role of the EVM evolved, and how for a long time the EVM was supposed to have an expiry date, and then it turns out it actually didn't. Now, how do L2s and their execution systems fit into this? Well, actually, when we first pivoted from sharding to rollups, the idea was: well, this is going to be amazing, because now all the L2s can go on this wild exploration and see what type of new fancy execution system works best for them. But then, it turns out, we have seen only a little bit of that. As I said earlier, we now see experimentation with the SVM and Move VM and more fancy high-throughput EVMs and whatnot. That happens a little bit, but the vast majority of L2s basically followed the network effects, and they were like: well, there's already a lot of dApp developers for the existing EVM today, so what we are going to do is just copy the EVM. Right? We're going to copy the EVM, and we're specifically not going to make changes to it, because otherwise apps would not be compatible. And it doesn't mean that the L2s were not innovative; I would say quite the opposite. I think we've had three, four years of pretty rapid innovation in this space, but that was very much focused on the specific new challenges for us. So that was focused on new functionality: fault proofs, ZK proofs. Of course, in the ZK space, the innovation has been amazing to see. And then when it touched the EVM, it was more like extension. I'm not sure how many of you are familiar with Arbitrum Stylus. That's an example of an extension to the EVM that left the core of the EVM unchanged and just added something on top. And then one more wrinkle to this: a consequence of the EVM equivalence was also that most L2s ended up just going with the dominant L1 EVM client, which at the time was Gath. 
Of course, Gath is still", + "sources_streamethId": "67343ad99dbb7a90e19c6d6f", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731580800000, - "slot_end": 1731582600000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/12XdvKPNbvuPDHnrej4p-WzreCiZV7ATA5gFRxh1Vejk", - "resources_slides": null, + "slot_start": 1731474000000, + "slot_end": 1731475800000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1ZVK2vJYrK2rxz9r6LEc4JhYeEu_tVk_8Q4kLDWRxY9k", + "resources_slides": "https://drive.google.com/file/d/1HjfrYnUx3LXhiYt6NWvEo7mGKUGbDAnd/view", "speakers": [ - "ansgar-dietrichs" + "ben-fisch" ] }, "vector": [ @@ -471678,10 +470348,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -472065,6 +470731,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -472384,6 +471051,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -472439,6 +471107,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -472697,11 +471366,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -472762,6 +471426,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -472938,8 +471603,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -472956,10 +471621,10 @@ }, { "session": { - "id": "l2-interoperability-via-collaborative-snarks", - "sourceId": "JPGEPU", - "title": "L2 Interoperability via Collaborative SNARKs", - "description": "Can contracts across rollups interact synchronously while maintaining horizontal scalability? The L2 interoperability problem can be viewed through the lens of collaborative SNARKs, where a coordinator splits a witness over N provers who collectively generate a proof, and the work each prover does should decrease linearly in N (horizonal scaling). This talk presents a solution for the special case of L2 interoperability and motivates new design constraints for SNARKs.", + "id": "l2-specific-mev-mitigation-strategies", + "sourceId": "FFWJAV", + "title": "L2 Specific MEV Mitigation Strategies", + "description": "MEV mitigation and prevention has primarily been researched in the base L1 Ethereum layer. This talk explores L2 specific strategies, including the future in the event of decentralized sequencing. 
We explore emerging EIP proposals and drafts (EIP-7640), the use of intents in L2s and other new constructions.", "track": "Layer 2", "type": "Talk", "expertise": "Intermediate", @@ -472967,34 +471632,34 @@ "featured": false, "doNotRecord": false, "tags": [ - "Fragmentation", - "Zk Rollups", - "Cryptography", - "interoperability", - "Cryptography", - "Fragmentation", - "Zk Rollups" + "Layer 2s", + "Rollups", + "MEV", + "defi", + "Layer 2s", + "MEV", + "Rollups" ], "keywords": [ - "Interoperability" + "DeFi" ], - "duration": 1530, + "duration": 1490, "language": "en", - "sources_swarmHash": "145060245426c6488d4e7abb9c124c221025d0620cf3cefc8df683e61eaacd97", - "sources_youtubeId": "Et58V-0FYsE", + "sources_swarmHash": "bb61f06d6177b7bc13e9366cbeba4e4fdf1d238ef84a4852919c8def46558a39", + "sources_youtubeId": "IXV3yFXHlfo", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67343ad99dbb7a90e19c6d6f", + "sources_streamethId": "6736f8ee1b0f83434dc12123", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731474000000, - "slot_end": 1731475800000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1ZVK2vJYrK2rxz9r6LEc4JhYeEu_tVk_8Q4kLDWRxY9k", - "resources_slides": null, + "slot_start": 1731646800000, + "slot_end": 1731648600000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1WzPEAvLhXYIe49IEB4HEC3EgI2OglZ-ElusjYDJG2QY", + "resources_slides": "https://drive.google.com/file/d/1WtGRPj67rCdioc1DzmL2tfqoFP-WKPls/view", "speakers": [ - "ben-fisch" + "joseph-poon" ] }, "vector": [ @@ -473748,6 +472413,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -473762,11 +472428,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -473816,8 +472477,6 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -473943,6 +472602,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -474138,7 +472799,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -474334,56 +472994,60 @@ }, { "session": { - "id": "l2-specific-mev-mitigation-strategies", - "sourceId": "FFWJAV", - "title": "L2 Specific MEV Mitigation Strategies", - "description": "MEV mitigation and prevention has primarily been researched in the base L1 Ethereum layer. This talk explores L2 specific strategies, including the future in the event of decentralized sequencing. We explore emerging EIP proposals and drafts (EIP-7640), the use of intents in L2s and other new constructions.", - "track": "Layer 2", - "type": "Talk", + "id": "latency-advantage-in-cex-dex-arbitrage", + "sourceId": "RPMHLF", + "title": "Latency Advantage in CEX-DEX Arbitrage", + "description": "We study the effects of having latency advantage in the CEX-DEX arbitrage in the first-come first-serve transaction ordering policies. We search for optimal strategies for a trader that owns such advantage. To find optimal strategies, we simulate price changes on CEX using real data and assume DEX price does not change in the latency advantage interval. 
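A toy Monte-Carlo version of that setup, with synthetic Gaussian CEX price moves standing in for the real data and every parameter (including the chance that the opportunity disappears) an illustrative assumption, looks roughly like this:

```python
# Toy simulation of the latency-advantage question: the trader sees a
# CEX-DEX price gap and chooses between trading immediately or waiting out
# the latency window in case the gap widens. All parameters are illustrative;
# `p_gone` (the chance the opportunity vanishes each step) is an added
# assumption not taken from the session description.
import random

def expected_profits(gap0: float, latency_steps: int, sigma: float,
                     cost: float, p_gone: float = 0.02,
                     n_paths: int = 10_000) -> tuple[float, float]:
    trade_now = max(gap0 - cost, 0.0)          # DEX price assumed static
    wait_total = 0.0
    for _ in range(n_paths):
        gap, alive = gap0, True
        for _ in range(latency_steps):
            if random.random() < p_gone:       # someone else takes the arb
                alive = False
                break
            gap += random.gauss(0.0, sigma)    # CEX price keeps moving
        if alive:
            wait_total += max(gap - cost, 0.0) # trade at window's end if profitable
    return trade_now, wait_total / n_paths

now, waited = expected_profits(gap0=0.8, latency_steps=20, sigma=0.1, cost=0.3)
print(f"trade immediately: {now:.3f}  wait out the window: {waited:.3f}")
```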
We find that optimal strategy can even be to trade right away as soon as the price difference crosses a threshold where trading is profitable", + "track": "Cryptoeconomics", + "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", "Rollups", + "Economics", "MEV", - "defi", - "Layer 2s", + "AMMs", + "programming", + "dynamic", + "AMMs", + "Economics", "MEV", "Rollups" ], "keywords": [ - "DeFi" + "Optimal", + "Stopping;", + "Dynamic", + "Programming;" ], - "duration": 1490, + "duration": 562, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "373cd46037e49db5d6daf67c74b2a86e58ffe6dbe5747c1938a919384ce69f95", + "sources_youtubeId": "9r7hmxQ8DTA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736f8ee1b0f83434dc12123", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731646800000, - "slot_end": 1731648600000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1WzPEAvLhXYIe49IEB4HEC3EgI2OglZ-ElusjYDJG2QY", - "resources_slides": null, + "slot_start": 1731487200000, + "slot_end": 1731487800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1CjpmVDcW4MOjilttmNcrYu_KP0rC8ud1_BjudHV_ntI", + "resources_slides": "https://drive.google.com/file/d/1le5l7TiUtjNS4T1T41z89p_iQAH7np6R/view", "speakers": [ - "joseph-poon" + "akaki-mamageishvili" ] }, "vector": [ 0, 0, + 6, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -475126,9 +473790,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -475162,13 +473823,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -475176,23 +473830,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -475234,6 +473871,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -475318,7 +473956,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -475550,6 +474187,28 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -475712,59 +474371,49 @@ }, { "session": { - "id": "latency-advantage-in-cex-dex-arbitrage", - "sourceId": "RPMHLF", - "title": "Latency Advantage in CEX-DEX Arbitrage", - "description": "We study the effects of having latency advantage in the CEX-DEX arbitrage in the first-come first-serve transaction ordering policies. We search for optimal strategies for a trader that owns such advantage. To find optimal strategies, we simulate price changes on CEX using real data and assume DEX price does not change in the latency advantage interval. We find that optimal strategy can even be to trade right away as soon as the price difference crosses a threshold where trading is profitable", - "track": "Cryptoeconomics", + "id": "launching-projects-out-of-the-global-majority", + "sourceId": "7VZ8WH", + "title": "Launching Projects out of the Global Majority", + "description": "Launching projects has been an almost entirely US driven exercise, with a handful of expectations out of Europe and Asia - and basically 0 examples out of LATAM or Africa. This talk aims to shed light on why this is a reality and how we as an ecosystem can support more experimentation and launches out of the global majority. 
Talking through cryptoeconomics, investors, narrative and positioning of previous high impact project launches.", + "track": "Real World Ethereum", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Business", "featured": false, - "doNotRecord": false, - "tags": [ - "Rollups", - "Economics", - "MEV", - "AMMs", - "programming", - "dynamic", - "AMMs", - "Economics", - "MEV", - "Rollups" - ], + "doNotRecord": true, "keywords": [ - "Optimal", - "Stopping;", - "Dynamic", - "Programming;" + "Global" + ], + "tags": [ + "DAO", + "Sufficient decentralization", + "Best Practices", + "macro/micro economics", + "global", + "Best Practices", + "DAO", + "macro/micro economics", + "Sufficient decentralization" ], - "duration": 562, "language": "en", - "sources_swarmHash": "373cd46037e49db5d6daf67c74b2a86e58ffe6dbe5747c1938a919384ce69f95", - "sources_youtubeId": "9r7hmxQ8DTA", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, + "speakers": [ + "james-waugh" + ], "eventId": "devcon-7", - "slot_start": 1731487200000, - "slot_end": 1731487800000, + "slot_start": 1731478800000, + "slot_end": 1731479400000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1CjpmVDcW4MOjilttmNcrYu_KP0rC8ud1_BjudHV_ntI", - "resources_slides": null, - "speakers": [ - "akaki-mamageishvili" - ] + "resources_presentation": "https://docs.google.com/presentation/d/1BZ-1nzUuvITdZkK8Kxj9N_dkxHmkmlG75RJ7u4tbtAc", + "resources_slides": "https://drive.google.com/file/d/1M8U1t40smifJRjqZk0BVPIuD-sy4IRg-/view" }, "vector": [ 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -476511,9 +475160,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -476540,18 +475186,17 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -476592,7 +475237,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -476612,6 +475256,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -476667,6 +475312,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -476722,6 +475368,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -476910,9 +475557,6 @@ 0, 0, 2, - 2, - 0, - 0, 0, 0, 0, @@ -477077,9 +475721,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -477094,48 +475738,54 @@ }, { "session": { - "id": "launching-projects-out-of-the-global-majority", - "sourceId": "7VZ8WH", - "title": "Launching Projects out of the Global Majority", - "description": "Launching projects has been an almost entirely US driven exercise, with a handful of expectations out of Europe and Asia - and basically 0 examples out of LATAM or Africa. This talk aims to shed light on why this is a reality and how we as an ecosystem can support more experimentation and launches out of the global majority. Talking through cryptoeconomics, investors, narrative and positioning of previous high impact project launches.", - "track": "Real World Ethereum", - "type": "Lightning Talk", + "id": "lazarus-how-to-stay-safe-from-the-biggest-threat-actor-in-crypto", + "sourceId": "HCXCXB", + "title": "Lazarus! How to stay safe from the biggest threat actor in crypto", + "description": "Lazarus has stolen by far the most funds in the blockchain space. 
They use the same or very similar attack vectors every time yet we see the biggest crypto companies falling victim to them one after another.\r\n\r\nIn this talk, i'll go over some of the attack vectors used by Lazarus and how people can keep themselves safe from Lazarus.", + "track": "Security", + "type": "Talk", "expertise": "Intermediate", - "audience": "Business", + "audience": "Engineering", "featured": false, - "doNotRecord": true, + "doNotRecord": false, "keywords": [ - "Global" + "Lazarus" ], "tags": [ - "DAO", - "Sufficient decentralization", + "Security", "Best Practices", - "macro/micro economics", - "global", + "Hacks", + "lazarus", "Best Practices", - "DAO", - "macro/micro economics", - "Sufficient decentralization" + "Hacks", + "Security" ], "language": "en", + "sources_swarmHash": "c4250f32ff689a42bcb4f8a9158944ab1c783e4f1d985a1b531bbe7096128ce5", + "sources_youtubeId": "W5wcGsh3UVE", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "james-waugh" + "mudit-gupta" ], "eventId": "devcon-7", - "slot_start": 1731478800000, - "slot_end": 1731479400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1BZ-1nzUuvITdZkK8Kxj9N_dkxHmkmlG75RJ7u4tbtAc" + "slot_start": 1731580200000, + "slot_end": 1731582000000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/15zVK369DMEaAyZgEYl7ytDPnVtTcqgBbNjAZaVtPUfk", + "resources_slides": "https://drive.google.com/file/d/1k3fV-hc66ZofGtyB0e97lSKdAfe_26fE/view" }, "vector": [ + 6, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -477877,10 +476527,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, + 6, 0, 0, 0, @@ -477981,7 +476628,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -478037,7 +476683,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -478093,7 +476738,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -478137,6 +476781,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -478282,9 +476927,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -478447,10 +477092,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -478465,43 +477110,56 @@ }, { "session": { - "id": "lazarus-how-to-stay-safe-from-the-biggest-threat-actor-in-crypto", - "sourceId": "HCXCXB", - "title": "Lazarus! How to stay safe from the biggest threat actor in crypto", - "description": "Lazarus has stolen by far the most funds in the blockchain space. They use the same or very similar attack vectors every time yet we see the biggest crypto companies falling victim to them one after another.\r\n\r\nIn this talk, i'll go over some of the attack vectors used by Lazarus and how people can keep themselves safe from Lazarus.", - "track": "Security", - "type": "Talk", + "id": "learn-huff-to-become-an-evm-chad", + "sourceId": "HRMCBK", + "title": "Learn Huff to become an EVM chad", + "description": "Become an EVM chad by learning Huff, a low level assembly language for the EVM! 
On top of being able to write super duper optimized smart-contracts, Huff will teach you how the EVM works under the hood and will let you master high level languages like Solidity or Vyper.", + "track": "Developer Experience", + "type": "Workshop", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Developper", "featured": false, "doNotRecord": false, - "keywords": [ - "Lazarus" - ], "tags": [ - "Security", + "Tooling", + "Languages", + "Open Source Software", "Best Practices", - "Hacks", - "lazarus", + "programming", "Best Practices", - "Hacks", - "Security" + "Languages", + "Open Source Software", + "Tooling" ], - "language": "en", - "speakers": [ - "mudit-gupta" + "keywords": [ + "Education", + "Huff", + "Programming" ], + "duration": 6545, + "language": "en", + "sources_swarmHash": "be66d9d7f9ede2c00c9b1a1058e72e7c830a1b5e3e0a89765651e49d331857a1", + "sources_youtubeId": "5j0HmFlwe68", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735bba59dbb7a90e19f5402", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731580200000, - "slot_end": 1731582000000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/15zVK369DMEaAyZgEYl7ytDPnVtTcqgBbNjAZaVtPUfk" + "slot_start": 1731564000000, + "slot_end": 1731571200000, + "slot_roomId": "classroom-d", + "resources_presentation": "https://docs.google.com/presentation/d/1-l5GZfkJD_jGXx19MZKctGeyeRotdNV_0HKanpnUjLU", + "resources_slides": "https://drive.google.com/file/d/1Fzu4jDSv_P8D_74GYYQTa7ZRGLuJxe4R/view", + "speakers": [ + "clement-lakhal" + ] }, "vector": [ - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -479249,7 +477907,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -479268,6 +477925,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -479277,10 +477935,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -479343,6 +478001,16 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -479503,7 +478171,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -479636,6 +478303,38 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -479652,52 +478351,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -479816,7 +478469,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -479825,6 +478477,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -479834,56 +478487,45 @@ }, { "session": { - "id": "learn-huff-to-become-an-evm-chad", - "sourceId": "HRMCBK", - "title": "Learn Huff to become an EVM chad", - "description": "Become an EVM chad by learning Huff, a low level assembly language for the EVM! On top of being able to write super duper optimized smart-contracts, Huff will teach you how the EVM works under the hood and will let you master high level languages like Solidity or Vyper.", - "track": "Developer Experience", - "type": "Workshop", - "expertise": "Intermediate", - "audience": "Developper", + "id": "lessons-and-learning-in-people-ops-at-the-ef", + "sourceId": "D7V8ZY", + "title": "Lessons & Learning in People Ops at the EF", + "description": "In this talk, you will learn more about the learnings of People Ops at the EF gathered after the first two years of its existence. 
\r\n\r\nWe will discuss the differences between People Ops in an open and decentralized setting, such as the EF and centralized, traditional organizations, and the required differences in approach and tradeoffs.", + "track": "Coordination", + "type": "Talk", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, - "tags": [ - "Tooling", - "Languages", - "Open Source Software", - "Best Practices", - "programming", - "Best Practices", - "Languages", - "Open Source Software", - "Tooling" - ], + "tags": [], "keywords": [ - "Education", - "Huff", - "Programming" + "people", + "growth", + "open" ], - "duration": 6545, + "duration": 950, "language": "en", - "sources_swarmHash": "be66d9d7f9ede2c00c9b1a1058e72e7c830a1b5e3e0a89765651e49d331857a1", - "sources_youtubeId": "5j0HmFlwe68", + "sources_swarmHash": "6053fc3bc7cfdf2336529c71db16ae75959733a3e1b980d33b9d80fe1c4c42ba", + "sources_youtubeId": "NAyiWuQX5xg", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735bba59dbb7a90e19f5402", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6736fa4e74749a4b89a25233", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736fa4e74749a4b89a25233.vtt", + "transcript_text": " Hey everyone. So good afternoon. My name is Jose. I work for PeopleOps at the Ethereum Foundation. I joined the Ethereum Foundation around two years ago. So this has been an experiment. We didn't have people operations before I joined the EF. And I want to share with you some of the learnings that we got for the past two years. So what I'm going to talk today is our experience, as I said, for the past two years, our go-through of challenges, positive aspects and learnings in both recruitment, people experience and development, and off-boarding as well, and especially how these differ from other traditional organizations that exist in the field. But first, I will provide some context on why doing people operations at the EF is a very particular challenge, full of nuances, mostly because of the way that the EF is organized. So the way that the EF is organized is the EF is part of a much larger ecosystem. It is an organization that exists at the frontier of what we have known so far. One that supports a blockchain ecosystem without controlling it. And what that means is also a work in progress. So every day we get to learn more about what DEF is and what kind of organization it needs to be for the present and for the future to support obviously Ethereum's long-term growth. If we zoom in a little bit on how the EF relates to the ecosystem, the EF was new and we didn't know the best way for teams to organize themselves and to relate to the ecosystem. So we prioritized creating an environment where actually different teams could choose to organize themselves differently depending on the goals that they actually wanted to achieve and they thought it was best for them to achieve those goals. So this resulted in having more like a community of teams than actual set of typical company teams that are kind of standardized all across the board. There are multiple ways in how these teams interact with each other and with the other entities that form the Ethereum ecosystem. 
So there are teams that work at the frontier of the EF, there are teams that work within the EF, there are teams that work externally, and there are teams that work with Ethereum ecosystem entities that are, again, at the frontier of the EF. So there are many ways in which these teams collaborate with one another and with the ecosystem. They are largely independent, as I said. Some of them are internal. Some of them work at the frontier. So that is very important in talking about the way forward and how people operations works for these teams. These teams are also largely autonomous. So they work in a decentralized way. They have a high degree of autonomy. Some decisions, such as, for example, roadmaps and priorities, are taken by these teams in consultation with leadership. But most of them are also made independently. Aside from some prescriptive decisions, such as, for example, legal matters, which obviously need to be complied with all across the board, there is quite a fair amount of leeway for teams to move around and to actually choose the setting, as I said, that is most suitable for them to help them achieve their own goals. So having said this, how does this actually impact people operations? In broad strokes, in traditional organizations, PeopleOps is kind of more structured and has more predictable outcomes. They aim to provide a consistent experience for all the teams within the same organization. At the EF, things are slightly different, because these teams work independently and they kind of form their own culture internally. We aim to be as diverse as possible to make sure it works for everyone. But we kind of maintain the same minimum threshold to make sure that there's a minimum standard that all teams can meet. Talking a little bit about recruitment and going into people ops directly: as I mentioned before, teams are independent. So different teams mean different subcultures, mean a different way of working. So this means that working for a certain team at the EF can be substantially different than working for another team within the EF. This reflects on how they communicate internally, ranging from how and when to what platforms they use, compensation philosophy, way-of-working practices, interactions with the adjacent Ethereum community, etc. So what we learned from this is that the best way for people operations to actually facilitate the recruitment process is to let them lead the way. And we are actually helping and holding hands to let them do whatever they want to do, within obviously some limits, but using our expertise to help them achieve their own goals. So this can range from purely operational support. There are teams that come to us and say, hey, I know exactly who I want to hire because I have a very close relationship with the community that works around my team, and I want to hire that person, and I just need to set up the operational process for it, and that is fine. We can obviously help with that. There are other teams who are less sure about what the process looks like. So what they usually do is: okay, I want to hire this kind of profile, which can be more or less defined, and then they ask for help doing the process from end to end. So that goes from defining the profile, sourcing the candidates, establishing a hiring process, running the process, helping them run the interviews, up until the offer. So it's fairly common for the EF to hire external contributors, as I said. 
This is also a very good way to lower the risk when we hire someone as, okay if this person was an external contributor and was already contributing to the EF, as I said before, working at the frontier between the EF and the ecosystem, that obviously lowers the risk because we know that person from their past contributions, so it's easier to just bring them in and we know that they will have a good fit within the organization. Another thing that we learned that is probably a challenge for us that does not happen within traditional organizations is that whenever we get a candidate that is a good candidate for a certain position within a team, it's much harder for us to share that profile with other teams because as I said, teams work and hire largely independently so we're not always aware of their hiring needs so moving one profile from one team's pipeline to another team's pipeline can be substantially difficult when we talk about people experience and their development there's also a lot to say here so because we are non-structured, well, we're a structured organization, but we are a decentralized organization, and we are a flat organization, so we don't have job titles. In traditional organizations, it's easier for people to feel progression through changes in their job titles. So you know that when you go from junior something into senior something, there is a progression there. So you have somewhere to look for because", "eventId": "devcon-7", - "slot_start": 1731564000000, - "slot_end": 1731571200000, - "slot_roomId": "classroom-d", - "resources_presentation": "https://docs.google.com/presentation/d/1-l5GZfkJD_jGXx19MZKctGeyeRotdNV_0HKanpnUjLU", - "resources_slides": null, + "slot_start": 1731652800000, + "slot_end": 1731654000000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1pSqd-PaSLhWa3-GQ2HCRVcJCWv_fnu9Z5T4wVlAMT-c", + "resources_slides": "https://drive.google.com/file/d/10KedOZLHU3N3U74YhH1A1uBpk3CvtNmR/view", "speakers": [ - "clement-lakhal" + "jose-pedro-cabrita" ] }, "vector": [ 0, 0, 0, - 6, 0, 0, 0, @@ -479892,6 +478534,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -480652,7 +479295,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -480662,7 +479304,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -480728,8 +479369,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -481031,7 +479670,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -481194,19 +479832,19 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -481216,39 +479854,44 @@ }, { "session": { - "id": "lessons-and-learning-in-people-ops-at-the-ef", - "sourceId": "D7V8ZY", - "title": "Lessons & Learning in People Ops at the EF", - "description": "In this talk, you will learn more about the learnings of People Ops at the EF gathered after the first two years of its existence. \r\n\r\nWe will discuss the differences between People Ops in an open and decentralized setting, such as the EF and centralized, traditional organizations, and the required differences in approach and tradeoffs.", - "track": "Coordination", + "id": "lessons-from-integrating-logup-gkr-in-the-miden-vm", + "sourceId": "LL799L", + "title": "Lessons from integrating LogUp-GKR in the Miden VM", + "description": "In this talk we will describe how to modify the STARK protocol to prove multiset checks using the GKR protocol. 
We will take a deep dive of the approach we’ve taken to implement it in the Miden VM, covering the benefits and challenges we've experienced.", + "track": "Applied Cryptography", "type": "Talk", - "expertise": "Beginner", - "audience": "Community", + "expertise": "Expert", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], + "tags": [ + "Zero-Knowledge", + "Cryptography", + "gkr", + "Cryptography", + "Zero-Knowledge" + ], "keywords": [ - "people", - "growth", - "open" + "LogUp", + "GKR" ], - "duration": 950, + "duration": 1392, "language": "en", - "sources_swarmHash": "6053fc3bc7cfdf2336529c71db16ae75959733a3e1b980d33b9d80fe1c4c42ba", - "sources_youtubeId": "NAyiWuQX5xg", + "sources_swarmHash": "7afb12e3dc341b4fc4f68df53f30360755a4e5033980052d6975716f890afc24", + "sources_youtubeId": "f4Zwn6ItiNs", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736fa4e74749a4b89a25233", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736fa4e74749a4b89a25233.vtt", - "transcript_text": " Hey everyone. So good afternoon. My name is Jose. I work for PeopleOps at the Ethereum Foundation. I joined the Ethereum Foundation around two years ago. So this has been an experiment. We didn't have people operations before I joined the EF. And I want to share with you some of the learnings that we got for the past two years. So what I'm going to talk today is our experience, as I said, for the past two years, our go-through of challenges, positive aspects and learnings in both recruitment, people experience and development, and off-boarding as well, and especially how these differ from other traditional organizations that exist in the field. But first, I will provide some context on why doing people operations at the EF is a very particular challenge, full of nuances, mostly because of the way that the EF is organized. So the way that the EF is organized is the EF is part of a much larger ecosystem. It is an organization that exists at the frontier of what we have known so far. One that supports a blockchain ecosystem without controlling it. And what that means is also a work in progress. So every day we get to learn more about what DEF is and what kind of organization it needs to be for the present and for the future to support obviously Ethereum's long-term growth. If we zoom in a little bit on how the EF relates to the ecosystem, the EF was new and we didn't know the best way for teams to organize themselves and to relate to the ecosystem. So we prioritized creating an environment where actually different teams could choose to organize themselves differently depending on the goals that they actually wanted to achieve and they thought it was best for them to achieve those goals. So this resulted in having more like a community of teams than actual set of typical company teams that are kind of standardized all across the board. There are multiple ways in how these teams interact with each other and with the other entities that form the Ethereum ecosystem. So there are teams that work at the frontier of the EF, there are teams that work within the EF, there are teams that work externally, there are teams that work with Ethereum ecosystem entities that are, again, at the frontier of the EF. So there are many ways in which these teams collaborate with one another and with the ecosystem. They are largely independent, as I said. Some of them are internally. Some of them work at the frontier. 
So that is very important in talking about the way forward and how people operations works for these teams. These teams are also largely autonomous. So they work in a decentralized way. They have a high degree of autonomy. Some decisions, such as, for example, roadmaps and priorities, are taken by these teams in consultation with leadership. But most of them are also made independently. Aside from some prescriptive decisions, such as, for example, legal matters, which obviously need to be complied with all across the board, there is quite a fair amount of leeway for teams to move around and to actually choose the setting, as I said, that is most suitable for them to help them achieve their own goals. So having said this, how does this actually impact people operations? In broad strokes, in traditional organizations, PeopleOps is kind of more structured and has more predictable outcomes. They aim to provide a consistent experience for all the teams within the same organization. At the EF, things are slightly different, because these teams work independently and they kind of form their own culture internally. We aim to be as diverse as possible to make sure it works for everyone. But we kind of maintain the same minimum threshold to make sure that there's a minimum standard that all teams can meet. Talking a little bit about recruitment and going into people ops directly: as I mentioned before, teams are independent. So different teams mean different subcultures, mean a different way of working. So this means that working for a certain team at the EF can be substantially different than working for another team within the EF. This reflects on how they communicate internally, ranging from how and when to what platforms they use, compensation philosophy, way-of-working practices, interactions with the adjacent Ethereum community, etc. So what we learned from this is that the best way for people operations to actually facilitate the recruitment process is to let them lead the way. And we are actually helping and holding hands to let them do whatever they want to do, within obviously some limits, but using our expertise to help them achieve their own goals. So this can range from purely operational support. There are teams that come to us and say, hey, I know exactly who I want to hire because I have a very close relationship with the community that works around my team, and I want to hire that person, and I just need to set up the operational process for it, and that is fine. We can obviously help with that. There are other teams who are less sure about what the process looks like. So what they usually do is: okay, I want to hire this kind of profile, which can be more or less defined, and then they ask for help doing the process from end to end. So that goes from defining the profile, sourcing the candidates, establishing a hiring process, running the process, helping them run the interviews, up until the offer. So it's fairly common for the EF to hire external contributors, as I said. This is also a very good way to lower the risk when we hire someone: if this person was an external contributor and was already contributing to the EF, as I said before, working at the frontier between the EF and the ecosystem, that obviously lowers the risk, because we know that person from their past contributions, so it's easier to just bring them in, and we know that they will have a good fit within the organization.
Another thing that we learned that is probably a challenge for us that does not happen within traditional organizations is that whenever we get a candidate that is a good candidate for a certain position within a team, it's much harder for us to share that profile with other teams because as I said, teams work and hire largely independently so we're not always aware of their hiring needs so moving one profile from one team's pipeline to another team's pipeline can be substantially difficult when we talk about people experience and their development there's also a lot to say here so because we are non-structured, well, we're a structured organization, but we are a decentralized organization, and we are a flat organization, so we don't have job titles. In traditional organizations, it's easier for people to feel progression through changes in their job titles. So you know that when you go from junior something into senior something, there is a progression there. So you have somewhere to look for because", + "sources_streamethId": "67342ddd9dbb7a90e1c34f54", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731652800000, - "slot_end": 1731654000000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1pSqd-PaSLhWa3-GQ2HCRVcJCWv_fnu9Z5T4wVlAMT-c", - "resources_slides": null, + "slot_start": 1731470400000, + "slot_end": 1731472200000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1Eh_tW-ueqILgRF3_daF57cyNlIe38F86K1969SSn5sg", + "resources_slides": "https://drive.google.com/file/d/1_Rvu9pBGs12QgqZWNYaxDt9ZteMQZh5n/view", "speakers": [ - "jose-pedro-cabrita" + "philippe-laferriere" ] }, "vector": [ @@ -481262,7 +479905,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -481700,14 +480342,8 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, 0, + 6, 0, 0, 0, @@ -482019,6 +480655,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -482408,6 +481046,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -482569,14 +481208,13 @@ 0, 0, 2, + 2, 0, 0, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -482588,45 +481226,49 @@ }, { "session": { - "id": "lessons-from-integrating-logup-gkr-in-the-miden-vm", - "sourceId": "LL799L", - "title": "Lessons from integrating LogUp-GKR in the Miden VM", - "description": "In this talk we will describe how to modify the STARK protocol to prove multiset checks using the GKR protocol. We will take a deep dive of the approach we’ve taken to implement it in the Miden VM, covering the benefits and challenges we've experienced.", - "track": "Applied Cryptography", - "type": "Talk", - "expertise": "Expert", - "audience": "Engineering", + "id": "leveraging-ethereum-for-sustainable-solutions-in-southeast-asia", + "sourceId": "F7Z87P", + "title": "Leveraging Ethereum for Sustainable Solutions in Southeast Asia", + "description": "In this talk you will learn how Ethereum can shape a sustainable and regenerative future in Southeast Asia. We will dive into the challenges faced by communities like Thai farmers, and how cryptoeconomic solutions can drive resilience, fair markets, and renewable energy adoption. 
Discover innovative projects tackling coordination failures through cryptoeconomics, from parametric insurance to decentralized energy exchanges, and see how you can contribute to this transformative vision.", + "track": "Real World Ethereum", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Local/SEA", "featured": false, "doNotRecord": false, - "tags": [ - "Zero-Knowledge", - "Cryptography", - "gkr", - "Cryptography", - "Zero-Knowledge" - ], "keywords": [ - "LogUp", - "GKR" + "Ethereum", + "Use", + "Cases" + ], + "tags": [ + "Ethereum for Good", + "Climate", + "SEA", + "ethereum", + "case", + "use", + "Climate", + "Ethereum for Good", + "SEA" ], - "duration": 1392, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "faa954cc0d587a16e9dac5073d053dbe28772d3b17a31ca5bd910439b488f60d", + "sources_youtubeId": "G87k3c_cLLc", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67342ddd9dbb7a90e1c34f54", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731470400000, - "slot_end": 1731472200000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1Eh_tW-ueqILgRF3_daF57cyNlIe38F86K1969SSn5sg", - "resources_slides": null, "speakers": [ - "philippe-laferriere" - ] + "gesa-schneider" + ], + "eventId": "devcon-7", + "slot_start": 1731574200000, + "slot_end": 1731574800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/103WQKb3Z0-Knd415-KUFx0TbNISdUujVoQzaXW3xd3Q", + "resources_slides": "https://drive.google.com/file/d/1zfHNGm9EufK4kThv5QSBw7Rl63KBdBsL/view" }, "vector": [ 0, @@ -482635,11 +481277,11 @@ 0, 0, 0, + 6, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -483392,8 +482034,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -483505,6 +482145,27 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -483605,6 +482266,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -483666,11 +482329,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -483784,22 +482449,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -483933,6 +482582,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -483947,59 +482597,52 @@ 0, 0, 2, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "leveraging-ethereum-for-sustainable-solutions-in-southeast-asia", - "sourceId": "F7Z87P", - "title": "Leveraging Ethereum for Sustainable Solutions in Southeast Asia", - "description": "In this talk you will learn how Ethereum can shape a sustainable and regenerative future in Southeast Asia. We will dive into the challenges faced by communities like Thai farmers, and how cryptoeconomic solutions can drive resilience, fair markets, and renewable energy adoption. 
Discover innovative projects tackling coordination failures through cryptoeconomics, from parametric insurance to decentralized energy exchanges, and see how you can contribute to this transformative vision.", - "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Local/SEA", + "id": "leveraging-high-performance-computing-for-efficient-stark-provers", + "sourceId": "ZGXYDF", + "title": "Leveraging High-Performance Computing for Efficient STARK Provers", + "description": "Zero-Knowledge Proof (ZKP) protocols' applicability hinges on the prover's ability to efficiently generate proofs. This talk explores the computational aspects affecting ZKP performance, specifically focusing on STARK provers. We will analyze performance across high-performance and standard computing architectures and interpret results by examining key workload characteristics. From this understanding, we can project ZKP capabilities in future scenarios.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Ethereum", - "Use", - "Cases" - ], "tags": [ - "Ethereum for Good", - "Climate", - "SEA", - "ethereum", - "case", - "use", - "Climate", - "Ethereum for Good", - "SEA" + "ZK-EVMs", + "ZKP", + "STARK", + "optimization", + "STARK", + "ZK-EVMs", + "ZKP" ], - "language": "en", - "speakers": [ - "gesa-schneider" + "keywords": [ + "computing performance", + "optimization" ], + "duration": 1650, + "language": "en", + "sources_swarmHash": "466210110f16c732a0fb6c39294ffcee236d6138e2103181a425734ea235f63c", + "sources_youtubeId": "-4Sz8etUrig", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673448629dbb7a90e1853564", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673448629dbb7a90e1853564.vtt", + "transcript_text": " Bona nit a tothom, gràcies per ser-hi. Soc el Ricard Borré, com va dir, gràcies per la presentació. Jo treballo a Polygon. Bàsicament treballo en desenvolupar i optimitzar la tecnologia de GKE. Aquesta presentació es tracta de l'estabilització de la computació de alta performance per a proveïdors d'estalvis eficients. La presentació es divideix en dues parts. Primer, parlarem del present en termes de producte, que és la ZKEVM. I explicaré una mica les lesions aportades en el desenvolupament de la ZKEVM. I després, lessons learned on the development of the ZK-EVM. And then I will talk about the future, which is what we are working now, which is the ZK-VM, so a general proposed proving system based on STARKs as well. Okay, let's go for the ZK-EVM. The ZK-EVM was released two years ago, and its proof is based on a Stark method and we have a Stark and then we have aggregation to have a recursion process to aggregate proofs and then with a certain frequency we will send the proofs to L1. The only thing that we need to understand about a Stark for this talk is that it basically has two stages, or two phases. In the first phase, we will compute the trace, and then in the second phase, we'll take the Frey polynomial, which is kind of a summary of all the trace in a single polynomial, and we will prove the proximity to a low-degree polynomial. en una única polinomia i provem la proximitat a una polinomia de baix degrés. La fase de comit es fa en diferents estages. Això vol dir que la trànsit es fira en diferents estages i això és perquè necessitem randomitat per a alguns dels estages. 
We need some random numbers to evaluate some of the polynomials of the trace, and this randomness is obtained from the previous stages: the roots of the polynomials committed in the previous stages are appended to the transcript, and from that we get the randomness for the next stage that we have to evaluate. Okay, in our arithmetization of the zkEVM we have 1,300, more or less, polynomials, and the number of rows of the trace, which is something that we can fix, it's a parameter of the prover, let's say, is 2 to the 25, so it's about 32 million. This is quite a lot, and it ends up posing a requirement of 800 gigabytes of memory. I will talk more about this. So, why do we use such a big number of rows in our trace? Basically, because we have a monolithic approach to the arithmetization. This means that we have a static capacity, so the capacity of our trace is determined by the number of rows. And we fix a big number because, basically, we want to be able to handle any transaction that comes to the network. That's why, for the most heavily loaded transactions, the ones that will need more steps, we need to have enough space to handle them. This approach has advantages, but also some disadvantages. A clear one is that when we have to close a batch because we don't have more room for new transactions, because some of the state machines of the trace are filled, there will be some spare cells in the trace that we have to prove but that are not filled with real values coming from the inputs. So we have some padding effects, but we also have some advantages; for example, we have a constant layout that we can optimize very well, we can allocate buffers in advance, etc., so it also has some advantages related to performance. Now let's talk about timings. A proof of this huge trace for the zkEVM takes about six minutes, and the cost with a GCP instance that supports AVX-512 is only 0.46 dollars. Normalized by the number of transactions, the Ethereum transactions that we can fit in a batch, it's about 0.2 milli-dollars. That would be the cost per transaction. It's a very low cost, negligible in the grand scheme of things, in terms of the cost that a transaction incurs. Moreover, OKEx has done a very interesting piece of work that ported the prover to GPUs, and this has reduced the latency to three minutes, while even reducing the cost. It's a perfect situation where we reduce both the latency and the cost by using GPUs. These are the numbers given by them, and in the end we end up with 0.1 milli-dollars added to the cost of the transaction. So it's very good. The speedup obtained from the CPU to the GPU, if we consider the proof end to end, is 2x. Here lies one of the limitations of the prover: since we have such a big trace, at the end of the day we transfer a lot of data between the host and the device during the execution of the kernels, and we become limited by PCI Express. Moreover, if we look at the real costs, which is what matters, they are much lower. It's not something that is very relevant in the current context.
If we look a bit further and look at the distribution of the kernels when we generate a proof, it's a perfect, or at least good, situation, because 95% of the proving time is concentrated in just four kernels, and especially in two, which are the NTT and the Merkleization, which take 70% of the time. Then we have expressions, which are a linear combination of polynomials, and then the executor, which is the first phase of the evaluation of the trace; that is, the part of the trace evaluation that depends on the inputs, the initial execution of the inputs. Basically, we had to focus on these four kernels, basically on two, and that is what we did. A lot of work went into this in the past, before releasing the zkEVM. The first thing you have to consider when you want to optimize a code is to understand where the boundaries for this code are. Basically, you have to do a roofline analysis. The roofline analysis shows you where the limit is depending on the arithmetic intensity of your kernel. If your kernel does a lot of operations per each field element that you upload to the cache of the CPU, it means that it will be bounded by the CPU, by the computing part of the process, and if you have a lower arithmetic intensity, you become bounded by the memory transfers from the main memory to the cache of the CPU. In our case, we are basically bounded by the memory transfers for all the kernels except the hash. And this makes sense, because the hash does a lot of arithmetic operations per each field element involved in the hashing. Okay, so we focused a lot on optimizing the NTT. The focus here was basically to minimize the L3 misses. In the end, the performance we obtain is doing the NTT on 100 polynomials of 2^23 in 1.9 seconds.
That is more or less the reference. And then the Merkleization, of course, is about something else: on the CPU it's about vectorization, so taking advantage of the vector registers to do, each time, 4 or 8 operations at the same time. And we get to a performance, with Goldilocks 1, not Goldilocks 2, of 1 million hashes per second. So this is the summary of what we did, more or less, in the two years before launching the zkEVM. At one point we reached the conclusion that maintaining and improving the prover would not have any relevant impact on the performance seen by the application. This is a good situation; we can focus on other things, and that is what we are doing now. The project we are involved in is ZisK, the ZisK VM, which is a general-purpose zkVM. Basically, as others in the space, we want to prove any program that is written in a high-level language like Rust, CPP, or others. The motivation for doing this project is very meaningful for us, because we have acquired a lot of experience, a lot of tooling, a lot of knowledge running the zkEVM, which has been on mainnet for two years, and we have suffered a lot with the performance.
So we can take advantage of all these things we have learned and put them at the service of this new project, which is more general and can have more impact in general, for any user. It's open, you can check the GitHub; it's not complete enough yet to generate a proof, but it's almost there; maybe in one or two weeks we will be able to prove any code that comes written in Rust. Okay, now, the main difference with this new proving system, the proving system that we are doing for ZisK: it's different in many ways, but the main difference comes from the arithmetization, because now we no longer have a static capacity for the trace. We will have a dynamic capacity. This means that we will shape the trace depending on the inputs. And it will be divided into different instances. All this will be explained in the next talk, so I cannot skip the responsibility of explaining this well, but of course I encourage you to stay to understand all the implications of this. On the proving time, basically you will have to prove all the instances that you have generated in the arithmetization. We call them sub-proofs. So we have an array of sub-proofs to be proven. And then, of course, we will have an aggregation phase in which we will aggregate all the proofs resulting from all the instances, which will result in a single proof. Okay? Let's think about the computational implications of all this. In fact, in terms of CPU efficiency, nothing is gained from this; it can even get a bit worse. Why? Basically, the cost of one proof is proportional to the area, and this stays true even when you go to small instances. So you have proportional cost. You have divided the main trace into different sub-traces, but the sum of the areas is more or less the same. Of course, we have eliminated the padding effects. This is helpful, and it will reduce the overall area a little bit, but it's not a big difference. And we have other degradation costs; at the end of the day, we are making the proof a bit more expensive. However, with this new granularity, there are a bunch of opportunities. And the most important one is that we can take advantage of accelerators without this limitation of having to move data back and forth from the host to the GPU, with this PCI Express limitation. So this is one very important improvement here: we will be able to exploit GPUs much more efficiently. We have other ideas, like vectorizing the execution of STARKs, so we can kind of run eight STARKs at the same time, trying to do every operation in a vector register containing elements of different STARKs. These are some ideas that we are considering. But the most appealing idea for us is going to distributed computing. Basically, the idea of distributed computing is that you want to generate your proof not on a single instance in the cloud but on several instances, and with this you try to scale your application harder. You want to reduce latency, which is one of the focuses of this project. Let's talk about the distributed prover.
First of all, we are very happy to have a very important advantage in this project, which is that we have access to a supercomputer, called Snellius; it is part of the CCEURO organization, an organization in the Netherlands. It is part of EuroHPC, but it's a division in the Netherlands, let's say. They gave us resources to do our tests. We did a research project where we wanted to find the limits of scaling the STARK prover. We wanted to see where we could go, how far we could go in reducing the latency. That is our objective. Here we have three lines. We have hundreds of thousands of CPUs that we can launch together on one proof. We won't go to that level, but the hardware is there. And we also have hundreds of GPUs that we can launch to generate a proof. Furthermore, we want to target maximum performance, but everything we do can also be done in the cloud. We want to reproduce the results, at a smaller scale, in the cloud. And this is very important, because production will not be done on a supercomputer; it will be done on a cloud system, like the ones we use today. Let's talk about the first part, generating the trace. The witness computation is the part of generating the trace that comes from the inputs. How will we do it in a distributed environment? The most naive way of doing it would be: well, I have several processes. I'm talking about processes and threads. Two processes do not share memory, so they are processes distributed across the network, and then the processes spawn threads. You can assign one process the master role and say that it will compute the trace and then distribute the result into equal parts and send it to the others, okay? This is a functional approach, but it's not scalable at all. We are not parallelizing anything, we have a lot of memory requirements in the master process, and we are also adding a high communication overhead. So it's not a serious approach. Now, let's try to get rid of the communication costs. Let's do the computation of the witness redundantly in every process. Okay, that's fine: we got rid of the communication costs, but we are not accelerating anything, because it's redundant, and we have high memory requirements in all the processes, because all of them have to store the whole trace. We are not there yet. The second strategy we developed is to minimize the traces we generate. We divided the trace generation into two stages. In the first stage, we only generate what we call the minimum trace, which is the information you require to then, in a second stage, restart the evaluation of the trace at any point. So, in a second stage, we can recompute the trace in parallel and divide this computation. We have this first stage of computing the minimum trace, which is very fast, and then, from the information generated in this process, we can distribute the trace between processes. I mean, this first step of generating the minimum trace has been run by all the processes, so all the processes know the current layout that has to be distributed, and each process takes one part and computes that part, but only that part. This scales much better. Okay, let's look at the results.
The test that I am using here and in the next slides is a program that executes 10,000 SHA-256 hash operations. The proof that we are generating is not complete because, as I said, this project is under development; at this point, we're generating instances for the main state machine, the binaries and some multiplicity tables, but it's enough to understand the results and the potential of this. Now, if we take as a reference the emulation time, which is executing the program without generating any trace, not even the minimum trace, the time is 0.6 seconds for this program. This means we are doing 80 MHz; that is our speed. The overhead of generating the traces, running on a single compute node, is about 100%, so we are doubling this time, and it goes down to 37% across compute instances. So we go far ahead. The scalability is not great, because we are already very fast in the execution of the trace, since we are dividing it into threads on a single compute node. But in the end, really, this overhead is very acceptable, okay? Only 37%. Okay, I will go a little bit fast because I am running out of time. Then let's talk about how we manage the distribution of the sub-proofs. So we have to produce the proof for all these instances that have been generated. Here we have a synchronization point, because all these instances share the transcript. This means that this object used to get the randomness for the next stage is shared between all the instances, and this imposes some synchronization between the execution of all these instances. What we do is a communication stage where we share the roots that we have to put into that transcript, and then we get the same randomness in each of the proofs. Let's see the results. The scalability of this phase, generating the sub-proofs, is 100% efficiency. We go from 70 seconds down to 3.3 seconds. It's a little bit suspicious that we even have superlinearity: if you look at the efficiency, which is the line, it goes over 100% at some points, so this is something that is, let's say, a little bit suspicious. The problem here is that the distributed prover even runs better on a single node, because you can better fix the granularity of the threads that you are using for the sub-proofs. So if we compare taking as the base point the distributed prover on a single compute node, we obtain something more meaningful: we obtain a parallel efficiency of 80%. That is good. Let's go to the distribution of the recursion. We will have some communication requirements when we generate the tree, because the sub-proofs that we are aggregating can be hosted in different processes; we have to transfer them to the process that will generate the aggregation. And if we look at the parallel efficiency, really, it's not so appealing, right? It's 44%. Okay. But here the problem we have is that, when we are going down the tree, at some point we have fewer aggregations to do than processes, so there are some resources that are not being used at all. If we just release these processes to do other things, the real parallel efficiency relative to the hardware that we are consuming is 88%. So it's, again, a very acceptable parallel performance. Now, if we look at the complete overall situation for the whole proof, including the witness computation, the sub-proofs and the aggregation, the parallel efficiency is 78%. We go from 125 seconds down to 12.2 seconds. The recursion takes the major part of the time now; it's 65% of the proof. So that is where we have to focus to optimize the performance and further reduce the latency. And the most important conclusion is that, since we have a good parallel efficiency, it's 70%, we can reduce the latency by 10x: we have reduced the latency of the proof by 10x while the costs have only increased 27%. So this is a very good situation to have, because basically it means that you can really decrease the latency without additional costs. And this can be reproduced in the cloud, because we don't rely on a very high performance network: the transferring of data that we are doing is really minimal; we are transferring proofs or we are transferring roots, which is minimal information. So I am closing with this. We can reduce the latency at a very low cost increment. And, as further improvements, we are focusing on the recursion, on some algorithms, such as STIR and multi-FRI. Of course, there is no GPU here yet; with the GPU we can squeeze it even more, because our architecture can support GPUs very well, since the instances are smaller. We can reduce the latency even more with the GPUs. And also, running on cloud infrastructure is our next step. Okay, sorry for extending a little bit. And I can take any questions.
Thank you, Ricard. Yeah, a round of applause. So we'll have a few questions. A reminder for everyone who submitted: the QR code is always in the presentation, and you can also vote on the questions. So we're going to start, if it's all right for you, with the first one. Could you give an intuition of how you generate a minimum trace that can then be used to restart the trace at any point? What's the data structure like? Yeah, the minimum trace is kind of evaluating only the columns that you require to then be able to extend the trace. And this basically is two columns for the main machine, which are the ones that load data from the memory; it's kind of an A and B register. So it's only two columns. From that, the rest of the columns that compose the main trace can be generated from this information at a given checkpoint. So that's basically it: it's just a subset of two columns. Great. So the next one voted: can we say the ZisK VM basically trades more aggregation costs for a better concurrency architecture? So basically, yeah, that is one way to see it. Although the aggregation costs now are huge, and we have to pay for this aggregation, of course, to have this granularity that we are interested in, we want to reduce it as much as possible by improving the algorithms. For instance, Plonky2 has a very fast aggregation; we can do something similar. We have an approach that reduces the number of queries that you have to verify on each aggregation step. So we have a package of things that we can do to improve this part. Also, we have to really think about which granularity is best suited for our hardware. So maybe we can generate fewer instances if we can really exploit the hardware well, and then the recursion will have fewer levels. Okay, so a lot of investigation is coming on this part, because, as you saw, it's the one that is more relevant at this point. Okay, great, great, great. The next one is: can ZisK run programs written in lower-level circuit languages, will they be faster than Rust, will the gap ever disappear? Well, the purpose of this is not that. The purpose of this is basically to generate the inputs with a high-level language, basically Rust, C++, or any other language. So this is mainly the design point of it. Of course, you may be able to generate some arithmetization and then use the prover as a component that can be self-contained, and you may be able to use it. But the main purpose is just: you generate your code in a high-level language, you don't care about anything, and you will get the proof of the execution of this program from the inputs, which would be the inputs of the program, the outputs of the program, and the ELF of the compilation of the program with RISC-V or any other architectures that we are also considering, like Wasm or LLVM. Great, great, great. And I think we have a chance for one more quick one. Just trying to be futuristic, can you give insights into what could probably be done in order to improve the zkEVM or VM prover by one or two orders of magnitude? For me, it's clear that we have to go with distribution; it is a path, or a vector of improvement, that we have to take, because if you rely on more specialized and complicated hardware but rely on a single instance, on a single node, you won't accelerate. We are going to take advantage of the most sophisticated node available, the most sophisticated hardware available, and compose it, and take two, three, four, whatever number can fit with a good parallel efficiency. Because we want to control the costs of the proof at all times: if you have parallel efficiency, it means the cost doesn't grow. So that's the trade-off that we really have to take into account. Amazing. 
Please give a big welcome to AlphaPlus.", "eventId": "devcon-7", - "slot_start": 1731574200000, - "slot_end": 1731574800000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/103WQKb3Z0-Knd415-KUFx0TbNISdUujVoQzaXW3xd3Q" + "slot_start": 1731477600000, + "slot_end": 1731479400000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1J3KMOMYAXjSesFqZthBz2neGQcOt3Ui_KyKgToVj0Z0", + "resources_slides": "https://drive.google.com/file/d/1YdxtiNk4sgXtJUUMEEdBzY-r1BjMQ8vn/view", + "speakers": [ + "ricard-borrell" + ] }, "vector": [ 0, @@ -484008,11 +482651,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -484825,6 +483468,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -484879,7 +483523,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -484888,7 +483531,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -484962,6 +483604,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -485000,7 +483643,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -485064,16 +483706,11 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -485104,6 +483741,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -485315,6 +483954,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -485331,53 +483971,47 @@ 0, 0, 0, - 0, - 2, 0 ] }, { "session": { - "id": "leveraging-high-performance-computing-for-efficient-stark-provers", - "sourceId": "ZGXYDF", - "title": "Leveraging High-Performance Computing for Efficient STARK Provers", - "description": "Zero-Knowledge Proof (ZKP) protocols' applicability hinges on the prover's ability to efficiently generate proofs. This talk explores the computational aspects affecting ZKP performance, specifically focusing on STARK provers. We will analyze performance across high-performance and standard computing architectures and interpret results by examining key workload characteristics. From this understanding, we can project ZKP capabilities in future scenarios.", - "track": "Applied Cryptography", + "id": "libp2p-implementation-in-c-and-prysm", + "sourceId": "F7UVJP", + "title": "Libp2p implementation in C# and Prysm", + "description": "Joint talk will discuss a project which split to work on the C# implementation of Nethermind's libp2p, where we implemented the TLS protocol, upgraded the Noise protocol, and added the Perf protocol. Although the first version of the C# implementation isn’t released yet, it is integrated with Gnosis Chain’s Shutter node. Second part of the talk focuses on new goland libp2p library for Prysm CL client", + "track": "[CLS] EPF Day", "type": "Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "ZK-EVMs", - "ZKP", - "STARK", - "optimization", - "STARK", - "ZK-EVMs", - "ZKP" + "Consensus", + "Network State" ], "keywords": [ - "computing performance", - "optimization" + "Libp2p", + "Networking", + "P2P" ], - "duration": 1650, + "duration": 1015, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "688bcf8ed0f257f588315fb5d07e0d1d44de2a4780efe358e803d9c20c0c708e", + "sources_youtubeId": "zAlxmEONKGE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673448629dbb7a90e1853564", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673448629dbb7a90e1853564.vtt", - "transcript_text": " Bona nit a tothom, gràcies per ser-hi. Soc el Ricard Borré, com va dir, gràcies per la presentació. Jo treballo a Polygon. Bàsicament treballo en desenvolupar i optimitzar la tecnologia de GKE. 
Aquesta presentació es tracta de l'estabilització de la computació de alta performance per a proveïdors d'estalvis eficients. La presentació es divideix en dues parts. Primer, parlarem del present en termes de producte, que és la ZKEVM. I explicaré una mica les lesions aportades en el desenvolupament de la ZKEVM. I després, lessons learned on the development of the ZK-EVM. And then I will talk about the future, which is what we are working now, which is the ZK-VM, so a general proposed proving system based on STARKs as well. Okay, let's go for the ZK-EVM. The ZK-EVM was released two years ago, and its proof is based on a Stark method and we have a Stark and then we have aggregation to have a recursion process to aggregate proofs and then with a certain frequency we will send the proofs to L1. The only thing that we need to understand about a Stark for this talk is that it basically has two stages, or two phases. In the first phase, we will compute the trace, and then in the second phase, we'll take the Frey polynomial, which is kind of a summary of all the trace in a single polynomial, and we will prove the proximity to a low-degree polynomial. en una única polinomia i provem la proximitat a una polinomia de baix degrés. La fase de comit es fa en diferents estages. Això vol dir que la trànsit es fira en diferents estages i això és perquè necessitem randomitat per a alguns dels estages. Necessitem uns números randoms per avaluar algunes de les polinomies de la trànsit i aquesta randomitat es rebut i aquest ràndom és obtenit des dels estats previus. Les raons de la polinomia avaluades en els estats previus són adjuntades al transcripte i després tenim ràndom per al següent estat que hem d'avaluar. En la aritmetització de la ZKEVM tenim 1.300, més o menys, polinomials. I després, el ràndom de la polinomia és el que hem de fer. to evaluate. Okay, in our arithmetization of the ZK-EBM we have 1300 more or less polynomials and then the number of rows of the trace which is something that we can fix, it's a parameter of the prover let's say, it's 2 to the 25, so it's about 32 million. This is quite a lot and it ends up posing a requirement of 800 gigabytes of memory. I will talk more about this. So, why we use such a big number of rows on our trace? Basically, because you have a monolithic approach on the remethization. This means that we have a static capacity, so the capacity of our trace is determined by the number of rows. And so we fix a big number because, basically, we want to be able to handle any transaction that comes to the network. That's why for the most heavy-loaded transactions, the ones that will need more steps, we need to have enough space to handle them. This approach has advantages, but also some disadvantages. A clear one is that when we have to close a batch because we don't have more room for new transactions, because some of the state machines of the trace are filled, then there will be some spare cells on the trace perquè alguns dels machines de la trànsit són llocs, llavors hi haurà uns cels en la trànsit que hem de proveir però no estan llocs amb valors reals venent dels inputs. 
Així que tenim alguns efectes de padding, però també tenim uns avantatges, per exemple, que tenim un lloc constant que podem optimitzar molt bé, podem al·locar buffers on on advance etc so it also has some advantages related to performance now let's talk about timings a proof of this huge trace for the zkabm takes about six minutes and the cost with the i el cost amb la instància GCP que suporta l'AVX 512 és només 0,46 dòlars. Normalitzat pel nombre de transaccions, transaccions d'Ethereum que podem fitxar en un batx, és d'uns 0,2 mil·lidòlars. Aquest serà l'abast per transacció. És un abast molt baix, que és negligible en termes de la gran esquema de coses, en termes de cost que adquireix la transacció. A més, OKEx ha fet un treball molt interessant, que ha portat el proveïdor a GPUs, i això ha reduït la latència a tres minuts. I fins i tot reduint el cost. És una situació perfecta on reduïm la latència al cost utilitzant GPUs. Aquests són els números donats per ells i al final acabem tenint 0,1 mili dòlars per adreçat al cost de la transacció. Així que és molt bo. La velocitat obtenida des de la CPU a la GPU, si considerem la prova de l'endemà a l'endemà, és 2X. Aquí hi ha una de les limitacions de la prova. Com que tenim un trastament tan gran, a l'endemà, transferem molta data entre el host i el dispositiu durant l'execució dels kernels. I serem limitats pel PCI Express. A més, si mirem els costats reals, el que és important, són molt menys. No és una cosa que és molt rellevant en el context actual. Si mirem una mica més enllà i mirem la distribució dels kernels quan fem una prova, és una situació perfecta o bona perquè tenim el 95% de la temps de la prova és només concentrat en quatre kernels, especialment en dos, que són l'entitat i la mercantilització, que prenen el 70% del temps. I després tenim expressions que són una combinació línia de polinomials, i després l'executor, que és la primera fase de la avaluació de la tracció. Així que és la part de la avaluació de la tracció que depèn dels inputs. Aquesta és l'execució inicial dels inputs. Bàsicament, hem de focussar-nos en aquests quatre kernels, bàsicament en dos, i això és el que vam fer. Molt de feina en això, en el passat, abans de publicar el ZK-EBM. The first thing you have to consider when you want to optimize a code is to understand where are the boundaries for this code. Basically, you have to do a roofline analysis. Basically, the roofline analysis shows you where is the limit depending on the arithmetic intensity of your kernel. If your kernel does a lot of operations per each field element that you upload to the cache of the CPU, it means that it will be bounded by the CPU, by the computing part of the process, and if you have a lower emitting intensity, you are becoming bounded by the memory transfers from the main memory to the cache of the CPU. In our case, we are basically bounded by the memory transfers from the from the main memory to the cache of the CPU. In our case, we are basically bounded by the memory transfers for all the kernels except the hash. And this makes sense because the hash does a lot of arithmetic operations per each field element involved in the hashing. Okay, so we focused a lot on optimizing the optimizing the entity. We did a lot of basically the focus here was to minimize the L3 misses. Focos a l'optimització de l'entitat. Focs en la minimització de les emisses de l'L3. Al final, el resultat és que la performance que obtenim és de fer l'entitat en 100 polinomials de 2.23, és a 1.9 segons. 
Aquesta és més o menys la referència. I després, la mercantilització, és més o menys la referència. I després, la mercatització, és clar, és sobre, en la CPU, és una altra cosa, en la CPU és sobre vectorització, per tant, prenent avantatge dels registres vectorals per fer, en cada cop, 4 o 8 operacions al mateix temps. I anem a una performance amb Goldilocks 1, no Goldilocks 2, de 1 milió de hashes per segon. Així que aquest és el resum del que vam fer més o menys dos anys abans de llançar el ZKVM. A un punt vam arribar a la conclusió que mantenir i millorar el proveïdor no tindria cap impacte irrelevant en la performance que vam fer a l'aplicació. Aquesta és una bona situació, podem focusar-nos en altres coses, i això és el que fem ara. El projecte en què estem involucrats és el CISC, el CISC-AVM, que és un CISC-AVM generalment proposat. which is a general proposed ZK-EBM. Basically, as others that are in the space, we want to prove any program that is written in a high-level language like Rust, CPP, or others. The motivation of doing this project, it's very meaningful for us because we have acquired a lot of experience, a lot of tooling, a lot of knowledge on running the ZK-EBM, és molt significatiu per a nosaltres perquè hem adquirit molta experiència, molta informació, molt de coneixement en fer el ZK-EBM, que ha estat en Mainnet dos anys i hem patit molt amb les performances. Així podem prendre avantatge de totes aquestes coses que hem après per posar-les en servei d'aquest nou projecte, que és més general i pot ser més impactant en general per a qualsevol usuari. És obert, tu pots xerrar la GitHub, no és tot complet per fer una prova, però és gairebé allà, potser en un o dues setmanes serem capaços de provar qualsevol codi que es trobi escrit a REST. Ara, la major diferència amb aquest nou to prove any code that comes written in Rust. Okay, now, the main difference with this new proving system, so the proving system that we are doing for Zisk, it's different in many ways, but the main difference comes from the remitization, because now we don't have any more static capacity for the trace. We will have a dynamic capacity. This means that we will shape the trace depending on the inputs. And it will be divided into different instances. All this will be explained in the next talk, so I cannot skip the responsibility of explaining this well, but of course I encourage you to stay to understand all the implications of this. On the proving time, basically you will have to prove all the instances that you have generated on the arithmetization. We call them sub-proof. So we have an array of sub-proofs to be proven. And then, of course, we will have an aggregation phase on which we will aggregate all the proofs resulting from for all the instances that will result on a single proof. Okay? en què agregarem totes les proves que resulten en una sola prova. Pensem en les implicacions computacionals de tot això. De fet, en l'eficiència de la CPU, no es pot obtenir res d'aquest. Es pot obtenir una hora pitjor. Per què? Bàsicament, el cost d one proof is proportional to the area, and this stays true even when you go to small instances. So you have proportional cost. You have divided the main trace into different sub-traces, but the sum of the areas is more or less the same. Of course, we have eliminated the padding effects. This is helpful, and it will reduce a little bit the overall area, la suma d'àrees és més o menys la mateixa. 
Of course, we have eliminated the padding effects, which is helpful and will reduce the overall area a little, but it's not a big difference. And we have other degradation costs; in the end, we are making the proof a bit more expensive. However, with this new granularity, there are a bunch of opportunities. And the most important one is that we can take advantage of accelerators without the limitation of having to move data back and forth from the host to the GPU, with that PCI Express bottleneck. So this is one very important improvement here: we will be able to exploit GPUs much more efficiently. We have other ideas, like vectorizing the execution of STARKs, so we can run eight STARKs at the same time, trying to do every operation in a vector register containing elements of different STARKs. These are some of the ideas we are considering. But the most appealing idea for us is going to distributed computing. Basically, the idea of distributed computing is that you run your proof not on a single instance in the cloud but on several instances, and with this you try to scale your application harder. You want to reduce the latency, which is one of the focuses of this project. Let's talk about distributed proving. First of all, we are very happy because we have a very important advantage in this project, which is that we have access to a supercomputer, called Selenius, which is part of the CCEURO organization, within EuroHPC. They gave us resources to run our tests. We did a research project where we wanted to find the scaling limits of the STARK prover, to see how far we could go in reducing the latency. That is our objective. Here we have three lines. We have hundreds of thousands of CPUs that we can throw together at one proof. We won't go to that level, but the hardware is there. And we also have hundreds of GPUs that we can use to generate a proof. Moreover, we want to target maximum performance, but everything we do can also be done in the cloud. We want to reproduce the results, at a smaller scale, in the cloud. And this is very important, because production will not happen on a supercomputer; it will happen on a cloud system, like the ones we use today. Let's talk about the first part, generating the trace. The witness computation is the part of generating the trace that comes from the inputs. How will we do it in a distributed environment? The most naive way of doing it would be... Well, I have several processes. I'm talking about processes and threads: two processes don't share memory, so they are processes distributed across the network, and then the processes spawn threads. You can assign one process the master role and say that it will compute the trace, and then it will divide the result into equal parts and send them to the others, okay? This is a functional approach, but it's not scalable at all. We are not parallelizing anything, we have a lot of memory requirements in the master process, and we are also adding a high communication overhead. So it's not a serious approach. Now, let's try to get rid of the communication costs. Let's do the witness computation redundantly in every process. Okay, that's fine.
We got rid of the communication costs, but we are not accelerating anything because the computation is redundant, and we have high memory requirements in all the processes, because every process has to store the whole trace. We are not there yet. The second strategy that we developed is to minimize the trace that we generate. We divided the trace generation into two stages. In the first stage, we only generate what we call the minimum trace, which is the information that you require to then, in a second stage, restart the evaluation of the trace at any point. So in the second stage, we can recompute the trace in parallel and divide this computation. So we have this first stage of computing the minimum trace, which is very fast. And then, from the information generated in this process, we can distribute the trace between processes. I mean, this first step of generating the minimum trace has been run by all the processes, so all the processes know what the current work is that has to be distributed, and each process takes one part and computes that part, but only that part. This scales much better. Okay, let's look at the results. The test that I am using here and in the next slides is a program that executes 10,000 SHA-256 hash operations. The trace that we are generating is not complete because, as I said, this project is under development. At the moment, we are generating instances for the main machine and the binaries and some multiplicity tables, but it's enough to understand the results and the potential of this. Now, if we take as a reference the emulation time, which is executing the program without generating any trace, not even the minimum trace, the time is 0.6 seconds for this program. This means that we are running at 80 MHz; that is our speed. The overhead of generating the traces, running on a single compute node, is about 100%, so we are doubling this time; it goes down to 37% when running across compute instances. So this is a big improvement. The scalability is not great, because we are already very fast in the execution of the trace, since we are dividing it into threads on a single node. But in the end, this overhead is very acceptable, okay? Only 37%. Okay, I will go a little bit fast because I am running out of time. Now let's talk about how we manage the distribution of the sub-proofs. So we have to generate the proof for all these instances that have been created. Here we have a synchronization point, because all these instances share the transcript. This means that the object used to get the randomness for the next stage is shared between all the instances, and this imposes some synchronization between the execution of all these instances.
What we do is a communication stage where we share the roots that we have to add to that transcript, and then we get the same randomness in each of the proofs. Let's see the results. The scalability of this phase, generating the sub-proofs, is 100% efficiency. We go from 70 seconds down to 3.3 seconds. It's a little bit suspicious that we even have superlinearity: if you look at the efficiency, which is the line, it goes over 100% at some points, which is, let's say, a little bit suspicious. The reason is that the distributed prover runs better even on a single node, because you can tune the granularity of the threads that you are using for the sub-proofs better. So if we instead take as the baseline the distributed prover on a single compute node, we get something more meaningful: we obtain a parallel efficiency of 80%. That's good. Let's go to the distribution of the recursion. We will have some communication requirements when we generate the tree, because the sub-proofs that we are aggregating can be hosted in different processes; we have to transfer them to the process that will generate the aggregation. And if we look at the parallel efficiency, it's really not so appealing, right? It's 44%. Okay. But the problem that we have here is that when we are going down the tree, at some point we have fewer aggregations to do than processes, so there are some resources that are not being used at all. If we just release these processes to do other things, the real parallel efficiency relative to the hardware that we are consuming is 88%. So it's, again, a very acceptable parallel performance. Now, if we look at the complete overall picture for the whole proof, including the witness computation, the sub-proofs, and the aggregation, the parallel efficiency is 78%. We go from 125 seconds down to 12.2 seconds. The recursion takes most of the time now; it's 65% of the proof. So that's where we have to focus to optimize the performance and cut the latency further. And the most important conclusion is that, since we have a good parallel efficiency, around 70%, we can reduce the latency by 10x. We have reduced the latency of the proof by 10x while the costs have only increased by 27%. This is a very good situation to be in, because basically it means that you can really decrease the latency without significant additional costs. And this can be reproduced in the cloud, because we don't rely on a very high-performance network: the data transfers that we are doing are really minimal. We are transferring proofs or we are transferring roots, which is minimal information. So I am closing with this: we can reduce the latency at a very low cost increment. And as the next improvements, we will focus on the recursion and on some algorithms, like STIR and multi-FRI. Of course, there is no GPU here yet. With GPUs we can push this even further, because our architecture can support GPUs very well, since the instances are smaller. We can reduce the latency even more with GPUs.
And also, running on cloud infrastructure is our next step. Okay, sorry for running a little long. And I can take any questions. Thank you, Ricard. Yeah, a round of applause. So we'll have a few questions. A reminder: thanks to everyone who submitted; the QR code is always in the presentation, and you can also vote on the questions. So we're going to start, if it's all right with you, with the first one. Could you give an intuition of how you generate a minimum trace that can then be used to restart the trace at any point? What's the data structure like? Yeah, the minimum trace is kind of evaluating only the columns that you require to then be able to extend the trace. And this is basically two columns for the main machine, which are the ones that load data from the memory. It's kind of an A and a B register. So it's only two columns. From that, the rest of the columns that compose the main trace can be generated from this information at a given checkpoint. So that's basically it: it's evaluating just a subset of two columns. Great. So the next one voted: can we say the ZisK VM basically trades more aggregation cost for a better concurrency architecture? So basically, yeah. That is one way to see it. The aggregation costs now are huge, and we have to pay for this aggregation, of course, to have this granularity that we are interested in, but we want to reduce it as much as possible by improving the algorithms. For instance, Plonky2 has a very fast aggregation; we can do something similar. We have an approach that reduces the number of queries that you have to verify on each aggregation step. So we have a package of things that we can do to improve this part. Also, we have to really think about which granularity is best suited for our hardware. So maybe we can generate fewer instances if we can really exploit the hardware well, and then the recursion will have fewer levels. Okay, so a lot of investigation is coming on this part because, as you saw, it's the most relevant one at this point. Okay, great. The next one is: can ZisK run programs written in lower-level circuit languages? Will they be faster than Rust? Will the gap ever disappear? Well, the purpose of this is not that. The purpose of this is basically that you write the program in a high-level language, basically Rust, C++, or any other language. This is the main design point. Of course, you may be able to generate some arithmetization and then use the prover as a self-contained component, and you may be able to use it that way. But the main purpose is just that you write your code in a high-level language, you don't care about anything, and you will get the proof of the execution of this program from the inputs, which would be the inputs of the program, the outputs of the program, and the ELF from compiling the program to RISC-V or any other architectures that we are also considering, like Wasm or LLVM. Great. And I think we have a chance for one more quick one. Just trying to be futuristic: can you give insights into what could be done to improve the zkEVM or zkVM prover by one or two orders of magnitude? For me, it's clear that distribution is a path, a vector of improvement that we have to take, because if you rely on more specialized and complicated hardware but stay on a single instance, on a single node, you won't accelerate.
But we're going to take advantage of the most sophisticated node available, the most sophisticated hardware available and compose it and take it two, three, four, whatever number can fit with a good parallel efficiency. Because we want to control all the time the costs of the proof. If you have parallel efficiency, it means the cost doesn't grow. So that's the trade-off that we really have to take into account. Amazing. Please give a big welcome to AlphaPlus.", + "sources_streamethId": "673443a29dbb7a90e136e2e7", "eventId": "devcon-7", - "slot_start": 1731477600000, - "slot_end": 1731479400000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1J3KMOMYAXjSesFqZthBz2neGQcOt3Ui_KyKgToVj0Z0", - "resources_slides": null, + "slot_start": 1731476700000, + "slot_end": 1731477600000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/114N7ZFoxay5HlB8T-LpY8iCOTKX70658i91W43eXDpc", + "resources_slides": "https://drive.google.com/file/d/1MuNTL7JM66f3ktnqh8vGUCq2b3NiGW3m/view", "speakers": [ - "ricard-borrell" + "rose", + "richa", + "kira" ] }, "vector": [ @@ -485391,14 +484025,12 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -485538,6 +484170,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -485669,6 +484302,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -486134,6 +484768,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -486175,6 +484810,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -486207,10 +484843,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -486343,7 +484975,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -486481,8 +485112,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -486717,42 +485346,40 @@ }, { "session": { - "id": "libp2p-implementation-in-c-and-prysm", - "sourceId": "F7UVJP", - "title": "Libp2p implementation in C# and Prysm", - "description": "Joint talk will discuss a project which split to work on the C# implementation of Nethermind's libp2p, where we implemented the TLS protocol, upgraded the Noise protocol, and added the Perf protocol. Although the first version of the C# implementation isn’t released yet, it is integrated with Gnosis Chain’s Shutter node. 
Second part of the talk focuses on new goland libp2p library for Prysm CL client", + "id": "light-client-support-in-prysm", + "sourceId": "9PC3EY", + "title": "Light Client Support in Prysm", + "description": "Showcasing the addition of Light Client server support to the Prysm consensus client.", "track": "[CLS] EPF Day", - "type": "Talk", + "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ + "EPF", "Consensus", - "Network State" + "Light Clients" ], "keywords": [ - "Libp2p", - "Networking", - "P2P" + "Prysm" ], - "duration": 1015, + "duration": 724, "language": "en", - "sources_swarmHash": "688bcf8ed0f257f588315fb5d07e0d1d44de2a4780efe358e803d9c20c0c708e", - "sources_youtubeId": "zAlxmEONKGE", + "sources_swarmHash": "87a24b2a455f641db3325c1e58fa4d6f7aa331c6e585835bb8a48f1ac01635a0", + "sources_youtubeId": "ZAU0kJJut54", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673443a29dbb7a90e136e2e7", + "sources_streamethId": "67343f059dbb7a90e1ea95b6", "eventId": "devcon-7", - "slot_start": 1731476700000, - "slot_end": 1731477600000, + "slot_start": 1731475800000, + "slot_end": 1731476700000, "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/114N7ZFoxay5HlB8T-LpY8iCOTKX70658i91W43eXDpc", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1o1_9VdMiq5Uf_dyQTPf5R3mVbxhL4d0QI33ZiZNm28Q", + "resources_slides": "https://drive.google.com/file/d/1BTsXmJDB0RxfhgdxdqKtZ0LtK0A_tOuo/view", "speakers": [ - "rose", - "richa", - "kira" + "bastin", + "rupam" ] }, "vector": [ @@ -486911,7 +485538,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -487044,7 +485670,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -487208,11 +485833,10 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -487527,33 +486151,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -487840,6 +486437,55 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -488046,30 +486692,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -488092,40 +486714,44 @@ }, { "session": { - "id": "light-client-support-in-prysm", - "sourceId": "9PC3EY", - "title": "Light Client Support in Prysm", - "description": "Showcasing the addition of Light Client server support to the Prysm consensus client.", - "track": "[CLS] EPF Day", + "id": "lighthouse-introduction-to-siren", + "sourceId": "F3ZPRJ", + "title": "Lighthouse: Introduction to Siren", + "description": "Sigma Prime would like to introduce Lighthouse's official user interface called Siren. Siren was made to monitor performance, display key metrics and help make lighthouse validator management easy. 
Siren comes with built in metrics, logging, and other features users will find useful when updating their validator.", + "track": "Usability", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "expertise": "Beginner", + "audience": "Stakers/Validators", "featured": false, "doNotRecord": false, "tags": [ - "EPF", - "Consensus", - "Light Clients" + "Home staking", + "UI/UX", + "Accessibility", + "ui", + "Accessibility", + "Home staking", + "UI/UX" ], "keywords": [ - "Prysm" + "lighthouse", + "UI" ], - "duration": 724, + "duration": 388, "language": "en", - "sources_swarmHash": "87a24b2a455f641db3325c1e58fa4d6f7aa331c6e585835bb8a48f1ac01635a0", - "sources_youtubeId": "ZAU0kJJut54", + "sources_swarmHash": "581e106f3b22dfacc1d56771706969f972348e514d3d6a94afc75aac47317df4", + "sources_youtubeId": "RhlKmJqk0go", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67343f059dbb7a90e1ea95b6", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731475800000, - "slot_end": 1731476700000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1o1_9VdMiq5Uf_dyQTPf5R3mVbxhL4d0QI33ZiZNm28Q", - "resources_slides": null, + "slot_start": 1731408000000, + "slot_end": 1731408600000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1iWFucLqzajqGIcn5d4YFuRZ1zk1Y8VHURhoTiKQ1T-w", + "resources_slides": "https://drive.google.com/file/d/1lwroo0zWbWBkMkkJx2L-r2Ug7eDQHyGN/view", "speakers": [ - "bastin", - "rupam" + "ricki-moore" ] }, "vector": [ @@ -488137,6 +486763,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -488144,10 +486771,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -488582,12 +487205,11 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -488885,7 +487507,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -488900,7 +487521,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -488942,6 +487562,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -488967,6 +487589,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -489187,7 +487810,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -489285,6 +487907,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -489447,13 +488070,11 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -489465,44 +488086,40 @@ }, { "session": { - "id": "lighthouse-introduction-to-siren", - "sourceId": "F3ZPRJ", - "title": "Lighthouse: Introduction to Siren", - "description": "Sigma Prime would like to introduce Lighthouse's official user interface called Siren. Siren was made to monitor performance, display key metrics and help make lighthouse validator management easy. Siren comes with built in metrics, logging, and other features users will find useful when updating their validator.", - "track": "Usability", + "id": "lighthouse-transition-journey-from-warp-to-axum", + "sourceId": "ZF79GZ", + "title": "Lighthouse transition journey : from warp to axum", + "description": "This talk will explore how to approach a significant refactor of the HTTP framework for lighthouse. 
\r\n\r\nIt will cover:\r\n- Measuring the performance of endpoints between Warp and Axum\r\n- A concrete plan for implementing the necessary changes", + "track": "[CLS] EPF Day", "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Stakers/Validators", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Home staking", - "UI/UX", - "Accessibility", - "ui", - "Accessibility", - "Home staking", - "UI/UX" + "Developer Infrastructure", + "Light Clients" ], "keywords": [ - "lighthouse", - "UI" + "Performance", + "Developer", + "experience" ], - "duration": 388, + "duration": 537, "language": "en", - "sources_swarmHash": "581e106f3b22dfacc1d56771706969f972348e514d3d6a94afc75aac47317df4", - "sources_youtubeId": "RhlKmJqk0go", + "sources_swarmHash": "0ba36082c0dc4df92f95f85927da7080d528c36d541326d01a2f2c36606c619e", + "sources_youtubeId": "PX-LJ7iqHig", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "67343c729dbb7a90e1adea32", "eventId": "devcon-7", - "slot_start": 1731408000000, - "slot_end": 1731408600000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1iWFucLqzajqGIcn5d4YFuRZ1zk1Y8VHURhoTiKQ1T-w", - "resources_slides": null, + "slot_start": 1731474000000, + "slot_end": 1731474900000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1xTcTdXk_Eq4KKe0Dg4IQWit5KLqwivJSom49AbKiMFM", + "resources_slides": "https://drive.google.com/file/d/1KxsX6opQOw677jx_ThW2hpE_hCsvxSOl/view", "speakers": [ - "ricki-moore" + "lea-narzis" ] }, "vector": [ @@ -489514,7 +488131,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -489522,6 +488138,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -490274,6 +488891,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -490307,6 +488925,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -490316,10 +488935,6 @@ 0, 0, 0, - 2, - 2, - 0, - 0, 0, 0, 0, @@ -490343,7 +488958,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -490662,7 +489276,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -490819,6 +489432,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -490830,8 +489444,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -490842,41 +489454,45 @@ }, { "session": { - "id": "lighthouse-transition-journey-from-warp-to-axum", - "sourceId": "ZF79GZ", - "title": "Lighthouse transition journey : from warp to axum", - "description": "This talk will explore how to approach a significant refactor of the HTTP framework for lighthouse. \r\n\r\nIt will cover:\r\n- Measuring the performance of endpoints between Warp and Axum\r\n- A concrete plan for implementing the necessary changes", - "track": "[CLS] EPF Day", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "liquid-staking-for-daos", + "sourceId": "ZV39SQ", + "title": "Liquid Staking for DAOs", + "description": "DAOs face a critical challenge: aligning token holder interests with long-term success while maintaining effective governance. This talk explores the tension between governance participation and financial gains, as well as the dangers and opportunities posed by restaking protocols using DAO tokens. 
We'll examine how misaligned incentives can compromise DAOs and discuss innovative solutions like liquid staking and token splitting.", + "track": "Coordination", + "type": "Talk", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, - "tags": [ - "Developer Infrastructure", - "Light Clients" - ], "keywords": [ - "Performance", - "Developer", - "experience" + "DAOs" + ], + "tags": [ + "Coordination", + "DAO", + "Best Practices", + "Mechanism design", + "Best Practices", + "Coordination", + "Mechanism design" ], - "duration": 537, "language": "en", - "sources_swarmHash": "0ba36082c0dc4df92f95f85927da7080d528c36d541326d01a2f2c36606c619e", - "sources_youtubeId": "PX-LJ7iqHig", + "sources_swarmHash": "af4dfb7196b3f28598932bfa68f572203f1aa20534f0498a56d483f964c3c0ba", + "sources_youtubeId": "94oHo7AORak", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67343c729dbb7a90e1adea32", - "eventId": "devcon-7", - "slot_start": 1731474000000, - "slot_end": 1731474900000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1xTcTdXk_Eq4KKe0Dg4IQWit5KLqwivJSom49AbKiMFM", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "lea-narzis" - ] + "dennison-bertram" + ], + "eventId": "devcon-7", + "slot_start": 1731394800000, + "slot_end": 1731396600000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1o6QVDTmx3Wki_7YsSzzIkIb_lvDouMYuQyMpQ98lqww", + "resources_slides": "https://drive.google.com/file/d/1egL6My2goPKpL3mjWJx3OgN9B7Wg_HWs/view" }, "vector": [ 0, @@ -490890,10 +489506,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -491335,33 +489947,11 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -491650,7 +490240,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -491661,6 +490250,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -491754,6 +490344,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -491807,6 +490398,25 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -492193,7 +490803,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -492204,6 +490813,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -492215,37 +490826,26 @@ }, { "session": { - "id": "liquid-staking-for-daos", - "sourceId": "ZV39SQ", - "title": "Liquid Staking for DAOs", - "description": "DAOs face a critical challenge: aligning token holder interests with long-term success while maintaining effective governance. This talk explores the tension between governance participation and financial gains, as well as the dangers and opportunities posed by restaking protocols using DAO tokens. We'll examine how misaligned incentives can compromise DAOs and discuss innovative solutions like liquid staking and token splitting.", - "track": "Coordination", - "type": "Talk", - "expertise": "Beginner", - "audience": "Community", + "id": "liron-achdut", + "sourceId": "TPPWYB", + "title": "Liron Achdut", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "DAOs" - ], - "tags": [ - "Coordination", - "DAO", - "Best Practices", - "Mechanism design", - "Best Practices", - "Coordination", - "Mechanism design" - ], + "keywords": [], + "tags": [], "language": "en", - "speakers": [ - "dennison-bertram" - ], + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731394800000, - "slot_end": 1731396600000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1o6QVDTmx3Wki_7YsSzzIkIb_lvDouMYuQyMpQ98lqww" + "slot_start": 1731654000000, + "slot_end": 1731657600000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1YdbQcP_NmrA5hsp8UnCOjPl-Em8SCjy8qlYVJjrw3jo", + "resources_slides": "" }, "vector": [ 0, @@ -492257,8 +490857,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -492705,7 +491303,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -493006,7 +491603,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -493030,7 +491626,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -493100,7 +491695,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -493154,7 +491748,7 @@ 0, 0, 0, - 2, + 0, 0, 0, 0, @@ -493567,11 +492161,12 @@ 2, 0, 0, + 2, + 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -493584,38 +492179,46 @@ }, { "session": { - "id": "liron-achdut", - "sourceId": "TPPWYB", - "title": "Liron Achdut", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", + "id": "little-things-weve-learned-about-fhe", + "sourceId": "9JFDZA", + "title": "Little Things We've learned About FHE", + "description": "Recently, at PSE, we have been exploring the field of cryptography, specifically focusing on Fully Homomorphic Encryption (FHE). FHE enables secure interactions with encrypted data between different parties.\r\n\r\nIn this presentation, we will introduce key concepts and essential information tailored for developers and application designers. This will help them quickly grasp the fundamentals without getting bogged down by complex mathematical details.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Cryptography", + "Homomorphic Encryption", + "eli5", + "Cryptography", + "Homomorphic Encryption" + ], + "keywords": [ + "ELI5" + ], + "duration": 1181, "language": "en", - "speakers": [], + "sources_swarmHash": "abd4e937b9caaecc4dc7c05e26ceccf32cd810e419e9f50e4b17fe00e8a55fce", + "sources_youtubeId": "4AEKFmPftSY", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735c37a9dbb7a90e1fa8ae4", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735c37a9dbb7a90e1fa8ae4.vtt", + "transcript_text": " Thank you, Teresa. Hi, welcome here. So, I'm C.C. Liang from PSE. We're talking about FHE today. So, here's my selected resume to fit my narrative. First, like in August and July this years, I've been working on ZK-EBM projects with Kimi there and a bunch of many people. 
And before that, I was working on various applications at PSE. So I think, in summary, what I have seen is a rise of computational primitives in cryptography. So what does that mean? We discovered a bunch of computation Legos. These Legos start with gates like this: you can add numbers together or you can multiply numbers together, and we call these gates arithmetic gates. And if you're coming from a programming world, you've probably seen AND gates, OR gates, XOR gates, and NOT gates; you can feed bits into them and do operations with them. These are called Boolean gates. So these are kind of abstract models. Once you get this, you get all the computation you need; you can serve users with different kinds of application logic. How does that go? So first you have gates, and then if you combine these gates together you get circuits, and then if you build languages and compilers on top of them, and you feed developers some coffees and teas, they'll build amazing applications for happy users. So I think that's what happened in the ZK past. In ZK we kind of have some arithmetic gates that allow you to perform computations on secret values, and now with FHE we are getting new sets of Legos, and they unlock new sets of capabilities, and this is what we'll dive into today. So before that, let's do some recap on ZK calculation. When we talk about computations, we usually start with calculations, and then we build more abstractions, and then we can do computations on actual application logic. Take a Sudoku board. It has some pre-filled values, 7 and 3. You can fill in some initial value, 5, and then you multiply them together and you get 35. You add 3 and you get 38. You can witness some private value, 2, and then you get a result, 40. And then you can send the input and output to the verifier without sending all the computation trace in the middle. In this way, you get privacy for your private value. You also get computation compression; people call this succinctness. And if we accept some more abstractions (developers are very good at turning your application needs into computational bits and bytes), we can do some high-level logic like this. For example, you have ID 123 that resides in the ID registry, so this computation result tells us this is a valid ID. As this dog, you can insert some secret value in the secret field of the ID. This ID has certain attributes. And then the whole thing is combined together: this shows you know the secret, so you are the owner. This dog is the owner of ID 123. And then, furthermore, this ID 123 has an age field, say it's 19. So this dog is 19 years old. But the dog doesn't want to tell people it's 19 years old; the dog wants to show it's greater than 18 years old, with just one additional computation step. So that works well if your user is just one dog. But now, if you want to build applications that interact with multiple users, like this dog and cat, they may want to decide what to have for dinner, either khao soi or pad thai. But their opinions are very sensitive, so secret that they need some cryptography that can protect them from state-actor attackers. So, let's imagine we don't have any cryptography and we host this computation with a trusted third party, like the horse below, and then the dog sends their vote, one, and their preference for pad thai, zero, and the cat sends their vote, one, and their vote for pad thai. Then we perform some computations.
We want to add the votes together to get the final preference for the dinner. But then, you see, this requires the interaction of the dog's secret and the cat's secret, and that's not achievable with the ZK gates. So that's where FHE comes to help. So FHE is short for three words, but I'm not going to torture you with those nerdy details. I'm going to redefine FHE as just one phrase: computation over encrypted data. As people say, the ticker is FHE. So what does it mean, computation over encrypted data? You basically get these two new Legos, and then you can have encrypted data from Player 1 and encrypted data from Player 2, and when you add them together, you get a computation result. And when I heard about computation over encrypted data for the first time, I didn't realize how powerful or how significant it is. But if you see here, it kind of builds a boundary between two players and then lets their secrets interact with each other, and you get a computation result. So now, with FHE, you can do the dinner voting properly. The dog sends their encrypted preference, the cat sends their encrypted preference, and then you use the new FHE gate to add them together to get an encrypted computation result. And then you have to decrypt it to get the actual result. So I think Vitalik has a saying that all the technology we have is kind of a simulated trusted third party. And I think, from this example, we're kind of seeing this phenomenon. This horse on the bottom is kind of like a trusted third party. Whether we're using FHE or not, the application logic looks similar, but with FHE it's wrapped in another layer of security. And from the perspective of the user, all they see is the same: they input some value in their browser, and then they get some values back in their browser, but there are differences behind the scenes, and there are different security and trust assumptions behind the scenes. So why are we not already using FHE now? We kind of already are: in the basement, you have FrogZone from 0xPARC, and I highly recommend you to play it. But there are three main problems with FHE right now. First, the computation is costly. Let's say you want to send one bit of plaintext from the dog to the server. You have to encrypt it, and this encrypted message is data itself, so it has some size. And how big is this data? This data takes 16 bytes, kind of amortized, and it's about 100 times the plaintext. And it grows linearly, meaning if you have 100 bits, you multiply 16 bytes by 100. This is already very good. But the terrible part is the computation part. You need a server with 192 vCPUs. It burns 10 USD per hour. So I highly recommend you to play the frog games in the basement to feel that $10-per-hour feeling. The second problem is verifiability. You are getting this computation result, but is this computation result coming from the correct computation steps or from incorrect computation steps? To be honest, you cannot really tell. It needs extra work: you either wrap another ZK proof on top of it, or you do some message check or something to guarantee some kind of integrity. It's a problem people are actively solving, but it's still a problem. And problem number three is the decryption problem. After all the computations, we're getting the encrypted computation output. But how do you decrypt this?
How do you remove that elephant from the fridge? So you need someone to decrypt the message, but you can't just give the decryption key to a centralized party; that's like not doing FHE at all. And the concern is that if someone has the ability to decrypt the output, can they also decrypt other users' inputs? This would make the whole FHE thing security theater. So the other way to solve this is to use some threshold decryption scheme. For every encrypted output, you need every party to derive a decryption key share, and together they have to collect all the decryption key shares to actually see the output. So if the dog did not give out its decryption key share for its input, the other people would not be able to decrypt the dog's input. But the problem here is the cat: if the cat ghosted, then nobody can actually decrypt this output, and then this secret will stay secret forever. So let's give a quick recap. The message is simple. In ZK, we kind of have computations on top of your own secret. And with FHE, we can engage with other secrets, so you can build applications for multiple people. And that opens a lot of doors to many new applications. So I expect you will hear the word FHE a lot in the coming years. There will be more talks at the next Devcon; this is a kind of trending topic. And lastly, I want to direct you to play FrogCrypto in the basement to feel how the early days of FHE feel. Also, if you are coming from a developer background, here are some repos you should check. The first two are from Gauss Lab; it's a library for multi-party FHE. Pay attention to the branch, because the latest code is living on some branch. And if you want to deploy your FrogZone yourself, check the last link. You can deploy a FrogZone. And it's early days, things break, so expect some hiccups from the code. And that's all I have for you. Thank you so much. Alright, thank you, CC. We actually have a lot of time for QA, so feel free to submit questions. Let's go with the first one on the screen. What are the best use cases you've seen for FHE so far? That's a very good question. So for me personally, definitely not for others, I'm more from an economics background, and I'm very interested in auctions and votings. These are very OG applications; they are very ancient. But I feel like if we can do proper voting and proper auctions, that would be cool. I think what makes people excited for the future is that if you can build some private state, you can build, let's say, a P2P network: you have a private state, and then you have some state transition function, you can continuously update the state, and then you can build a kind of private blockchain and private applications for many people. But I cannot say anything concrete at this moment, because I don't know. And we have another one. Which optimizations do you see as possible to solve the performance issue? So this is what I heard from our colleague PA: the latest performance update was already in 2016, and there haven't been new papers on that performance improvement since. So we might be looking for a new generation of bootstrapping techniques to improve the performance. When should you use ZK, FHE, or MPC? So my over-simplistic understanding of this is: if your application involves just one party, then use ZK. If you have two parties, use MPC. If you have more than two, use FHE. All right, more time for QA.
What do you think of TEs as a solutions for shared state, shared private state computation as a performance solution over FHE? Good question. I don't know yet. I haven't dive into this topic, and so I haven't developed opinions on this. All right, perfect. OK. All right, let's give it another 10, 15 seconds to see if there are any more. Maybe a question for you, like who here have heard FHE before? Oh, okay, almost everyone. Okay. Who here have heard indistinguishable obfuscation before. OK. Oh. Nice. All right, cool. Thank you, CC, for your amazing talk. Our next talk will be in 10 minutes.", "eventId": "devcon-7", - "slot_start": 1731654000000, - "slot_end": 1731657600000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1YdbQcP_NmrA5hsp8UnCOjPl-Em8SCjy8qlYVJjrw3jo" + "slot_start": 1731574800000, + "slot_end": 1731576600000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1yFyLyjYjdDzT6MDPS4LGolPm0BsYYfhsoxLz5fezE_k", + "resources_slides": "https://drive.google.com/file/d/14J68hMRQHzjC88VWeNPTxdegCreGl9CT/view", + "speakers": [ + "chih-cheng-liang" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, 0, 0, 0, @@ -493626,6 +492229,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -494072,6 +492676,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -494375,6 +492980,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -494451,6 +493057,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -494765,6 +493372,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -494923,6 +493531,7 @@ 2, 0, 0, + 0, 2, 0, 0, @@ -494941,44 +493550,28 @@ }, { "session": { - "id": "little-things-weve-learned-about-fhe", - "sourceId": "9JFDZA", - "title": "Little Things We've learned About FHE", - "description": "Recently, at PSE, we have been exploring the field of cryptography, specifically focusing on Fully Homomorphic Encryption (FHE). FHE enables secure interactions with encrypted data between different parties.\r\n\r\nIn this presentation, we will introduce key concepts and essential information tailored for developers and application designers. This will help them quickly grasp the fundamentals without getting bogged down by complex mathematical details.", - "track": "Applied Cryptography", - "type": "Talk", + "id": "live-music-open-jam-or-something-in-between", + "sourceId": "FVHR9Y", + "title": "Live Music, Open Jam, Or Something In Between", + "description": "This will be an open, emergent, co-created format where we're inviting everyone to make music together.", + "track": "Entertainment", + "type": "Music", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Community", "featured": false, "doNotRecord": false, - "tags": [ - "Cryptography", - "Homomorphic Encryption", - "eli5", - "Cryptography", - "Homomorphic Encryption" - ], - "keywords": [ - "ELI5" - ], - "duration": 1181, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6735c37a9dbb7a90e1fa8ae4", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735c37a9dbb7a90e1fa8ae4.vtt", - "transcript_text": " Thank you, Teresa. Hi, welcome here. So, I'm C.C. Liang from PSE. We're talking about FHE today. So, here's my selected resume to fit my narrative. First, like in August and July this years, I've been working on ZK-EBM projects with Kimi there and a bunch of many people. And more before that, I was working on different various applications in PSE. 
So I think in summary, what I have seen is a rise of computational primitives on cryptography. So what does that mean? Like we discovered a bunch of computation legos. This legos starts with gates like this. You can add numbers together or you can multiply numbers together and we call these gates arithmetic gates and if you're coming for a programming world you're probably seeing this N gates or gates XOR gates and not gates and you can fit bits inside it and do operation with it. These are called Boolean gates. So these are kind of abstract models. Once you get this, you get the whole computation you need. You can serve user with different kind of application logic. How does that go? So first you have gates and then if you combine these gates together you get circuits and then if you build language and compilers on top of them and you feed developers some coffees and teas and they'll build amazing applications and to happy users. So I think that's what happened in the ZK past. In a ZK we kind of have some arithmetic gates that allows you to perform computations on secret values and now FHE we are getting new sets of Legos and they unlocked new sets of capabilities and this is what we'll dive into today so before that let's do some recap on ZK calculation So when we talk about computations, we usually start with calculations, and then we build more abstractions, and then we can do computations on actual application logic. like a Sudoku board. They have some pre-filled values 7 and 3. You can fill in some initial value 5 and then you multiply together you get 35. You plus 3 you get 38. You can witness some private value 2 and then you get a result 40. And then you can send input and output to the verifier without sending all the computation trace in the middle. And in this way, you get privacy for your private value. You also get the computation compression for people call this the sickness. And if we accept some more abstractions we wrap developers are very good at turning your application needs into the computational bits and bytes so we can do some high-level logic like this. For example, you have ID 123 that resides in the ID registry. So this computation result tells us this is a valid ID. As this doc, you can insert some secret video in this secret field of ID. This ID has some certain attributes. And then the whole thing is combined together. This shows you know the secret. So you are the owner. This dog is the owner of ID 123. And then furthermore, this ID 123 has an age field, say it's 19. So this dog is 19 years old. But the dog wants to tell people it's 19 years old. The dog wants to tell it's greater than 18 years old with just one additional computation step. So that works well if your user is just one dog. But now, like, if you want to build applications that interact with multiple users, like this dog and cat, and they want to decide what to have for dinner, either khao-soi or pad thai. But their opinions are very sensitive, so secret they need some cryptography that can protect you from state-acted attackers. So they kind of... Let's imagine we don't have any cryptography and we host this computation and trusted third party like the Horse below and then the dog sends their vote one and the preference for pot I 0 and the cat sends the And the cat sends the vote one, and the vote for Pad Thai. Then we perform some computations. We want to add the votes together and to get the final preference for the dinner. 
But then, you see, this requires the interaction of the dog's secret and the cat's secret, and then it's not achievable by the ZK gate. So that's where FHE comes to help. So FHE stands for, it's short for three words, but I'm not going to torture you with that nerdy details. I'm going to redefine FHE as just one word, and it's a computation over encrypted data. As people say, the ticker is FHE. So what does it mean, like, computation over encrypted data? So you basically get these two new Legos, and then you can have encrypted data from Player 1, encrypted data from Player 2, and then you add them together, you get a computation result. And so when I heard, like, the encrypted computation over encrypted data first time I didn't realize how powerful it is or like a significant it is but it if you see here it kind of builds a boundaries between two players and then you that there are secrets to interact with each other, and you get a computation result. So now, like with FHE, you can do the dinner voting properly. So the dog sends the encrypted preference, cat sends their encrypted preference, and then you use the new FH gate and to add them together to get an encrypted computation result. And then you have to decrypt it to get the actual result. So I think Vitalik has a saying, like, all the technology we have is kind of a simulated, trusted third party. And I think, like think from this example, we're kind of seeing this phenomena. This horse on the bottom is kind of like a trusted third party. And when we're using FHE and without it, the application logic looks similar, but it's kind of wrapping on another layer of security. And from the perspective of user, all they see is the same. They input some value on their browser, and then they got some values from their browser, but there are differences behind the scenes, and there are different security and trust assumptions behind the scenes. So why are we not already using FHE now? We kind of already, like, so in the basement, you have FrogZone from 0xPark I highly recommend you to play it but there are three main problems of FHE right now first is the computation is costly let's say you want to send one bit of plain text from the stock to the server you have to encrypt it and in this encrypted message is a data itself so it has some size and how big is this data so this data takes six bytes is kind's kind of amortized, and it's kind of 100 times of the plain text. And it grows linearly. It means if you have 100 bits, you multiply 16 bytes by 100. This is already very good. But the terrible part is the computation part. You need a server of 192 vCPU. It burns 10 USD per hour. So I highly recommend you to play the frog games in the basement to feel that $10 per hour feeling. So the second problem is the verifiability. So you are getting this computation result, but is this computation result coming from the correct computation step or the incorrect computation step or the incorrect computation step. To be honest, like, you cannot really tell. Like, it needs extra work. You either wrap another zkProof on top of it, you either do some, like, message check or something to either guarantee kind of integrity. It's a problem people are actively solving, but it's still a problem. So the problem number three is the decryption problem. So after all the computations, we're getting the encrypted computation output. But how do you decrypt this? How do you remove that elephant from the fridge? 
So you need someone to decrypt the message, but you can give the decryption key to a centralized this is like you're not doing FHE at all. But the concern is like if you, like if someone has the ability to decrypt the output, can they also decrypt other users' input? And this, you know, this makes the whole like FHE a security theater. So the other way to solve this is you use some threshold decryption scheme. the whole FHES security theater. So the other way to solve this is you use some threshold decryption scheme. So every encrypted output, you need all the party to derive a decryption key share. And together, they have to collect all the decryption key together to actually see the output so if the dog did not give its decryption key to their input the other people were not able to decrypt the dog's input but the problem here is the cat if the cat ghosted, then nobody can actually decrypt this output. And then this secret will be secret forever. So let's give a quick recap. The message is simple. In ZK, we kind of have computations on top of your own secret. And for FHE, we can engage with other secrets. So you can build applications for multiple peoples. And that opens a lot of doors to many new applications. So I expect you will hear the word FHE in the near years. There will be more talks in the next DEF CON. This is a kind of trending topic. And lastly, I want to direct you to play the Frog Crypto on the basement to feel like how early days of FHE feel like. Also, if you are coming from a developer background, here are some reports you should check. The first two are from Gauss Lab. It's a library for multi-party FHE. Pay attention to the branch because the latest code is living on some branch. And if you want to deploy your frag zone yourself, check the last link. You can deploy a frag zone. And it's early days. Things break. So expect some hiccup from the code. And that's all I have for you. Thank you so much. Alright, thank you, CC. We actually have a lot of time for QA, so feel free to submit questions. Let's go from the first one on the screen. What are the best use cases you've seen for FHE so far? That's a very good question. So for me personally, like definitely not for others, like I'm more from like economic background, and I'm very interested in like auctions and votings. These are very, FFG applications. They are very ancient. But I feel like if we can do proper voting and proper auctions, that would be cool. I think what makes people exciting in the future is if you can build some private state, you can build a, let's say, P2P network. You have a private state, and then you have some state transition function. You can continuously update the states, and then you can build a kind of private blockchain and private application to many, but I cannot say anything concrete at this moment because I don't know. And we have another one. Which optimizations do you see possible to solve performance issue oh so so this is what I heard from our colleagues PA he said like the last so the latest performance update was already in 2016, and there hasn't been new papers for that performance improvement. So we might be looking for a new generation of bootstrapping technique to improve their performance. When should you use ZK, FHE, or MPC? So my over-simplistic understanding of this is if your application involves just one party, then use DK. If you have two parties, use MPC. If you have more than for QA. 
What do you think of TEs as a solutions for shared state, shared private state computation as a performance solution over FHE? Good question. I don't know yet. I haven't dive into this topic, and so I haven't developed opinions on this. All right, perfect. OK. All right, let's give it another 10, 15 seconds to see if there are any more. Maybe a question for you, like who here have heard FHE before? Oh, okay, almost everyone. Okay. Who here have heard indistinguishable obfuscation before. OK. Oh. Nice. All right, cool. Thank you, CC, for your amazing talk. Our next talk will be in 10 minutes.", - "eventId": "devcon-7", - "slot_start": 1731574800000, - "slot_end": 1731576600000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1yFyLyjYjdDzT6MDPS4LGolPm0BsYYfhsoxLz5fezE_k", - "resources_slides": null, "speakers": [ - "chih-cheng-liang" - ] + "marc-nitzsche" + ], + "eventId": "devcon-7", + "slot_start": 1731477600000, + "slot_end": 1731481200000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1CYvsKADAZ5-gmioFl_lFIeEXHIyeSBbn2GOtenPyFq4", + "resources_slides": "" }, "vector": [ 0, @@ -494990,7 +493583,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -495439,9 +494031,8 @@ 0, 0, 0, - 6, - 0, 0, + 6, 0, 0, 0, @@ -495745,7 +494336,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -495822,7 +494412,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -496138,7 +494727,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -496299,12 +494887,12 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -496317,27 +494905,47 @@ }, { "session": { - "id": "live-music-open-jam-or-something-in-between", - "sourceId": "FVHR9Y", - "title": "Live Music, Open Jam, Or Something In Between", - "description": "This will be an open, emergent, co-created format where we're inviting everyone to make music together.", - "track": "Entertainment", - "type": "Music", + "id": "local-build-why-language-is-key-to-decentralization", + "sourceId": "UHVBNL", + "title": "Local Build: Why language is key to decentralization", + "description": "Localization is not a “nice to have” for decentralization: it is a core requirement.\r\n\r\nOver 50% of ETH nodes are between the US and Germany. 90% of stablecoins are USD-pegged. The world we’re creating is stifled by the one that already exists. \r\n\r\nTo be credibly decentralized, Ethereum must be built and secured in the human languages of people outside of the current paradigm. 
This talk will highlight web3-native problems and tangible solutions in l10n, from the technical to the organizational.", + "track": "Coordination", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Community", + "audience": "Product", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], - "language": "en", - "speakers": [ - "marc-nitzsche" + "tags": [ + "Decentralization Improvements", + "Languages", + "User Experience", + "localization", + "l10n", + "Decentralization Improvements", + "Languages", + "User Experience" + ], + "keywords": [ + "Internationalization", + "Localization" ], + "duration": 572, + "language": "en", + "sources_swarmHash": "291a51deef43112c4bd20554687baa9e61d8f77a35566b6d264ee3da2af0d9d3", + "sources_youtubeId": "oaztuq6hO4c", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673482ed9dbb7a90e1e580fb", "eventId": "devcon-7", - "slot_start": 1731477600000, - "slot_end": 1731481200000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1CYvsKADAZ5-gmioFl_lFIeEXHIyeSBbn2GOtenPyFq4" + "slot_start": 1731490800000, + "slot_end": 1731491400000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1zMgBNNs4mjcJlQvsWzcG-01qBLosEtl3W_zPUteNz-0", + "resources_slides": "https://drive.google.com/file/d/1aLLAARYtpdgsuKe760boUWNpb4Dgfm6v/view", + "speakers": [ + "oliver-jl-renwick", + "laurel" + ] }, "vector": [ 0, @@ -496349,9 +494957,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -496800,6 +495408,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -497093,6 +495702,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -497102,6 +495712,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -497182,6 +495793,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -497490,6 +496102,10 @@ 0, 0, 0, + 2, + 2, + 0, + 0, 0, 0, 0, @@ -497641,28 +496257,14 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -497671,51 +496273,53 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "local-build-why-language-is-key-to-decentralization", - "sourceId": "UHVBNL", - "title": "Local Build: Why language is key to decentralization", - "description": "Localization is not a “nice to have” for decentralization: it is a core requirement.\r\n\r\nOver 50% of ETH nodes are between the US and Germany. 90% of stablecoins are USD-pegged. The world we’re creating is stifled by the one that already exists. \r\n\r\nTo be credibly decentralized, Ethereum must be built and secured in the human languages of people outside of the current paradigm. This talk will highlight web3-native problems and tangible solutions in l10n, from the technical to the organizational.", - "track": "Coordination", + "id": "logs-for-you-anon", + "sourceId": "RRYVNW", + "title": "Logs for you anon", + "description": "The removal of log events has sparked a discussion about its implications for apps that rely on events to display information. Without logs, developers would need to use specialized software to index the chain and search for specific actions, which is costly, not friendly with privacy and requires a case-by-case approach. 
This is in contrast to the current system, where logs provide developers with the freedom to query the chain anonymously, without limits, and without sacrificing any detail.", "track": "Cypherpunk & Privacy", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "DevEx", "Privacy", "Decentralization", "indexing", "Decentralization", "DevEx", "Privacy" ], "keywords": [ "logs", "local apps", "indexing" ], "duration": 526, "language": "en", "sources_swarmHash": "e4e9a3d1779f291acbf4d359d30f7beb6e8e538ba5f374c13ef4402fa8d29873", "sources_youtubeId": "e0HJbXgdl-g", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736d90d74749a4b8935f22a", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d90d74749a4b8935f22a.vtt", "transcript_text": " Hello everyone. Thanks for coming, and thanks to the organization for this event; it has been great so far. So for today, my talk is Logs for you, anon. I will be talking about logs. First, let's see what logs are. They are pieces of information emitted by contracts. They have no size limit other than the block size, and they have three important parts. One is the address of the contract emitting the log. Another is the topics, which are indexed information so they can be retrieved fast. And then we have the data field, which can be whatever you want. So this is a BlockScout screenshot of a transfer from USDT, and you can see that they present a very human-friendly UI with the decoded input data. But here below, you can see the topics and the data. The first topic is always the signature of the event; in this case, the transfer has a from and a to, and these are the two other topics in the log. As for the importance of topics: for example, this is a screenshot from Rotki, and we can use logs to provide insightful information to the user. From Transfer events, for example, we can obtain which tokens were swapped in a transaction, and it works on any EVM chain. So for example, this is information obtained from Arbitrum and Optimism, and we can see that the user swapped some tokens in Lama Thief. And all of this is thanks to logs. So what is the problem with them? There are two big complaints that the community has about them. One is that the user has to pay for emitting logs, so every time the contract emits information, the user pays for it. And the network as a whole needs to store this information in its database. To look at the first problem, I took this Dune dashboard, and as you can see, it's slightly old information, but over a 30-day period, more than 1,000 ETH was spent on gas just for logs, which is around 15 to 20% of the total spent on transactions. So what can we do about this? 
And the proposal is that you can have the chain without locks, and then afterwards, you off-chain, emit them, like reprocess the transaction and emit them, modifying the bytecode of the contract. This was from a company, and then it came the proposal, the standard proposal. It is this one, and it's useful to have a standard because everyone can implement it. But it has a conversation and it didn't have much feedback. It stopped at some point last year. So is this good, is this bad? We need to consider a few things first. The user pays for the emitting logs, but they also benefit from them. So as you can see in RodKey, we display information, and it's thanks to the logs. Also, if you use Sirion or any other application, Aave, Frontend, whatever, they all use logs. So you are paying for them, but they are useful for you always. Also, regarding the Ethereum scalability, GoEthereum, the node is 14 terabytes in archive mode. I was told that it's around 600 gigabytes the locks in the system so it's a small proportion of the 14 terabytes and if at any point there are problems, they can be completely removed from the node. Also, they can be sharded in a different node or whatever. And it's not such a big deal. And if it ever becomes a problem, we can manage it. And also data ownership. The problem is that if you put a company like Shadow or Ghost or any other company doing Shadow Logs, you are not sovereign of your data. You can't use your node anymore to get this information that it is useful for you. So you are using it. So this is a big problem. And, of course, we are cyberpunk, so we want to have this data and use it. There are also good things about shadow logs. For example, liquidity liquidations in the contracts were not indexed properly. So you can use shadow logs to properly detect when you were liquidated. And also there is a similar issue in the Gnosis bridge because the address is not indexed properly. Also, for example, Uniswap created a beautiful dashboard and Pendel improved the routing using shadow logs, so there are good use cases for them. So my feedback here is that we need balance and there are two main takeaways. One is that we need the logs for everyone to use them. We don't need everything in there. You can add it later, but we need a minimum. And it's good to have extra information in Shadow Logs. So that is all. Thank you for coming. Let's see if you have any questions. Okay. Now, if somebody has any questions, please raise your hand and I will throw you this. Don't worry, it doesn't hurt. It's soft. Also, it's a five-minute talk. Yeah, and afterwards if there's some more deep questions, you can discuss it outside. It's okay. Okay, Miko has a question. Don't worry, it doesn't break. Mikko on käsitellyt. Ei huomaa, se ei rikki. Onko tämä mikrofoni? Se on. Laita sen lähelle ja puhutaan siitä. Se ei ole orffi. Miksi ei laiteta ne melkein ilmaiseksi, jotta protokollit käyttäisivät niitä enemmän? them basically almost free so that the protocols would use them more and that way we don't need set of logs because I don't think the impact on the node performance is that bad. As far as I know there is no such big impact on using the logs. The main problem will be, I guess I'm not an expert on that part, but I guess it will be DDoS to the node. 
So if you can expand as much logs as you want, someone could DDoS and it's not zero cost but it's almost free, the logs in the disk, but if you can reduce the amount that you spend on them, you can DDoS nodes and also you have to careful measure because everything is like an equilibrium in the Ethereum ecosystem so if you modify one constant here in the price it can affect to everything because as far as I know, locks were supposed to be like for real-time notification so user could get real-time feedback and then developers use them to store information so they change it the use case of the lock. So I guess it needs an equilibrium and careful review for that. Hey. Do you think that it's fair to say that the reason that Shadow wants to just eliminate the logs is in order to make the users pay less, but in the end, the users end up using the logs from like Rodkey, from Aave, from every front end. So shouldn't the users actually end up paying for the logs? I mean, doesn't it make sense in the end? For users to pay? Yeah. Yeah, for me, it makes use of what I said a little bit. The user is paying for this information, but they directly have a positive impact from it. So shadow logs are good in cases that you need extra information, like adding more things as a developer if you need analytics or anything. But the user has a real positive impact for having logs. They pay for something that really benefits. And it's not that expensive. For a single user, the whole network can have a high cost but the single user is not paying everything, just paying a fraction of that. So this is positive for them. Okay. Thank you very much, Javier. Time is up. Thank you. Bye. . So now please...", "eventId": "devcon-7", - "slot_start": 1731490800000, - "slot_end": 1731491400000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1zMgBNNs4mjcJlQvsWzcG-01qBLosEtl3W_zPUteNz-0", - "resources_slides": null, + "slot_start": 1731646200000, + "slot_end": 1731646800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/19tr5hJbHHcDFcMqxEDdnvWaK2uCU2yR2HV12bhQ1NTQ", + "resources_slides": "https://drive.google.com/file/d/1viCgTcgFxwThmUw7wqFjIOMDYZN2yGRx/view", "speakers": [ - "oliver-jl-renwick", - "laurel" + "yabir-garcia-benchakhtir" ] }, "vector": [ @@ -497724,13 +496328,13 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -498179,9 +496783,8 @@ 0, 0, 0, - 6, - 6, 0, + 6, 0, 0, 0, @@ -498476,7 +497079,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -498486,7 +497088,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -498500,6 +497101,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -498567,12 +497169,11 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -498586,6 +497187,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -498878,9 +497480,6 @@ 0, 0, 2, - 2, - 0, - 0, 0, 0, 0, @@ -499037,11 +497636,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -499055,47 +497654,43 @@ }, { "session": { - "id": "logs-for-you-anon", - "sourceId": "RRYVNW", - "title": "Logs for you anon", - "description": "The removal of log events has sparked a discussion about its implications for apps that rely on events to display information. Without logs, developers would need to use specialized software to index the chain and search for specific actions, which is costly, not friendly with privacy and requires a case-by-case approach. 
This is in contrast to the current system, where logs provide developers with the freedom to query the chain anonymously, without limits, and without sacrificing any detail.", - "track": "Cypherpunk & Privacy", + "id": "long-term-decentralized-storage-for-blobs", + "sourceId": "RCVFHX", + "title": "Long-term Decentralized Storage for Blobs", + "description": "This talk will present a possible scheme to store blobs and other historical data for the long-term in a decentralized fashion. The technology relies on erasure codes and SNARKs. This talk is related to EIP-4444.", + "track": "Core Protocol", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "DevEx", - "Privacy", - "Decentralization", - "indexing", - "Decentralization", - "DevEx", - "Privacy" + "Core Protocol", + "Blobs", + "Sustainability", + "storage", + "Blobs", + "Core Protocol", + "Sustainability" ], "keywords": [ - "logs", - "local apps", - "indexing" + "Storage" ], - "duration": 526, + "duration": 324, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "146236064ab2f260f965665a61e0db66f86769d420f93b351c716f64d8f6e2bf", + "sources_youtubeId": "Bizi9n0t6pY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736d90d74749a4b8935f22a", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d90d74749a4b8935f22a.vtt", - "transcript_text": " . Hello everyone. Thanks for coming. Thanks to the organization for this event, it has been great so far. So for today, my goal is logs for you, Anu. I will be talking about logs. So first, let's see what logs are. They are a piece of information emitted by contracts. They don't have a limit on size more than the block size, and they have three important parts. One is the address of the contract emitting the log. The other is topics that are indexed information so it can be retrieved fast. And then we have data that is decimal data field, and it can be whatever you want. So this is a BlockScout screenshot, and this is a transfer from USDT. And you can see that they present, like, a very human-friendly UI, like they could input data. But here below, you can see the topics and the data. The first one is the signature, always, the signature of the event. In this case, the transfer has a from and a to. And these are the two other topics in the logs. So the importance of topics is, for example, this is a screenshot from Rodkey, and we can use logs to provide insightful information to the user. From transfer, for example, we can obtain what tokens were swapped in a transaction, and it works in any EVM chain. So for example, these are information obtained from Arbitrum and Optimist, and we can see that the user swapped some tokens in Lama Thief. And all this is thank you to logs. So what is the problem with them? There are two big complaints that the community has about them. One is that the user has to pay for emitting logs, so every time the contract emits information, the user pays for it. And the network as a whole needs to store this information in the database. To see about the first problem, I took this Dune dashboard, and as you can see, it's a bit old information, but in the 30 days period, it was over 1,000 Ethereum spent only on gas, and this is around 15 to 20% of the total spent in transactions. So what can we do about this? 
One of the proposals that I saw, the first company that I saw was Shadow, is to completely remove the locks. So if they are a problem, we get rid of them. And the proposal is that you can have the chain without locks, and then afterwards, you off-chain, emit them, like reprocess the transaction and emit them, modifying the bytecode of the contract. This was from a company, and then it came the proposal, the standard proposal. It is this one, and it's useful to have a standard because everyone can implement it. But it has a conversation and it didn't have much feedback. It stopped at some point last year. So is this good, is this bad? We need to consider a few things first. The user pays for the emitting logs, but they also benefit from them. So as you can see in RodKey, we display information, and it's thanks to the logs. Also, if you use Sirion or any other application, Aave, Frontend, whatever, they all use logs. So you are paying for them, but they are useful for you always. Also, regarding the Ethereum scalability, GoEthereum, the node is 14 terabytes in archive mode. I was told that it's around 600 gigabytes the locks in the system so it's a small proportion of the 14 terabytes and if at any point there are problems, they can be completely removed from the node. Also, they can be sharded in a different node or whatever. And it's not such a big deal. And if it ever becomes a problem, we can manage it. And also data ownership. The problem is that if you put a company like Shadow or Ghost or any other company doing Shadow Logs, you are not sovereign of your data. You can't use your node anymore to get this information that it is useful for you. So you are using it. So this is a big problem. And, of course, we are cyberpunk, so we want to have this data and use it. There are also good things about shadow logs. For example, liquidity liquidations in the contracts were not indexed properly. So you can use shadow logs to properly detect when you were liquidated. And also there is a similar issue in the Gnosis bridge because the address is not indexed properly. Also, for example, Uniswap created a beautiful dashboard and Pendel improved the routing using shadow logs, so there are good use cases for them. So my feedback here is that we need balance and there are two main takeaways. One is that we need the logs for everyone to use them. We don't need everything in there. You can add it later, but we need a minimum. And it's good to have extra information in Shadow Logs. So that is all. Thank you for coming. Let's see if you have any questions. Okay. Now, if somebody has any questions, please raise your hand and I will throw you this. Don't worry, it doesn't hurt. It's soft. Also, it's a five-minute talk. Yeah, and afterwards if there's some more deep questions, you can discuss it outside. It's okay. Okay, Miko has a question. Don't worry, it doesn't break. Mikko on käsitellyt. Ei huomaa, se ei rikki. Onko tämä mikrofoni? Se on. Laita sen lähelle ja puhutaan siitä. Se ei ole orffi. Miksi ei laiteta ne melkein ilmaiseksi, jotta protokollit käyttäisivät niitä enemmän? them basically almost free so that the protocols would use them more and that way we don't need set of logs because I don't think the impact on the node performance is that bad. As far as I know there is no such big impact on using the logs. The main problem will be, I guess I'm not an expert on that part, but I guess it will be DDoS to the node. 
So if you can expand as much logs as you want, someone could DDoS and it's not zero cost but it's almost free, the logs in the disk, but if you can reduce the amount that you spend on them, you can DDoS nodes and also you have to careful measure because everything is like an equilibrium in the Ethereum ecosystem so if you modify one constant here in the price it can affect to everything because as far as I know, locks were supposed to be like for real-time notification so user could get real-time feedback and then developers use them to store information so they change it the use case of the lock. So I guess it needs an equilibrium and careful review for that. Hey. Do you think that it's fair to say that the reason that Shadow wants to just eliminate the logs is in order to make the users pay less, but in the end, the users end up using the logs from like Rodkey, from Aave, from every front end. So shouldn't the users actually end up paying for the logs? I mean, doesn't it make sense in the end? For users to pay? Yeah. Yeah, for me, it makes use of what I said a little bit. The user is paying for this information, but they directly have a positive impact from it. So shadow logs are good in cases that you need extra information, like adding more things as a developer if you need analytics or anything. But the user has a real positive impact for having logs. They pay for something that really benefits. And it's not that expensive. For a single user, the whole network can have a high cost but the single user is not paying everything, just paying a fraction of that. So this is positive for them. Okay. Thank you very much, Javier. Time is up. Thank you. Bye. . So now please...", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731646200000, - "slot_end": 1731646800000, + "slot_start": 1731469800000, + "slot_end": 1731470400000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/19tr5hJbHHcDFcMqxEDdnvWaK2uCU2yR2HV12bhQ1NTQ", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/19uBY8dZebCAmZtuh27GvgwcgDo7WY_BpHnT84sKBL6M", + "resources_slides": "https://drive.google.com/file/d/1UGpCD6G-oXiK0AkIq_0k_39KCY7CNyG5/view", "speakers": [ - "yabir-garcia-benchakhtir" + "leo-bautista-gomez" ] }, "vector": [ @@ -499103,7 +497698,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -499561,10 +498155,8 @@ 0, 0, 0, - 6, - 0, - 0, 0, + 6, 0, 0, 0, @@ -499869,6 +498461,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -499880,7 +498473,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -499914,6 +498506,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -499952,7 +498545,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -499966,7 +498558,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -500201,6 +498792,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -500411,17 +499003,15 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, - 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -500435,43 +499025,45 @@ }, { "session": { - "id": "long-term-decentralized-storage-for-blobs", - "sourceId": "RCVFHX", - "title": "Long-term Decentralized Storage for Blobs", - "description": "This talk will present a possible scheme to store blobs and other historical data for the long-term in a decentralized fashion. The technology relies on erasure codes and SNARKs. 
This talk is related to EIP-4444.", - "track": "Core Protocol", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Product", - "featured": false, + "id": "lunarpunk-endgame", + "sourceId": "EVHFWA", + "title": "Lunarpunk Endgame", + "description": "Global surveillance is a static world where change is surpressed and society cannot evolve. In contrast, an anonymity-enhanced world resembles a forest. New civilizational experiments blossom like flowers, radiating outward from the freedom-fighters of the future.\r\n\r\nThe lunarpunk end game is to enable a new ecology of social orders. This talk will describe the grand vision of lunarpunk: multipolar space-faring civilization, human speciation, and the reproduction life throughout the cosmos.", + "track": "Cypherpunk & Privacy", + "type": "Talk", + "expertise": "Beginner", + "audience": "Engineering", + "featured": true, "doNotRecord": false, "tags": [ - "Core Protocol", - "Blobs", - "Sustainability", - "storage", - "Blobs", - "Core Protocol", - "Sustainability" + "Network State", + "Anonymity", + "Autonomous World", + "lunarpunk", + "Anonymity", + "Autonomous World", + "Network State" ], "keywords": [ - "Storage" + "Lunarpunk" ], - "duration": 324, + "duration": 1589, "language": "en", - "sources_swarmHash": "146236064ab2f260f965665a61e0db66f86769d420f93b351c716f64d8f6e2bf", - "sources_youtubeId": "Bizi9n0t6pY", + "sources_swarmHash": "62abf23b929c7511d2d6ad9d0fd17dbae55f874642acb021ab38481533536a9b", + "sources_youtubeId": "NmrpTB-mfQQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673476a39dbb7a90e1389485", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731469800000, - "slot_end": 1731470400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/19uBY8dZebCAmZtuh27GvgwcgDo7WY_BpHnT84sKBL6M", - "resources_slides": null, + "slot_start": 1731488400000, + "slot_end": 1731490200000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1pdPYWGnlJDvugH2zzLYqzKQrvDlutN5EGd8EBIpbeR4", + "resources_slides": "https://drive.google.com/file/d/1hwOwdlsXkK71jSAqasECVPl7YHbztM7e/view", "speakers": [ - "leo-bautista-gomez" + "rachel-rose-oleary" ] }, "vector": [ @@ -500479,8 +499071,8 @@ 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -501245,6 +499837,10 @@ 0, 0, 0, + 0, + 0, + 0, + 0, 2, 0, 0, @@ -501266,6 +499862,45 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -501577,76 +500212,30 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -501791,15 +500380,13 @@ 0, 2, 0, + 2, 0, 0, 0, 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -501811,45 +500398,44 @@ }, { "session": { - "id": "lunarpunk-endgame", - "sourceId": "EVHFWA", - "title": "Lunarpunk Endgame", - "description": "Global surveillance is a static world where change is 
surpressed and society cannot evolve. In contrast, an anonymity-enhanced world resembles a forest. New civilizational experiments blossom like flowers, radiating outward from the freedom-fighters of the future.\r\n\r\nThe lunarpunk end game is to enable a new ecology of social orders. This talk will describe the grand vision of lunarpunk: multipolar space-faring civilization, human speciation, and the reproduction life throughout the cosmos.", - "track": "Cypherpunk & Privacy", - "type": "Talk", - "expertise": "Beginner", + "id": "maci-why-do-we-need-private-voting-and-what-are-we-up-to", + "sourceId": "TCJJW3", + "title": "MACI - Why do we need private voting and what are we up to", + "description": "MACI is a protocol that can be used to run private on chain polls. This talk will introduce the protocol, dive into some of the technical aspects. Finally we will talk about the team's plans for the future and how the community can get involved to help improve the project.", + "track": "Applied Cryptography", + "type": "Lightning Talk", + "expertise": "Intermediate", "audience": "Engineering", - "featured": true, + "featured": false, "doNotRecord": false, "tags": [ - "Network State", - "Anonymity", - "Autonomous World", - "lunarpunk", - "Anonymity", - "Autonomous World", - "Network State" + "Coordination", + "Quadratic Voting", + "Public good", + "voting", + "Coordination", + "Public good", + "Quadratic Voting" ], "keywords": [ - "Lunarpunk" + "Privacy", + "Voting" ], - "duration": 1589, + "duration": 606, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "3e12944268e30652d72b931cdbdd1bf68e19741b4d3f57dd9daf2464127f2dd6", + "sources_youtubeId": "18KFAia72Ww", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673476a39dbb7a90e1389485", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731488400000, - "slot_end": 1731490200000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1pdPYWGnlJDvugH2zzLYqzKQrvDlutN5EGd8EBIpbeR4", - "resources_slides": null, + "slot_start": 1731394800000, + "slot_end": 1731395400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1paq5inxTY__nUEseJKES2bwcdoZZSvs-h5ZpEXOfwsg", + "resources_slides": "https://drive.google.com/file/d/1si1cJtSDiv7B1C8w4Z03ivCb46ikn9hq/view", "speakers": [ - "rachel-rose-oleary" + "ctrlc03" ] }, "vector": [ @@ -501858,12 +500444,12 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -502630,9 +501216,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -502651,7 +501234,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -502760,6 +501342,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -502813,6 +501396,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -502964,6 +501548,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -503015,10 +501600,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -503167,9 +501748,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 2, 0, @@ -503189,52 +501770,48 @@ }, { "session": { - "id": "maci-why-do-we-need-private-voting-and-what-are-we-up-to", - "sourceId": "TCJJW3", - "title": "MACI - Why do we need private voting and what are we up to", - "description": "MACI is a protocol that can be used to run private on chain polls. This talk will introduce the protocol, dive into some of the technical aspects. 
Finally we will talk about the team's plans for the future and how the community can get involved to help improve the project.", - "track": "Applied Cryptography", + "id": "making-defensive-technology-offensive-how-to-get-cypherpunk-ideals-to-the-masses", + "sourceId": "RGMXQ7", + "title": "Making defensive technology offensive: How to get cypherpunk ideals to the masses", + "description": "Cryptography is an inherently defensive tool; it hides your information from adversaries. This is crucial to prevent censorship or monitoring of your data. But it's often sold to consumers with fearmongering about all-powerful malicious actors, which is often ignored by all except the privacy-conscious. We explore real-life examples of offensive cryptographic affordances like interoperability, efficiency, and user consent as stronger motivations for the masses to migrate to cypherpunk tech.", + "track": "Cypherpunk & Privacy", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "expertise": "Beginner", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Coordination", - "Quadratic Voting", - "Public good", - "voting", - "Coordination", - "Public good", - "Quadratic Voting" + "Frameworks", + "Values", + "Use cases of cryptography", + "messaging", + "Frameworks", + "Use cases of cryptography", + "Values" ], "keywords": [ - "Privacy", - "Voting" + "d/acc", + "adoption", + "messaging" ], - "duration": 606, + "duration": 352, "language": "en", - "sources_swarmHash": "3e12944268e30652d72b931cdbdd1bf68e19741b4d3f57dd9daf2464127f2dd6", - "sources_youtubeId": "18KFAia72Ww", + "sources_swarmHash": "879b660e0ee34065abe3a04185d46bd2bb368af1a1b90dea0e9fd1d28e14b236", + "sources_youtubeId": "W5bRYUO-Wk8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6734a08f9dbb7a90e1469361", "eventId": "devcon-7", - "slot_start": 1731394800000, - "slot_end": 1731395400000, + "slot_start": 1731495600000, + "slot_end": 1731496200000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1paq5inxTY__nUEseJKES2bwcdoZZSvs-h5ZpEXOfwsg", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1osFBDl_IG67iwDmsSkuzzcHEUPFlkirPaPwWwqi5bwE", + "resources_slides": "https://drive.google.com/file/d/1-h1NWuXSUWcHu_GcLi3i6U7YExAqhCKL/view", "speakers": [ - "ctrlc03" + "vivek-bhupatiraju" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -503505,6 +502082,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -503695,7 +502273,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -504011,6 +502588,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -504087,10 +502665,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -504106,6 +502684,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -504136,7 +502715,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -504190,7 +502768,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -504343,8 +502920,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -504396,6 +502971,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -504544,7 +503120,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -504554,6 +503129,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -504566,46 +503143,26 @@ }, { "session": { - "id": "making-defensive-technology-offensive-how-to-get-cypherpunk-ideals-to-the-masses", - "sourceId": "RGMXQ7", - "title": "Making defensive technology offensive: How to get cypherpunk ideals to the masses", - "description": "Cryptography is an inherently defensive tool; it hides your information from adversaries. This is crucial to prevent censorship or monitoring of your data. 
But it's often sold to consumers with fearmongering about all-powerful malicious actors, which is often ignored by all except the privacy-conscious. We explore real-life examples of offensive cryptographic affordances like interoperability, efficiency, and user consent as stronger motivations for the masses to migrate to cypherpunk tech.", - "track": "Cypherpunk & Privacy", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "manu-alzuru", + "sourceId": "GNMHSF", + "title": "Manu Alzuru", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Frameworks", - "Values", - "Use cases of cryptography", - "messaging", - "Frameworks", - "Use cases of cryptography", - "Values" - ], - "keywords": [ - "d/acc", - "adoption", - "messaging" - ], - "duration": 352, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "879b660e0ee34065abe3a04185d46bd2bb368af1a1b90dea0e9fd1d28e14b236", - "sources_youtubeId": "W5bRYUO-Wk8", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6734a08f9dbb7a90e1469361", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731495600000, - "slot_end": 1731496200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1osFBDl_IG67iwDmsSkuzzcHEUPFlkirPaPwWwqi5bwE", - "resources_slides": null, - "speakers": [ - "vivek-bhupatiraju" - ] + "slot_start": 1731657600000, + "slot_end": 1731661200000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1tmd6B8VQ5hfKNgdhvR9sH6CcRr1hFUIZc4PvRiCPHFM", + "resources_slides": "" }, "vector": [ 0, @@ -504613,11 +503170,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -504879,7 +503436,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -505387,7 +503943,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -505464,7 +504019,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -505483,7 +504037,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -505771,8 +504324,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -505927,10 +504478,11 @@ 2, 0, 0, + 2, + 0, 0, 0, 0, - 2, 0, 0, 0, @@ -505944,32 +504496,52 @@ }, { "session": { - "id": "manu-alzuru", - "sourceId": "GNMHSF", - "title": "Manu Alzuru", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", - "audience": "Engineering", + "id": "maximum-viable-security-mvs-a-new-issuance-framework", + "sourceId": "KWUF3N", + "title": "Maximum Viable Security (MVS) – a new issuance framework", + "description": "We derive a new framework for analyzing Ethereum Issuance, based on Ethereum's core values: security and neutrality. 
Upon discussing various attacks on Ethereum, we study future growth projections and the importance of diverse validator set, and conclude that Ethereum's defendability is the key factor for issuance policy evaluation. Via MVS, we show how the current issuance reduction proposal is dangerous, based on the future staked ETH concentration with CEXs & impact on solo stakers.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Staking", + "Validator Experience", + "Security", + "composability", + "validator", + "set", + "Security", + "Staking", + "Validator Experience" + ], + "keywords": [ + "neutrality", + "autonomy", + "validator set composition" + ], + "duration": 1597, "language": "en", - "speakers": [], + "sources_swarmHash": "cda7c749cb12987f113639b78de67d0d7154fb91ffdc537cde8b5578b7373bb1", + "sources_youtubeId": "Cjr2ZEzNocc", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673cc18e982f234a1281817c", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731657600000, - "slot_end": 1731661200000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1tmd6B8VQ5hfKNgdhvR9sH6CcRr1hFUIZc4PvRiCPHFM" + "slot_start": 1731556800000, + "slot_end": 1731558600000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1ykeBOYepaHLNtCV-zLYv6QDLjqI6Dn-EYre6XtHK8lo", + "resources_slides": "https://drive.google.com/file/d/1R3_alpfLWqPKWaxhOHg1yCZLx7mJHmVT/view", + "speakers": [ + "artem-kotelskiy" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -506435,6 +505007,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -506717,6 +505290,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -506754,6 +505328,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -506844,6 +505419,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -507126,6 +505702,9 @@ 0, 0, 0, + 2, + 2, + 2, 0, 0, 0, @@ -507272,17 +505851,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, 2, 0, 0, @@ -507295,55 +505868,51 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "maximum-viable-security-mvs-a-new-issuance-framework", - "sourceId": "KWUF3N", - "title": "Maximum Viable Security (MVS) – a new issuance framework", - "description": "We derive a new framework for analyzing Ethereum Issuance, based on Ethereum's core values: security and neutrality. Upon discussing various attacks on Ethereum, we study future growth projections and the importance of diverse validator set, and conclude that Ethereum's defendability is the key factor for issuance policy evaluation. Via MVS, we show how the current issuance reduction proposal is dangerous, based on the future staked ETH concentration with CEXs & impact on solo stakers.", - "track": "Core Protocol", + "id": "memecraft-effectively-communicating-crypto-concepts", + "sourceId": "FAKRPS", + "title": "Memecraft: Effectively Communicating Crypto Concepts", + "description": "Memes have been crucial to the proliferation of various concepts and ideas within the crypto space (ultrasound money, (3,3), regen/degen, QF) which has led to real capital being allocated toward impactful outcomes. The downside to some of this memeing however has been misleading narratives and misunderstandings. 
How do we leverage memetic power for education and tacit understanding of complex concepts?\r\n\r\nThe workshop will include 1) Scene Setting 2) Structured Discussion and a 3) Group Activity.", + "track": "Coordination", "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Staking", - "Validator Experience", - "Security", - "composability", - "validator", - "set", - "Security", - "Staking", - "Validator Experience" + "Public good", + "Marketing", + "User Research", + "memes", + "Marketing", + "Public good", + "User Research" ], "keywords": [ - "neutrality", - "autonomy", - "validator set composition" + "memes" ], - "duration": 1597, + "duration": 1535, "language": "en", - "sources_swarmHash": "cda7c749cb12987f113639b78de67d0d7154fb91ffdc537cde8b5578b7373bb1", - "sources_youtubeId": "Cjr2ZEzNocc", + "sources_swarmHash": "9136a3b8cc8e010316494d6e453d22cb6c424cebbd28d13c9d36695708dd3aa5", + "sources_youtubeId": "BfnDgX9uy5E", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673cc18e982f234a1281817c", + "sources_streamethId": "673dbf5a17a97b4f4d3ce491", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731556800000, - "slot_end": 1731558600000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1ykeBOYepaHLNtCV-zLYv6QDLjqI6Dn-EYre6XtHK8lo", - "resources_slides": null, + "slot_start": 1731642000000, + "slot_end": 1731643800000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1WKMS7RU7L0T4jR34wKgLFODsY4ligbUzbHahkZWhf6I", + "resources_slides": "https://drive.google.com/file/d/1IYAO7yGaKhPZhvBfGT9u1ZTZCYaZg95r/view", "speakers": [ - "artem-kotelskiy" + "joshua-davila", + "beth-mccarthy" ] }, "vector": [ @@ -507351,7 +505920,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -507359,6 +505927,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -507814,6 +506383,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -508098,9 +506668,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -508136,7 +506703,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -508185,6 +506751,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -508206,6 +506773,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -508227,7 +506795,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -508389,6 +506956,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -508512,10 +507080,6 @@ 0, 0, 2, - 2, - 2, - 0, - 0, 0, 0, 0, @@ -508666,11 +507230,11 @@ 0, 0, 0, - 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -508683,49 +507247,47 @@ }, { "session": { - "id": "memecraft-effectively-communicating-crypto-concepts", - "sourceId": "FAKRPS", - "title": "Memecraft: Effectively Communicating Crypto Concepts", - "description": "Memes have been crucial to the proliferation of various concepts and ideas within the crypto space (ultrasound money, (3,3), regen/degen, QF) which has led to real capital being allocated toward impactful outcomes. The downside to some of this memeing however has been misleading narratives and misunderstandings. 
How do we leverage memetic power for education and tacit understanding of complex concepts?\r\n\r\nThe workshop will include 1) Scene Setting 2) Structured Discussion and a 3) Group Activity.", - "track": "Coordination", - "type": "Talk", + "id": "merkle-proofs-when-leaves-leave-you-vulnerable", + "sourceId": "LAKCG3", + "title": "Merkle Proofs: When Leaves Leave You Vulnerable", + "description": "A Merkle proof is a cryptographically authenticated data structure widely used to minimize on-chain data storage. The Merkle algorithm is neat yet non-trivial to implement correctly and securely; its leaves may leave you vulnerable if not handled properly.", + "track": "Security", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Community", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Public good", - "Marketing", - "User Research", - "memes", - "Marketing", - "Public good", - "User Research" + "Auditing", + "Bug", + "merkle", + "Auditing", + "Bug" ], "keywords": [ - "memes" + "Merkle" ], - "duration": 1535, + "duration": 334, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "BfnDgX9uy5E", + "sources_swarmHash": "2acec7178510ddfaad6efbb63c85e3282df87ac7932d16fc39fed44b7ce8b8df", + "sources_youtubeId": "TEBV4hPNm3k", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673dbf5a17a97b4f4d3ce491", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6732face80d989c5b7aebead", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6732face80d989c5b7aebead.vtt", + "transcript_text": " I'm a security engineer from China Security. I'm very happy to share my insights about Merkle proofs. Today, especially, I'm going to talk about when leaves will leave you vulnerable. So, I believe in your life you have seen many trees with different shapes, different structures. For example, it's a very simple tree. It could be bigger and more complex. And in addition, this is good, like it doesn't look like a tree. It could also be the Earth tree in the game Elden Ring. And for Merkle tree, it's very similar. It could be a very balanced and simple Merkle tree, where data is at the bottom and it's balanced. It could also be very imbalanced in this case. And be sure to distinguish Merkle tree from Merkle tree. It's very different. And in addition, we all know the Merkle Patricia tree, which is a very important data structure in the Ethereum global storage. So with all these trees, there are very different Merkle tree algorithms. However, I believe you can find all the important checklists that you should use to ensure a security proof in your protocols. But even though we have this checklist, we cannot ensure that we can find all the vulnerabilities in very different Merkle algorithms. And that is when the leaves can leave your protocol vulnerable. The answer is very clear. The answer is when the leafs can leave your protocol vulnerable. The answer is very clear. The answer is about the beginning. If you don't recognize a node as a leaf, you will never ensure the property from the checklist against it and make it secure. So today, I'm just going to show you one example of this kind of attack that is about Merkle-Monte range. I'm not sure if you have all heard about this algorithm. So let me first introduce what is Merkle monorange. It is a very simple data structure. It's just like a group of sub-Merkle trees. 
You just add data at the bottom of this Merkle mountain range, and as you keep adding new data, if there are two siblings, we add a parent, and if there are two parents, we add another parent. You do this recursively, and eventually you will get several Merkle trees in this Merkle mountain range with different depths and different sizes. And we call the top nodes of these sub-trees peaks. And why do we use a Merkle mountain range? As I told you, if you are adding a new node here, it's very easy to update the tree, and you don't need to re-compute all the nodes in the tree; it's append-only. And now we have these sub-trees. If we only want to store a constant-size commitment on the blockchain, what we do is compute a root. The root will be a nested hash of all the subtree peaks, and this process is called bagging. So once you have the root, you can do a normal existence proof of any data within this Merkle mountain range. How do you do that? That's also very simple. We first do a proof of the leaf within its subtree, and once we have that proof, we also add the other peaks to the proof so we can reconstruct the root at the top. Now just take a minute to think about what can go wrong in this algorithm if you want to prove any data within any of the leaves. So we must have some assumptions. Let's assume the party who is adding data to this Merkle mountain range is trusted, so they won't add any malicious data, and they won't add a subtree into these leaves. In addition, let's assume you have sufficient validation of the index and the depth of the leaves when you do the proof, so that you cannot easily use an intermediate node as a leaf here. For example, P1 and D1 are very different nodes: P1 has depth one, but D1 has depth two, so you can't use the intermediate node in this proof. Even with these constraints, can you break this system? The answer is yes. If we just take one step back and look at this Merkle mountain range again, we'll find there is actually a hidden tree when you build the root, made up of these peaks. So we have three peaks of the subtrees here, and now we are building another Merkle tree from these peaks up to the root. And as you can see, we have no validation of the peaks in the previous assumptions, and that means you can easily find a vulnerability here. And here is one example of this attack. On the left side is the Merkle mountain range; on the right side, I do the attack by just removing the D5 and D6 leaf nodes. Now, as you can see, P4, the intermediate node, becomes a leaf, and more surprisingly, it has the same depth as the node D5. So now if I'm going to prove P4 in the subtree of N1, it will have depth one. That satisfies all the properties we validated before, but we have never validated the peaks. So this is a trick by which you can now prove something non-existent in your Merkle tree. So that's the example I wanted to show you today, and the summary is very simple: a non-leaf could be a leaf. So please ensure that you check all the inputs and validate all the properties you desire in your protocol; otherwise it could become vulnerable easily. And thank you very much for listening. 
If you want to know more about chain security and our work", "eventId": "devcon-7", - "slot_start": 1731642000000, - "slot_end": 1731643800000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1WKMS7RU7L0T4jR34wKgLFODsY4ligbUzbHahkZWhf6I", - "resources_slides": null, + "slot_start": 1731390000000, + "slot_end": 1731390600000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1_G-GfGgNMUn5tiiaH-Srat0PLHtYYRNtiVjZwWlxU_c", + "resources_slides": "https://drive.google.com/file/d/106Up4ALt8E_9hozdiFMGaUJ3oX0zs51D/view", "speakers": [ - "beth-mccarthy", - "joshua-davila" + "shufan-wang" ] }, "vector": [ + 6, 0, 0, 0, @@ -508737,8 +507299,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -509193,10 +507753,9 @@ 0, 0, 0, - 6, - 6, 0, 0, + 6, 0, 0, 0, @@ -509564,7 +508123,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -509586,7 +508144,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -509630,6 +508187,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -509754,6 +508312,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -509770,7 +508329,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -510038,18 +508596,16 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -510062,47 +508618,46 @@ }, { "session": { - "id": "merkle-proofs-when-leaves-leave-you-vulnerable", - "sourceId": "LAKCG3", - "title": "Merkle Proofs: When Leaves Leave You Vulnerable", - "description": "A Merkle proof is a cryptographically authenticated data structure widely used to minimize on-chain data storage. The Merkle algorithm is neat yet non-trivial to implement correctly and securely; its leaves may leave you vulnerable if not handled properly.", - "track": "Security", + "id": "modern-zkp-compilers", + "sourceId": "CV7QXP", + "title": "Modern ZKP compilers", + "description": "At PSE we have done much ZKP advanced development. From that learning we are building a language and compiler, that is summarizing much of this learning.\r\nWe answer questions like: Are compilers necessary in a zkVM world? What is the role of a compiler in ZKP development? What are its most common components? How different ways can this problem be approached?\r\nIn this advanced talk, we will learn how we compile arbitrary boolean expressions, or how the Schwartz–Zippel lemma can be used to optimize", + "track": "Applied Cryptography", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Auditing", - "Bug", - "merkle", - "Auditing", - "Bug" + "Developer Infrastructure", + "Languages", + "ZKP", + "education", + "Developer Infrastructure", + "Languages", + "ZKP" ], "keywords": [ - "Merkle" + "education" ], - "duration": 334, + "duration": 645, "language": "en", - "sources_swarmHash": "2acec7178510ddfaad6efbb63c85e3282df87ac7932d16fc39fed44b7ce8b8df", - "sources_youtubeId": "TEBV4hPNm3k", + "sources_swarmHash": "ff06f4ba851b1ea9b39cae607b1ef0d62e19962feb32f62ee1611e236c5b5a1c", + "sources_youtubeId": "JX9YtcG_EHk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6732face80d989c5b7aebead", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6732face80d989c5b7aebead.vtt", - "transcript_text": " I'm a security engineer from China Security. I'm very happy to share my insights about Merkle proofs. Today, especially, I'm going to talk about when leaves will leave you vulnerable. So, I believe in your life you have seen many trees with different shapes, different structures. For example, it's a very simple tree. 
It could be bigger and more complex. And in addition, this is good, like it doesn't look like a tree. It could also be the Earth tree in the game Elden Ring. And for Merkle tree, it's very similar. It could be a very balanced and simple Merkle tree, where data is at the bottom and it's balanced. It could also be very imbalanced in this case. And be sure to distinguish Merkle tree from Merkle tree. It's very different. And in addition, we all know the Merkle Patricia tree, which is a very important data structure in the Ethereum global storage. So with all these trees, there are very different Merkle tree algorithms. However, I believe you can find all the important checklists that you should use to ensure a security proof in your protocols. But even though we have this checklist, we cannot ensure that we can find all the vulnerabilities in very different Merkle algorithms. And that is when the leaves can leave your protocol vulnerable. The answer is very clear. The answer is when the leafs can leave your protocol vulnerable. The answer is very clear. The answer is about the beginning. If you don't recognize a node as a leaf, you will never ensure the property from the checklist against it and make it secure. So today, I'm just going to show you one example of this kind of attack that is about Merkle-Monte range. I'm not sure if you have all heard about this algorithm. So let me first introduce what is Merkle monorange. It is a very simple data structure. It's just like a group of sub-Merkle trees. You just add the data at the bottom of this Merkle monorange, and once you keep adding new data, if there are two siblings, we add a parent. If there are two parents, we add a parent. If there are two parents, we add another parent. You do this recursively, and eventually you will get several Merkle trees in this Merkle mountain range with different depths and different size. And we call the top node of this sub-tree peaks. And why do we use Merkle mountain range? As I tell you, if you are adding a new node here, it's very easy to build a tree. And you don't need to re-compute all the nodes in this tree. You just append only. And now we have this sub-trade. If we only want to store a constant size of commitment on the blockchain, what we do is we compute a route. The route will be a nested hash of all the subtree peaks and this process we call it a bagging process. So once you have the route, you can do a normal existence proof of any data within this Merkle monorange. How do you do that? That's also very simple. We first do a proof of any of the leaf within the subtree. And once we have that proof, we can also add more pigs within this proof so we can reconstruct the root at the top. And just take a minute to think about what can go wrong in this algorithm if you want to prove any data within any of the leaves. So we must have some assumptions. So let's assume the people who is adding data to this Merkle-Moner range is trusted. So he won't add any malicious data. He won't add a subtree into these leaves. In addition, let's assume you have sufficient validation of the index and the depth of the leaves when you do the proof so that you can easily use another intermediate node as a leaf here. Like for example, the P1 and D1, they are very different nodes. The P1 has depth one, but D1 has depth two. So you can't use the intermediate node in this proof. Even with these constraints, can you break this system? The answer is yes. 
If we just take one step back and look at this Merkle-Marner range again, we'll find there is actually a hidden tree when you build the route. That makes up of these peaks. So we have three peaks of the subtrees here. And now we are building another Merkle tree with these peaks to the route. And as you can see, we have no validation of the peaks in the previous assumptions. And that means you can easily find a vulnerability here. And here is one example of this attack. So on the left side, this is the Merkle-Marner range. On the right side, I do the attack by just removing the D5 and D6 leave node. And now, as you can see, the P4, the intermediate node, becomes a leave. And more surprisingly, they have the same depth as the node D5. So now if I'm going to prove P4 in the subtree of N1, it will have depth one. That satisfies all the properties we have validated before, but we have never validated the peaks. So this is a trick that now you can prove something non-existing in your Merkle tree. So that's the example I want to show you today, and the summary is very simple. A non-leaf with leaf could be a leaf. So please ensure that you check all the input and validate all the properties you desire in your protocol. Otherwise it could become vulnerable easily. And thank you very much for listening. If you want to know more about chain security and our work", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731390000000, - "slot_end": 1731390600000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1_G-GfGgNMUn5tiiaH-Srat0PLHtYYRNtiVjZwWlxU_c", - "resources_slides": null, + "slot_start": 1731393000000, + "slot_end": 1731393600000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1XmimA6xYE2Wr9c4tzpc9e9P7XDxysFx2QT8rBsA-piQ", + "resources_slides": "https://drive.google.com/file/d/1NKw7iFLRw_UYR5UCEQQLqExrMwgp4oQx/view", "speakers": [ - "shufan-wang" + "leo-lara" ] }, "vector": [ - 6, 0, 0, 0, @@ -510113,6 +508668,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -510904,6 +509460,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -510924,6 +509481,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -510939,11 +509497,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -511005,13 +509565,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -511131,7 +509684,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -511270,7 +509822,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -511438,44 +509989,26 @@ }, { "session": { - "id": "modern-zkp-compilers", - "sourceId": "CV7QXP", - "title": "Modern ZKP compilers", - "description": "At PSE we have done much ZKP advanced development. From that learning we are building a language and compiler, that is summarizing much of this learning.\r\nWe answer questions like: Are compilers necessary in a zkVM world? What is the role of a compiler in ZKP development? What are its most common components? How different ways can this problem be approached?\r\nIn this advanced talk, we will learn how we compile arbitrary boolean expressions, or how the Schwartz–Zippel lemma can be used to optimize", - "track": "Applied Cryptography", - "type": "Lightning Talk", - "expertise": "Intermediate", + "id": "mood-rebalancing-singing-bowls-handpan", + "sourceId": "SVAHJU", + "title": "Mood Rebalancing (Singing Bowls + Handpan)", + "description": "By Most Handpan X Ice\r\nThis session helps you feel emotionally centered and peaceful.\r\n- Bring balance to your emotions with singing bowls and handpan. 
\r\n- Using an emotion wheel, you’ll explore and understand your feelings, a key step to managing them. \r\n\r\nNov 15 10:30 - 11:15", + "track": "Entertainment", + "type": "Mixed Formats", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Developer Infrastructure", - "Languages", - "ZKP", - "education", - "Developer Infrastructure", - "Languages", - "ZKP" - ], - "keywords": [ - "education" - ], - "duration": 645, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "ff06f4ba851b1ea9b39cae607b1ef0d62e19962feb32f62ee1611e236c5b5a1c", - "sources_youtubeId": "JX9YtcG_EHk", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731393000000, - "slot_end": 1731393600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1XmimA6xYE2Wr9c4tzpc9e9P7XDxysFx2QT8rBsA-piQ", - "resources_slides": null, - "speakers": [ - "leo-lara" - ] + "slot_start": 1731641400000, + "slot_end": 1731644100000, + "slot_roomId": "decompression-room", + "resources_presentation": "https://docs.google.com/presentation/d/1STERW4iF8WxYtoPJQKN2mZr5qwM1yuH_XYRcXEVM1pw", + "resources_slides": "" }, "vector": [ 0, @@ -511487,7 +510020,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -511948,7 +510480,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -512283,7 +510814,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -512304,7 +510834,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -512320,13 +510849,11 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -512792,9 +511319,10 @@ 0, 0, 0, - 2, 0, 0, + 2, + 0, 0, 2, 0, @@ -512814,10 +511342,10 @@ }, { "session": { - "id": "mood-rebalancing-singing-bowls-handpan", - "sourceId": "SVAHJU", - "title": "Mood Rebalancing (Singing Bowls + Handpan)", - "description": "By Most Handpan X Ice\r\nThis session helps you feel emotionally centered and peaceful.\r\n- Bring balance to your emotions with singing bowls and handpan. \r\n- Using an emotion wheel, you’ll explore and understand your feelings, a key step to managing them. \r\n\r\nNov 15 10:30 - 11:15", + "id": "mood-uplifting-singing-bowls-handpan", + "sourceId": "H7Y7L8", + "title": "Mood Uplifting (Singing Bowls + Handpan)", + "description": "By Most Handpan X Ice\r\nThis session fills you with positive energy, boosting your mood and clearing your mind.\r\n- Lift your spirits with the bright sounds of singing bowls, handpan, and soft percussion. \r\n\r\nNov 14 15:00 - 15:45", "track": "Entertainment", "type": "Mixed Formats", "expertise": "", @@ -512829,10 +511357,11 @@ "language": "en", "speakers": [], "eventId": "devcon-7", - "slot_start": 1731641400000, - "slot_end": 1731644100000, + "slot_start": 1731571200000, + "slot_end": 1731573900000, "slot_roomId": "decompression-room", - "resources_presentation": "https://docs.google.com/presentation/d/1STERW4iF8WxYtoPJQKN2mZr5qwM1yuH_XYRcXEVM1pw" + "resources_presentation": "https://docs.google.com/presentation/d/1vnIacRdbAcvTa2ioFdaqS_vlSqjDw2GnNcAukvszKyw", + "resources_slides": "" }, "vector": [ 0, @@ -514145,11 +512674,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -514171,37 +512695,48 @@ }, { "session": { - "id": "mood-uplifting-singing-bowls-handpan", - "sourceId": "H7Y7L8", - "title": "Mood Uplifting (Singing Bowls + Handpan)", - "description": "By Most Handpan X Ice\r\nThis session fills you with positive energy, boosting your mood and clearing your mind.\r\n- Lift your spirits with the bright sounds of singing bowls, handpan, and soft percussion. 
\r\n\r\nNov 14 15:00 - 15:45", - "track": "Entertainment", - "type": "Mixed Formats", - "expertise": "", - "audience": "Engineering", + "id": "mopro-make-client-side-proving-on-mobile-easy", + "sourceId": "BZWFEM", + "title": "Mopro: Make Client-side Proving on Mobile Easy", + "description": "Mopro is a toolkit for ZK app development on mobile. Mopro makes client-side proving on mobile simple. Mopro aims to connect different adapters with different platforms. In this talk, we will share:\r\n- How to use Mopro to develop your own ZK mobile app.\r\n- What is the current development progress, including the current supported proving systems, supported platforms, and mobile GPU exploration results. \r\n- Moreover, we will share the challenges that Mopro faces and our future roadmap.", + "track": "Applied Cryptography", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Developer", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "ZKP", + "Cryptography", + "Mobile", + "android", + "Cryptography", + "Mobile", + "ZKP" + ], + "keywords": [ + "iOS", + "Android" + ], + "duration": 958, "language": "en", - "speakers": [], + "sources_swarmHash": "83f2fcfab64a4052bdaa28b2c9f33ae4f5a4bccdd8fdc70865019c8ab568a649", + "sources_youtubeId": "0ziKiYwhJHk", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731571200000, - "slot_end": 1731573900000, - "slot_roomId": "decompression-room", - "resources_presentation": "https://docs.google.com/presentation/d/1vnIacRdbAcvTa2ioFdaqS_vlSqjDw2GnNcAukvszKyw" + "slot_start": 1731397800000, + "slot_end": 1731398400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1usTBzr557w8yMObkzJBvScKjnAoHQFztqym-wk6b1dk", + "resources_slides": "https://drive.google.com/file/d/1xM2-Vbj6dDtLfGIGo9bx72Z30hArdWIg/view", + "speakers": [ + "ya-wen-jeng", + "moven-tsai" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, 0, 0, 0, @@ -514212,6 +512747,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -514671,6 +513207,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -514960,6 +513498,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -514969,6 +513508,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -515020,6 +513560,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -515306,6 +513847,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -515504,10 +514046,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 2, @@ -515521,53 +514063,50 @@ 0, 0, 0, - 0, - 0, 0 ] }, { "session": { - "id": "mopro-make-client-side-proving-on-mobile-easy", - "sourceId": "BZWFEM", - "title": "Mopro: Make Client-side Proving on Mobile Easy", - "description": "Mopro is a toolkit for ZK app development on mobile. Mopro makes client-side proving on mobile simple. Mopro aims to connect different adapters with different platforms. In this talk, we will share:\r\n- How to use Mopro to develop your own ZK mobile app.\r\n- What is the current development progress, including the current supported proving systems, supported platforms, and mobile GPU exploration results. \r\n- Moreover, we will share the challenges that Mopro faces and our future roadmap.", + "id": "mp-fhe-experiments-our-learnings-trying-to-find-the-next-big-tech-to-focus-on", + "sourceId": "9JYWVP", + "title": "MP-FHE experiments. Our learnings trying to find the next big tech to focus on.", + "description": "This talk mainly focuses on showcasing the work that some PSE members did while starting to dive into MPC-FHE during Q2 2024. 
This work is composed by various explorations within the MPC-FHE realm that move towards different directions and goals.\r\n\r\nFrom FHE compilers to FFT Bootstrapping GPU optimization proposals, passing by FHE Game demos and many application level implementations, the talk aims to reach beginner-advanced audience on the research/product paths that we have explored so far.", "track": "Applied Cryptography", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "ZKP", - "Cryptography", - "Mobile", - "android", - "Cryptography", - "Mobile", - "ZKP" - ], "keywords": [ - "iOS", - "Android" + "FHE", + "MPC", + "Explorations" + ], + "tags": [ + "Homomorphic Encryption", + "Use cases of cryptography", + "exploration", + "Homomorphic Encryption", + "Use cases of cryptography" ], - "duration": 958, "language": "en", - "sources_swarmHash": "83f2fcfab64a4052bdaa28b2c9f33ae4f5a4bccdd8fdc70865019c8ab568a649", - "sources_youtubeId": "0ziKiYwhJHk", + "sources_swarmHash": "16b50097b34925bd9c17633e4b231fd78b57d4e01d8c707eca5bc13e7d0b475a", + "sources_youtubeId": "Didnvmet5Ng", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "cperezz" + ], "eventId": "devcon-7", - "slot_start": 1731397800000, - "slot_end": 1731398400000, + "slot_start": 1731391800000, + "slot_end": 1731392400000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1usTBzr557w8yMObkzJBvScKjnAoHQFztqym-wk6b1dk", - "resources_slides": null, - "speakers": [ - "ya-wen-jeng", - "moven-tsai" - ] + "resources_presentation": "https://docs.google.com/presentation/d/12k_WqxuHHHeL-ozPhNdmibpCzBNzvOlF-4z0chDHOyY", + "resources_slides": "https://drive.google.com/file/d/1J-VtH2lLo-CkZ9ufw7BOdkTUXdksUDoh/view" }, "vector": [ 0, @@ -516041,56 +514580,8 @@ 0, 0, 0, - 6, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, + 6, 0, 0, 0, @@ -516334,7 +514825,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -516344,7 +514834,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -516458,6 +514947,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -516684,7 +515174,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -516785,6 +515274,46 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -516884,7 +515413,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -516894,6 +515422,12 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -516906,37 +515440,44 @@ }, { "session": { - "id": "mp-fhe-experiments-our-learnings-trying-to-find-the-next-big-tech-to-focus-on", - "sourceId": "9JYWVP", - "title": "MP-FHE experiments. Our learnings trying to find the next big tech to focus on.", - "description": "This talk mainly focuses on showcasing the work that some PSE members did while starting to dive into MPC-FHE during Q2 2024. 
This work is composed by various explorations within the MPC-FHE realm that move towards different directions and goals.\r\n\r\nFrom FHE compilers to FFT Bootstrapping GPU optimization proposals, passing by FHE Game demos and many application level implementations, the talk aims to reach beginner-advanced audience on the research/product paths that we have explored so far.", + "id": "mpc-tooling-or-how-to-create-mpc-apps", + "sourceId": "QLMYBD", + "title": "MPC Tooling or How to create MPC apps", + "description": "Let's get into the state of the art of MPC development: we'll discuss different MPC schemes, current MPC tooling & how you can create MPC apps today.\r\nWe'll cover the tech stack from a frontend level (e.g. MPC compilers) to a backend - and of course how we can combine them.", "track": "Applied Cryptography", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "FHE", + "tags": [ + "Tooling", + "Cryptography", "MPC", - "Explorations" + "Cryptography", + "MPC", + "Tooling" ], - "tags": [ - "Homomorphic Encryption", - "Use cases of cryptography", - "exploration", - "Homomorphic Encryption", - "Use cases of cryptography" + "keywords": [ + "Circom-MPC", + "MPC tooling" ], + "duration": 489, "language": "en", - "speakers": [ - "cperezz" - ], + "sources_swarmHash": "8b8ee46fd9725a4ea9ca521e0da429005bef6925bc6bb11dae9ee6fc11c803aa", + "sources_youtubeId": "eKpcf1JMNak", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731391800000, - "slot_end": 1731392400000, + "slot_start": 1731390600000, + "slot_end": 1731391200000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/12k_WqxuHHHeL-ozPhNdmibpCzBNzvOlF-4z0chDHOyY" + "resources_presentation": "https://docs.google.com/presentation/d/1F2EhWXcgf32_Gh77ty0p18d2rnEPMZymHL7KX7iwSdE", + "resources_slides": "https://drive.google.com/file/d/1NGZv7lW_PwYWoB_O_shLOwvSq1os6-CH/view", + "speakers": [ + "rasul-ibragimov" + ] }, "vector": [ 0, @@ -517700,6 +516241,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -517707,6 +516249,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -517718,12 +516261,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -518108,7 +516645,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -518275,10 +516811,10 @@ }, { "session": { - "id": "mpc-tooling-or-how-to-create-mpc-apps", - "sourceId": "QLMYBD", - "title": "MPC Tooling or How to create MPC apps", - "description": "Let's get into the state of the art of MPC development: we'll discuss different MPC schemes, current MPC tooling & how you can create MPC apps today.\r\nWe'll cover the tech stack from a frontend level (e.g. MPC compilers) to a backend - and of course how we can combine them.", + "id": "mpcstats", + "sourceId": "ND3S9R", + "title": "MPCStats", + "description": "MPCStats is a framework allowing data consumers to query statistical computation from either one or multiple data providers while preserving privacy to those raw data. We support standard statistical operations, including nested and filter ones. 
Data providers do not leak their data and data consumers can be convinced the computation is done correctly.", "track": "Applied Cryptography", "type": "Lightning Talk", "expertise": "Intermediate", @@ -518287,31 +516823,39 @@ "doNotRecord": false, "tags": [ "Tooling", - "Cryptography", + "Privacy", "MPC", - "Cryptography", + "Public good", + "verification", + "computation", "MPC", + "Privacy", + "Public good", "Tooling" ], "keywords": [ - "Circom-MPC", - "MPC tooling" + "privacy-preserving", + "data analysis", + "MPC", + "statistics", + "verifiable computation" ], - "duration": 489, + "duration": 508, "language": "en", - "sources_swarmHash": "8b8ee46fd9725a4ea9ca521e0da429005bef6925bc6bb11dae9ee6fc11c803aa", - "sources_youtubeId": "eKpcf1JMNak", + "sources_swarmHash": "9b8211a5308190cf41598cd33cefed8af79e239f4d4c5a6648a32a2cbcf77f51", + "sources_youtubeId": "wCp7Zsjou7w", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731390600000, - "slot_end": 1731391200000, + "slot_start": 1731396000000, + "slot_end": 1731396600000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1F2EhWXcgf32_Gh77ty0p18d2rnEPMZymHL7KX7iwSdE", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/10sZNPm9ETDOiRts7vDo9aVWovdRE2PpqvKAxR6_9Lv8", + "resources_slides": "https://drive.google.com/file/d/1-bybulVBrLQLhna46XkTSP5yVxlCF_zJ/view", "speakers": [ - "rasul-ibragimov" + "kevin-chia", + "teeramet-jern-kunpittaya" ] }, "vector": [ @@ -518790,6 +517334,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -519079,10 +517624,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -519175,12 +517716,14 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -519411,6 +517954,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -519481,12 +518025,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -519651,61 +518190,48 @@ }, { "session": { - "id": "mpcstats", - "sourceId": "ND3S9R", - "title": "MPCStats", - "description": "MPCStats is a framework allowing data consumers to query statistical computation from either one or multiple data providers while preserving privacy to those raw data. We support standard statistical operations, including nested and filter ones. Data providers do not leak their data and data consumers can be convinced the computation is done correctly.", - "track": "Applied Cryptography", - "type": "Lightning Talk", + "id": "mud-how-we-built-an-evm-application-framework-from-the-ground-up", + "sourceId": "883QBY", + "title": "MUD - How we built an EVM application framework from the ground up", + "description": "We wanted to accomplish one simple task: put a game—with all its data and logic—on a blockchain. What followed were countless technical challenges, years of efforts, and learnings that are applicable to anyone building complex onchain apps.\r\n\r\nHow should data be structured? How can complex world state stay up-to-date on the client? How do we allow multiple teams to build on one single world, without it all breaking apart? 
Join us as we share the pitfalls and learnings.", + "track": "Developer Experience", + "type": "Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Tooling", - "Privacy", - "MPC", - "Public good", - "verification", - "computation", - "MPC", - "Privacy", - "Public good", - "Tooling" + "DevEx", + "Frameworks", + "Gaming", + "Autonomous World", + "onchain", + "Autonomous World", + "DevEx", + "Frameworks" ], "keywords": [ - "privacy-preserving", - "data analysis", - "MPC", - "statistics", - "verifiable computation" + "Onchain", + "Games" ], - "duration": 508, + "duration": 1167, "language": "en", - "sources_swarmHash": "9b8211a5308190cf41598cd33cefed8af79e239f4d4c5a6648a32a2cbcf77f51", - "sources_youtubeId": "wCp7Zsjou7w", + "sources_swarmHash": "513b226e691f7a5f2159693e0c0d7ba73d8e00f6db1ff632d1fb557fd67fcb9f", + "sources_youtubeId": "w02aI1S7gcc", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6734401e9dbb7a90e1fc1c7a", "eventId": "devcon-7", - "slot_start": 1731396000000, - "slot_end": 1731396600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/10sZNPm9ETDOiRts7vDo9aVWovdRE2PpqvKAxR6_9Lv8", - "resources_slides": null, + "slot_start": 1731475800000, + "slot_end": 1731477600000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/13IffrHXnDmcykkm_fptRD_pUCl4g2eRLtXlWD6o8UUE", + "resources_slides": "https://drive.google.com/file/d/1XCfl0PQIMidppS580U-TfI-GCZxVKHtJ/view", "speakers": [ - "kevin-chia", - "teeramet-jern-kunpittaya" + "alvarius" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -519820,6 +518346,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -520174,12 +518701,6 @@ 0, 0, 0, - 6, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -520471,7 +518992,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -520490,6 +519010,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -520539,7 +519060,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -520559,7 +519079,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -520571,6 +519090,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -520798,39 +519319,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -520869,7 +519357,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -520912,6 +519399,47 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -521035,52 +519563,45 @@ }, { "session": { - "id": "mud-how-we-built-an-evm-application-framework-from-the-ground-up", - "sourceId": "883QBY", - "title": "MUD - How we built an EVM application framework from the ground up", - "description": "We wanted to accomplish one simple task: put a game—with all its data and logic—on a blockchain. What followed were countless technical challenges, years of efforts, and learnings that are applicable to anyone building complex onchain apps.\r\n\r\nHow should data be structured? How can complex world state stay up-to-date on the client? How do we allow multiple teams to build on one single world, without it all breaking apart? 
Join us as we share the pitfalls and learnings.", - "track": "Developer Experience", + "id": "mud-past-present-and-future", + "sourceId": "FE9L3P", + "title": "MUD: Past, present, and future", + "description": "MUD--an open-source engine for autonomous worlds--was released two years ago in DEVCON Bogotá. Since then, it has gone through many iterations and helped many developers build their onchain games and worlds. Two years later, MUD core developer Alvarius will take stock of where we are and what the future holds for MUD.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", "type": "Talk", - "expertise": "Intermediate", + "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, + "keywords": [], "tags": [ - "DevEx", + "Autonomous World", "Frameworks", "Gaming", - "Autonomous World", - "onchain", - "Autonomous World", - "DevEx", - "Frameworks" - ], - "keywords": [ - "Onchain", - "Games" + "Tooling" ], - "duration": 1167, "language": "en", - "sources_swarmHash": "513b226e691f7a5f2159693e0c0d7ba73d8e00f6db1ff632d1fb557fd67fcb9f", - "sources_youtubeId": "w02aI1S7gcc", + "sources_swarmHash": "6053e69151ed013c3c0ca3815d2a365dd08d3ba0696adcecafc8f53b1ffcd65a", + "sources_youtubeId": "AbHC8FVGxeU", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734401e9dbb7a90e1fc1c7a", - "eventId": "devcon-7", - "slot_start": 1731475800000, - "slot_end": 1731477600000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/13IffrHXnDmcykkm_fptRD_pUCl4g2eRLtXlWD6o8UUE", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ "alvarius" - ] + ], + "eventId": "devcon-7", + "slot_start": 1731553200000, + "slot_end": 1731554400000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1OeTy66nVePoVL95ayNDdQbYFQRdNCNjTM0xMIccPtWE", + "resources_slides": "" }, "vector": [ 0, 0, 0, - 6, 0, 0, 0, @@ -521090,6 +519611,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -521846,6 +520368,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -521858,10 +520381,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -522248,8 +520767,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -522391,9 +520908,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 2, 0, @@ -522413,32 +520930,43 @@ }, { "session": { - "id": "mud-past-present-and-future", - "sourceId": "FE9L3P", - "title": "MUD: Past, present, and future", - "description": "MUD--an open-source engine for autonomous worlds--was released two years ago in DEVCON Bogotá. Since then, it has gone through many iterations and helped many developers build their onchain games and worlds. Two years later, MUD core developer Alvarius will take stock of where we are and what the future holds for MUD.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Talk", + "id": "multi-party-fhe-for-multi-player-privacy", + "sourceId": "S9S8M9", + "title": "Multi-Party FHE for Multi-Player Privacy", + "description": "Privacy is an unsolved challenge for blockchains and decentralized systems. ZK cryptography gets us there partially, but not all the way. ZK enables “single-player private state,” and certain other kinds of privacy are impossible to realize with ZKPs alone. 
Panelists, the cryptography library devs, infrastructure builders, and application devs who have recently started to explore programmable encryption will discuss MP-FHE as one such tool for achieving more general privacy capabilities.", + "track": "Applied Cryptography", + "type": "Panel", "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [ - "Autonomous World", - "Frameworks", - "Gaming", - "Tooling" + "tags": [], + "keywords": [ + "mp", + "fhe", + "programmable cryptography" ], + "duration": 3291, "language": "en", - "speakers": [ - "alvarius" - ], + "sources_swarmHash": "ef23b5807bd63ac3c58c1a0c65809124f3f2a09a654b4ad659a05122a52ff664", + "sources_youtubeId": "Md1LKfuBGFo", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735a1499dbb7a90e1b01876", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731553200000, - "slot_end": 1731554400000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1OeTy66nVePoVL95ayNDdQbYFQRdNCNjTM0xMIccPtWE" + "slot_start": 1731564000000, + "slot_end": 1731567600000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1i64ImNoehhB-Dnpix_z7zP--wGTsTmeikoll2OE-lGI", + "resources_slides": "", + "speakers": [ + "eduard-sanou", + "gubsheep", + "janmajaya-mall", + "veronica-zheng" + ] }, "vector": [ 0, @@ -522451,8 +520979,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -522555,9 +521081,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -522574,6 +521097,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -522628,6 +521152,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -522863,6 +521388,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -522919,6 +521445,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -523213,7 +521740,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -523297,13 +521823,10 @@ 0, 0, 0, - 2, 0, 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -523777,94 +522300,47 @@ }, { "session": { - "id": "multi-party-fhe-for-multi-player-privacy", - "sourceId": "S9S8M9", - "title": "Multi-Party FHE for Multi-Player Privacy", - "description": "Privacy is an unsolved challenge for blockchains and decentralized systems. ZK cryptography gets us there partially, but not all the way. ZK enables “single-player private state,” and certain other kinds of privacy are impossible to realize with ZKPs alone. Panelists, the cryptography library devs, infrastructure builders, and application devs who have recently started to explore programmable encryption will discuss MP-FHE as one such tool for achieving more general privacy capabilities.", - "track": "Applied Cryptography", - "type": "Panel", - "expertise": "Beginner", - "audience": "Engineering", + "id": "multi-party-fully-homomorphic-encryption-mp-fhe-in-practice", + "sourceId": "QC7FH7", + "title": "Multi-Party Fully Homomorphic Encryption (MP-FHE) in Practice", + "description": "In this session, we will break down the FHE game Frogzone, which required advancements at every layer of the cryptographic software stack: cryptography libraries and tooling, circuits, software infrastructure, and even DevOps. 
We will also cover additional use cases for FHE at a technical level.", + "track": "[CLS] Programmable / Frogrammable Cryptography, by 0xPARC", + "type": "Workshop", + "expertise": "Intermediate", + "audience": "", "featured": false, "doNotRecord": false, - "tags": [], + "tags": [ + "Cryptography", + "Homomorphic Encryption" + ], "keywords": [ - "mp", - "fhe", - "programmable cryptography" + "Programmable", + "Cryptography" ], - "duration": 3291, + "duration": 5029, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "e81444659e448d17e4a0dca6fd3e287c5ef8c78525a0ead1b46362bc631cc1e3", + "sources_youtubeId": "uNDFmC4NHkM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735a1499dbb7a90e1b01876", + "sources_streamethId": "673d8e0f17a97b4f4de7517d", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731564000000, - "slot_end": 1731567600000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1i64ImNoehhB-Dnpix_z7zP--wGTsTmeikoll2OE-lGI", - "resources_slides": null, + "slot_start": 1731648600000, + "slot_end": 1731654000000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/15m-ipmgu4kmVNAWtsY-5mdugROSn_lIoAK6AY-lB8wM", + "resources_slides": "https://drive.google.com/file/d/1fWnCKp6dZczMJ1YtHroSxcFWUKVYMF7F/view", "speakers": [ - "eduard-sanou", "gubsheep", - "janmajaya-mall", - "veronica-zheng" + "riley-wong-theythem", + "eduard-sanou", + "han-jian" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -523879,6 +522355,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -523947,7 +522424,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -523997,7 +522473,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -524049,6 +522524,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -524236,7 +522712,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -524293,7 +522768,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -524343,6 +522817,9 @@ 0, 0, 0, + 6, + 6, + 6, 0, 0, 0, @@ -524625,6 +523102,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -524701,6 +523179,45 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -525132,7 +523649,6 @@ 0, 0, 0, - 2, 0, 2, 0, @@ -525147,49 +523663,55 @@ 0, 0, 0, + 0, + 2, + 0, + 0, 0 ] }, { "session": { - "id": "multi-party-fully-homomorphic-encryption-mp-fhe-in-practice", - "sourceId": "QC7FH7", - "title": "Multi-Party Fully Homomorphic Encryption (MP-FHE) in Practice", - "description": "In this session, we will break down the FHE game Frogzone, which required advancements at every layer of the cryptographic software stack: cryptography libraries and tooling, circuits, software infrastructure, and even DevOps. 
We will also cover additional use cases for FHE at a technical level.", - "track": "[CLS] Programmable / Frogrammable Cryptography, by 0xPARC", - "type": "Workshop", + "id": "multiparty-homomorphic-encryption-from-ring-learning-with-errors", + "sourceId": "KS7H3H", + "title": "Multiparty Homomorphic Encryption from Ring-Learning-with-Errors", + "description": "This talk will introduce Ring Learning with Errors (RLWE) based Multiparty Homomorphic Encryption (MHE).", + "track": "Applied Cryptography", + "type": "Talk", "expertise": "Intermediate", - "audience": "", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Cryptography", - "Homomorphic Encryption" - ], - "keywords": [ - "Programmable", - "Cryptography" + "Open Source Software", + "Homomorphic Encryption", + "Use cases of cryptography", + "Security", + "Use Cases", + "Homomorphic Encryption", + "Open Source Software", + "Security", + "Use Cases", + "Use cases of cryptography" ], - "duration": 5029, + "keywords": [], + "duration": 1051, "language": "en", - "sources_swarmHash": "e81444659e448d17e4a0dca6fd3e287c5ef8c78525a0ead1b46362bc631cc1e3", - "sources_youtubeId": "uNDFmC4NHkM", + "sources_swarmHash": "7864000381cfb0935a18a7ed2bd38df940f72ab2e3a86ec06695f699d94797a8", + "sources_youtubeId": "pEtfafnxCIw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673d8e0f17a97b4f4de7517d", + "sources_streamethId": "6735af949dbb7a90e1cc49c0", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731648600000, - "slot_end": 1731654000000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/15m-ipmgu4kmVNAWtsY-5mdugROSn_lIoAK6AY-lB8wM", - "resources_slides": null, + "slot_start": 1731569400000, + "slot_end": 1731571200000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1qdDRslHeX1rQN30xep6TupLd5KYw4-agBG6u4Zh17dA", + "resources_slides": "https://drive.google.com/file/d/1tQc9iqUvzA5GzA0TE7ZXIqQTZ8i7ltB_/view", "speakers": [ - "eduard-sanou", - "gubsheep", - "han-jian", - "riley-wong-theythem" + "jean-philippe-bossuat" ] }, "vector": [ @@ -525203,10 +523725,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -525374,7 +523892,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -525670,10 +524187,6 @@ 0, 0, 0, - 6, - 6, - 6, - 0, 0, 0, 0, @@ -525681,6 +524194,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -525949,6 +524463,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -525957,7 +524472,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -525977,6 +524491,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -526027,6 +524542,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -526034,10 +524550,10 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, 0, 0, 0, @@ -526045,6 +524561,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -526512,6 +525029,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -526521,7 +525039,6 @@ 0, 0, 0, - 2, 0, 0, 0 @@ -526529,46 +525046,42 @@ }, { "session": { - "id": "multiparty-homomorphic-encryption-from-ring-learning-with-errors", - "sourceId": "KS7H3H", - "title": "Multiparty Homomorphic Encryption from Ring-Learning-with-Errors", - "description": "This talk will introduce Ring Learning with Errors (RLWE) based Multiparty Homomorphic Encryption (MHE).", - "track": "Applied Cryptography", - "type": "Talk", + "id": "my-mother-will-not-use-it", + "sourceId": "HKKFQX", + "title": "\"My mother will not use it\"", + "description": "In this Talk, I want to cover the different mindsets designers need to improve and optimize the work for web3.\r\nIf we're going to change the way we 
interact with each other and aim to profoundly improve society with this technology, we can't think and use the same methodologies.\r\nWe will cover topics such as the target audience (the title of the Talk), testing, the learning curve, web2 to web3, and more.", + "track": "Usability", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Design", "featured": false, "doNotRecord": false, "tags": [ - "Open Source Software", - "Homomorphic Encryption", - "Use cases of cryptography", - "Security", - "Use Cases", - "Homomorphic Encryption", - "Open Source Software", - "Security", - "Use Cases", - "Use cases of cryptography" + "inspiration", + "Design", + "Design Thinking", + "UI/UX" ], - "keywords": [], - "duration": 1051, + "keywords": [ + "Inspiration" + ], + "duration": 498, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "pEtfafnxCIw", + "sources_swarmHash": "ad423cbfb70b66caa453adc36d1d22548841d8553d166dad612d7ad3d0a943f9", + "sources_youtubeId": "147hrjj2onM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735af949dbb7a90e1cc49c0", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "673584719dbb7a90e1e74aca", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673584719dbb7a90e1e74aca.vtt", + "transcript_text": " So my name is Nuno, designer at the foundation. I work for Ethereum.org mostly. That's where I spend all my time and this is my talk. I submitted my talk based on this tweet from Wesley that he said my mom will never use crypto. Trying to optimize for her is a waste of time and this triggered something on me because I used on my other talks this is for each Prague back in June I guess I had one slide with the same title amongst the other things that I normally do and it's something that I it's a rant mostly why that this is something that we hear a lot out there. In other conversations out there, like, my mother will not use it. Oh, this app is not for my mother. It's not for, like, it's something that always gets me a rank that's why my mother, why the elder generation would use this? That's something that it's almost a rank. So let's start with the question. When did a mass adoption of a technology started with an elder generation? Can we pinpoint something? Can we like, top of our mind? I can't. That never happened, I would say. Never ever happened. Like take a drink of salt. I did not research this. I'm just saying out loud like might be something out of this. but I would say that it never started and we are on the brink of a revolution. So let's break even further. So why, who is my mother? Let's try to persona who is our mother. So this is my mother, my mother, Defcon, Defcon, my mother. She's a lovely lady from Lisbon, so nice to meet you all. She doesn't know he's here, so don't tell her. I'm doxing her. So, again, let's see who is our mother. Our mother might be like roughly, if average 30 years old in the audience, if she gives birth at 25, she might be 55 plus, something like that. Let's try to put her in this level. She'll be around this gap, which will make her a baby boomer and a Gen X, beginning Gen X, probably halfway through Gen X. But this is probably our mother collectively. Classifies the generation that we are sometimes thinking of who's being adopted. So she watched this You know what film this is? 
Show of hands. Okay, some do. She grew up with this; this was the technology that she was exposed to at an influential level, all of this. Probably some of you still used this. Who got one? Show of hands. Yay, some. So it was impactful. But in the end, drilling down on that analog generation: high trust in institutions, in the sense that they grew up trusting corporations, trusting governments in some countries, I don't know, but looking forward, post offices, banks, other corporations, it was something that was trustworthy. Limited access to technology: they don't know how things work; they ask you, probably you folks, how things work. And it's a cultural shift for them to onboard new technology, new stuff on a digital level. So this is my second conclusion. Why do we even bother thinking about them when we are building products for Web3? So I'm almost sure that everything that we will build will start with a young generation. This is one of the key points that I want to bring. Regardless of what we build, they will follow. So let's stop talking about whether my mother will use it. So to Wesley's point, it is a waste of time, probably, to think about them as user profiles, as personas on how to target the audience of our products. And even more, to all the Gen Zs and Alphas out there: there's a bunch of you out there, and you guys and all your friends and all your relatives of the same generation will be the ones that are going to use the Web3 projects that we are building right now. They will have the financial upside very soon. There's a bunch of information out there that the great wealth transfer will happen very soon, from the baby boomers to the other generations, so they will have the financial incentive to do so. So it's not just a question of targeting someone that has money. We need to focus on the younger generation to build the new set of products. They will be the next billion users. So let's focus on them once and for all. So let's stop saying my mother will not use it. Let's start saying the youth will not use it. My kid, if you're a parent, a young parent, my kid will not use it. Probably the TikTokers will use it. Probably the Swifties. Who knows? They are the ones that will probably use our products. So this is the key point that I wanted to bring you guys. Let's start building, because this is to you and for you above all. Think about them, and don't think about your mother, parents, grandmother, whatever. And that's what I have for you today. Thank you. Thank you, Nuno. We have time for some questions. Raise your hand if you would like to ask one. It can be about your mother. It can be about Nuno's mother. Don't ask me about my mother. You don't want to hear that. Yes, yes, yes. Come on. Last push. There is a break after this. Ask a question and then go out into the world knowing you've accomplished great things at this session. Question, question. Okay, while you're thinking... Everyone is angry, I think. I know, I know. While you're thinking, I'll ask one question of my own. Same as the one I asked before: if you want people to take one thing from your talk, back to their normal lives, what's the one thing you want them to take away? Mostly as a designer, I target designers as well, other builders that are thinking about new products. Keep critical thinking in the way you approach things. I think we take for granted everything from Web 2.0. 
And I've seen a lot of talks, even the one before us, that are always like: let's do Web 3.0 the Web 2.0 way. And it's one of the topics that I bring up in my talks. Probably I'm a little bit older; probably it's my generation. I built things in Web 2.0, I was around, and we made a lot of mistakes. And sometimes we are passing those mistakes on to Web3 as well. So be critical. Think a little bit beyond just what is presented, what is easy to present to the user. And this is one example: if the maturity of the technology that we're building is going to take like three, five years, start building for the generation that will use it. Not the elders, not the ones that probably now have the funds to invest in you, something like that. Target the ones that probably will use it. Those will be the next billion. That was the thing that brought me to the talk. Perfect. Thank you. Has anyone come up with another question in the meantime? Yes. Amazing. Oh, this will be hard. Okay, yeah, let's do it this way. This is a microphone. Okay. What do TikTokers want? That's the real question that you all need to figure out. That's not an easy one, but we kind of know what they look like; we can profile them at this point, and that's one of the key highlights. We know what they look like. We can profile them at this point. We know how they use things. We know the interactions. We know what engages them to probably use the product that you're building. We like profiling. As users, we kind of know them already. So we need to understand them better. All right. Another question? Nobody? Okay, in that case, we'll conclude this morning's session. Thank you so much for being here. 
Thank you, Nuno, again.", "eventId": "devcon-7", - "slot_start": 1731569400000, - "slot_end": 1731571200000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1qdDRslHeX1rQN30xep6TupLd5KYw4-agBG6u4Zh17dA", - "resources_slides": null, + "slot_start": 1731559800000, + "slot_end": 1731560400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1phw7po5lIFL6aJaipzIR4HdBRmhdugA212mJKjaQfoc", + "resources_slides": "https://drive.google.com/file/d/1-cq2xdTKBFwYb2HlpG4kvTjh9nZ8pohd/view", "speakers": [ - "jean-philippe-bossuat" + "nuno-loureiro" ] }, "vector": [ @@ -526580,8 +525093,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -527052,10 +525563,9 @@ 0, 0, 0, - 6, - 0, 0, 0, + 6, 0, 0, 0, @@ -527323,7 +525833,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -527351,8 +525860,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -527385,6 +525892,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -527402,7 +525910,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -527413,7 +525920,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -527421,7 +525927,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -527471,6 +525976,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -527746,6 +526253,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -527891,7 +526399,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -527902,48 +526409,59 @@ 0, 0, 0, + 2, 0, 0 ] }, { "session": { - "id": "my-mother-will-not-use-it", - "sourceId": "HKKFQX", - "title": "\"My mother will not use it\"", - "description": "In this Talk, I want to cover the different mindsets designers need to improve and optimize the work for web3.\r\nIf we're going to change the way we interact with each other and aim to profoundly improve society with this technology, we can't think and use the same methodologies.\r\nWe will cover topics such as the target audience (the title of the Talk), testing, the learning curve, web2 to web3, and more.", - "track": "Usability", + "id": "mycofi-mycelial-design-patterns-for-web3-and-beyond", + "sourceId": "8CDPFC", + "title": "MycoFi: Mycelial Design Patterns for Web3 & Beyond", + "description": "Exploring MycoFi guides readers on an underground exploration into the world wise web of mycelial networks, the most prolific producers of public goods on Earth. This talk examines how the evolutionary adaptability of fungi could help us imagine biomimetic alternatives to status-quo economic systems that demand infinite growth on a finite planet. 
If we aim to design regenerative economies, what better\r\nplace to start than with the thriving evolutionary patterns of nature?", + "track": "Coordination", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Design", + "expertise": "Beginner", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "inspiration", - "Design", + "Collective Intelligence", + "Conviction", + "Consensus Mechanisms", + "Civil Resistance", + "Sustainability", + "Public good", + "Regenerative Applications", "Design Thinking", - "UI/UX" + "Civil Resistance", + "Collective Intelligence", + "Consensus Mechanisms", + "Conviction", + "Design Thinking", + "Public good", + "Regenerative Applications", + "Sustainability" ], "keywords": [ - "Inspiration" + "nope" ], - "duration": 498, + "duration": 544, "language": "en", - "sources_swarmHash": "ad423cbfb70b66caa453adc36d1d22548841d8553d166dad612d7ad3d0a943f9", - "sources_youtubeId": "147hrjj2onM", + "sources_swarmHash": "f520b12bc12e6e339bfff3be9b1d59b5019047a45c6c94f2fc1557b7e458af07", + "sources_youtubeId": "0A4jXL5eBaI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673584719dbb7a90e1e74aca", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673584719dbb7a90e1e74aca.vtt", - "transcript_text": " So my name is Nuno, designer at the foundation. I work for Ethereum.org mostly. That's where I spend all my time and this is my talk. I submitted my talk based on this tweet from Wesley that he said my mom will never use crypto. Trying to optimize for her is a waste of time and this triggered something on me because I used on my other talks this is for each Prague back in June I guess I had one slide with the same title amongst the other things that I normally do and it's something that I it's a rant mostly why that this is something that we hear a lot out there. In other conversations out there, like, my mother will not use it. Oh, this app is not for my mother. It's not for, like, it's something that always gets me a rank that's why my mother, why the elder generation would use this? That's something that it's almost a rank. So let's start with the question. When did a mass adoption of a technology started with an elder generation? Can we pinpoint something? Can we like, top of our mind? I can't. That never happened, I would say. Never ever happened. Like take a drink of salt. I did not research this. I'm just saying out loud like might be something out of this. but I would say that it never started and we are on the brink of a revolution. So let's break even further. So why, who is my mother? Let's try to persona who is our mother. So this is my mother, my mother, Defcon, Defcon, my mother. She's a lovely lady from Lisbon, so nice to meet you all. She doesn't know he's here, so don't tell her. I'm doxing her. So, again, let's see who is our mother. Our mother might be like roughly, if average 30 years old in the audience, if she gives birth at 25, she might be 55 plus, something like that. Let's try to put her in this level. She'll be around this gap, which will make her a baby boomer and a Gen X, beginning Gen X, probably halfway through Gen X. But this is probably our mother collectively. Classifies the generation that we are sometimes thinking of who's being adopted. So she watched this You know what film this is? 
Show of hands. Okay, some do. She grew up with this; this was the technology that she was exposed to at an influential level, all of this. Probably some of you still used this. Who got one? Show of hands. Yay, some. So it was impactful. But in the end, drilling down on that analog generation: high trust in institutions, in the sense that they grew up trusting corporations, trusting governments in some countries, I don't know, but looking forward, post offices, banks, other corporations, it was something that was trustworthy. Limited access to technology: they don't know how things work; they ask you, probably you folks, how things work. And it's a cultural shift for them to onboard new technology, new stuff on a digital level. So this is my second conclusion. Why do we even bother thinking about them when we are building products for Web3? So I'm almost sure that everything that we will build will start with a young generation. This is one of the key points that I want to bring. Regardless of what we build, they will follow. So let's stop talking about whether my mother will use it. So to Wesley's point, it is a waste of time, probably, to think about them as user profiles, as personas on how to target the audience of our products. And even more, to all the Gen Zs and Alphas out there: there's a bunch of you out there, and you guys and all your friends and all your relatives of the same generation will be the ones that are going to use the Web3 projects that we are building right now. They will have the financial upside very soon. There's a bunch of information out there that the great wealth transfer will happen very soon, from the baby boomers to the other generations, so they will have the financial incentive to do so. So it's not just a question of targeting someone that has money. We need to focus on the younger generation to build the new set of products. They will be the next billion users. So let's focus on them once and for all. So let's stop saying my mother will not use it. Let's start saying the youth will not use it. My kid, if you're a parent, a young parent, my kid will not use it. Probably the TikTokers will use it. Probably the Swifties. Who knows? They are the ones that will probably use our products. So this is the key point that I wanted to bring you guys. Let's start building, because this is to you and for you above all. Think about them, and don't think about your mother, parents, grandmother, whatever. And that's what I have for you today. Thank you. Thank you, Nuno. We have time for some questions. Raise your hand if you would like to ask one. It can be about your mother. It can be about Nuno's mother. Don't ask me about my mother. You don't want to hear that. Yes, yes, yes. Come on. Last push. There is a break after this. Ask a question and then go out into the world knowing you've accomplished great things at this session. Question, question. Okay, while you're thinking... Everyone is angry, I think. I know, I know. While you're thinking, I'll ask one question of my own. Same as the one I asked before: if you want people to take one thing from your talk, back to their normal lives, what's the one thing you want them to take away? Mostly as a designer, I target designers as well, other builders that are thinking about new products. Keep critical thinking in the way you approach things. I think we take for granted everything from Web 2.0. 
And I've seen a lot of talks, even the one before us, that are always like: let's do Web 3.0 the Web 2.0 way. And it's one of the topics that I bring up in my talks. Probably I'm a little bit older; probably it's my generation. I built things in Web 2.0, I was around, and we made a lot of mistakes. And sometimes we are passing those mistakes on to Web3 as well. So be critical. Think a little bit beyond just what is presented, what is easy to present to the user. And this is one example: if the maturity of the technology that we're building is going to take like three, five years, start building for the generation that will use it. Not the elders, not the ones that probably now have the funds to invest in you, something like that. Target the ones that probably will use it. Those will be the next billion. That was the thing that brought me to the talk. Perfect. Thank you. Has anyone come up with another question in the meantime? Yes. Amazing. Oh, this will be hard. Okay, yeah, let's do it this way. This is a microphone. Okay. What do TikTokers want? That's the real question that you all need to figure out. That's not an easy one, but we kind of know what they look like; we can profile them at this point, and that's one of the key highlights. We know what they look like. We can profile them at this point. We know how they use things. We know the interactions. We know what engages them to probably use the product that you're building. We like profiling. As users, we kind of know them already. So we need to understand them better. All right. Another question? Nobody? Okay, in that case, we'll conclude this morning's session. Thank you so much for being here. 
Thank you, Nuno, again.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731559800000, - "slot_end": 1731560400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1phw7po5lIFL6aJaipzIR4HdBRmhdugA212mJKjaQfoc", - "resources_slides": null, + "slot_start": 1731410400000, + "slot_end": 1731411000000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1vPpKjEWNW5rkIevCpxSX6qLuE5usbq91oz2FVQk6gWw", + "resources_slides": "https://drive.google.com/file/d/1nKmQK56TC5kRva-1yCnmf5UqyHz8gjtS/view", "speakers": [ - "nuno-loureiro" + "jeff-emmett" ] }, "vector": [ @@ -527955,10 +526473,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -528729,6 +527247,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -528757,23 +527276,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -528820,6 +527322,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -528842,7 +527345,6 @@ 0, 0, 2, - 2, 0, 0, 0, @@ -528855,6 +527357,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -528904,6 +527407,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -528938,6 +527442,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -529058,6 +527563,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -529119,7 +527625,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -529129,6 +527634,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -529261,7 +527767,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -529271,64 +527776,67 @@ 0, 0, 0, + 2, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, 0, 0, - 2, 0, 0 ] }, { "session": { - "id": "mycofi-mycelial-design-patterns-for-web3-and-beyond", - "sourceId": "8CDPFC", - "title": "MycoFi: Mycelial Design Patterns for Web3 & Beyond", - "description": "Exploring MycoFi guides readers on an underground exploration into the world wise web of mycelial networks, the most prolific producers of public goods on Earth. This talk examines how the evolutionary adaptability of fungi could help us imagine biomimetic alternatives to status-quo economic systems that demand infinite growth on a finite planet. 
If we aim to design regenerative economies, what better\r\nplace to start than with the thriving evolutionary patterns of nature?", - "track": "Coordination", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Research", - "featured": false, + "id": "native-account-abstraction-in-pectra-rollups-and-beyond-combining-eof-eip-7702-and-rip-7560", + "sourceId": "7AWG3A", + "title": "Native Account Abstraction in Pectra, rollups and beyond: combining EOF, EIP-7702 and RIP-7560", + "description": "Account Abstraction has rightfully become one of the most discussed topics in the Ethereum ecosystem.\r\nThe upcoming Pectra upgrade is set to be the first one to improve EOAs by including EIP-7702.\r\nBut can EIP-7702 alone achieve \"Account Abstraction\"?\r\n\r\nWe will discuss the challenges and benefits of EIP-7702, and break down the team's vision for achieving \"complete\" Native Account Abstraction with RIP-7560/EIP-7701 and how it differs from ERC-4337 + EIP-7702.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Expert", + "audience": "Engineering", + "featured": true, "doNotRecord": false, "tags": [ - "Collective Intelligence", - "Conviction", - "Consensus Mechanisms", - "Civil Resistance", - "Sustainability", - "Public good", - "Regenerative Applications", - "Design Thinking", - "Civil Resistance", - "Collective Intelligence", - "Consensus Mechanisms", - "Conviction", - "Design Thinking", - "Public good", - "Regenerative Applications", - "Sustainability" + "In-protocol Account Abstraction", + "Rollups", + "Account Abstraction", + "eip-7702", + "Account Abstraction", + "In-protocol Account Abstraction", + "Rollups" ], "keywords": [ - "nope" + "Native Account Abstraction", + "RIP-7560", + "EIP-7702" ], - "duration": 544, + "duration": 1521, "language": "en", - "sources_swarmHash": "f520b12bc12e6e339bfff3be9b1d59b5019047a45c6c94f2fc1557b7e458af07", - "sources_youtubeId": "0A4jXL5eBaI", + "sources_swarmHash": "442776890274122c7546b739cfa176e5bc49f849f150eed1b8cc1acf78398aa2", + "sources_youtubeId": "FYanFF-yU6w", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673867c81b0f83434dd66c44", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673867c81b0f83434dd66c44.vtt", + "transcript_text": " . Hello everyone. My name is Alex. I work on account abstraction and today I will follow your talk with a deep dive into the future of native account abstraction and our plans for it. So for the purpose of this talk, I suggest that we all agree that we want native account abstraction, the way for account abstraction is to enshrine it in layer tools in the mainnet. And we need to answer the following questions before we go into it. First, we need to know which part of native account abstraction is already happening in the next Ethereum hard fork. We need to see why it is not enough and what is still missing for us to achieve the account abstraction endgame. I want to explain how we plan to achieve it and also explain how other companies and teams can participate in this effort and honestly look at the possible alternatives to doing what we are proposing. So, a quick recap of where we stand with account abstraction right now, if somebody was not involved into it. So, the first account abstraction proposal is ERC-4367. It solves account abstraction without making consensus changes on the Ethereum protocol. It allowed us, purely out of consensus, to provide account abstraction solution. 
And it did solve a vast majority of cases. And it was released more than a year ago. The mainnet and the mempool have been launched, so it's no longer a new project; it has been used for a year by very serious projects. EIP-7702 is a very important proposal because it's the first time mainnet is getting some account abstraction features. This EIP allows, as you know, EOAs to role-play for a time as smart accounts, and it's scheduled for the next hard fork. You have mentioned RIP-7560. This is our proposal to enshrine the design of ERC-4337 as part of Layer 2 consensus, and it has been implemented, and there is a devnet-ready implementation. And now we are also proposing EIP-7701. This is somewhat similar to RIP-7560, but it is trying to be less opinionated, using fewer parts of the protocol, and it targets Ethereum layer one, and it relies on EOF to do so. It's at an early draft stage, and we ask everybody to provide their feedback on it. So a little bit more on EIP-7702. This is how the account looks once you update it with 7702. You still have the private key, but you also have smart contract code. So it changes the behavior of current EOAs, allowing them to have code as well. And this fully solves the execution part of account abstraction. Your account can do multiple operations at the same time, do intents, whatever it is. It does not, however, solve the security part of account abstraction. Because you still have a private key, you still have an EOA, you still have 12 words that can override your smart contract wallet. And there also needs to be another EOA that creates a transaction to use your authorization. The upside of this is that it works great with ERC-4337, and such accounts can be part of the account abstraction ecosystem and get gas abstraction and many other features. So one question you can ask is: great, so next we'll just wait for the rest of account abstraction to be enshrined in Ethereum mainnet. Well, if you look at the specs for the next three hard forks, this is a list of EIPs that are considered or scheduled for inclusion in the next three hard forks. This is a long list. These changes will take quite a lot of time. And so if we were to just wait to introduce account abstraction on layer one, this could take a very long time. And also, it's a high bar to clear in terms of production-tested specifications and a full spec roadmap, and it requires unanimous consensus among core Ethereum developers to do such a feature. It doesn't mean that we will just wait for these things to happen. There is a lot of activity we can do on layer twos, who are eager to innovate with account abstraction right now. Another alternative is just to keep using ERC-4337 forever. So can we keep using it? Well, kind of, yes. It's good enough in many cases, but it's very much not perfect. The main downside is it still relies on EOAs to act as bundlers. So you have an account abstraction solution, but you still need EOAs for that. We also create a lot of complexity by implementing a lot of parallel technical stacks: parallel mempools, parallel bundlers, and modifications to the node. And as layer twos want to innovate, they start implementing their own native account abstraction solutions. There are chains that did that. And it is a problem because it breaks compatibility.
And also there are still new EIPs that introduce new features to Ethereum. One big example is inclusion lists; you mentioned FOCIL. And these EIPs don't benefit account abstraction users and accounts, by the way. So let's zoom into the flow of a single user operation in ERC-4337. What happens is the user signs and creates a user operation, and the user has to provide it to a bundler server. The bundler server then collects other user operations, bundles them together, and provides the bundle as a transaction to the block builder. It has to use a conditional API, meaning that it performs a validation and provides a condition for this transaction to be valid. And then the block builder can include this as a call on chain. With RIP-7560, we make all these superstructures that we had with the bundler, the conditional API, and the entry point contract part of the consensus protocol. So it very much simplifies and flattens out the complexity. For the user, they sign this transaction, they broadcast it to the mempool, and the protocol takes care of the rest. The complexity becomes part of the protocol, but again, it's simplified. So how it works is that already now, all transactions that we broadcast to the mempool and include on chain have validation code. However, this code is not Solidity EVM code; it's, for example, Go code that a block builder has. We validate signatures, nonces, balances, gas limits, base fees, and these checks are done in Go as part of consensus, and then the execution is done on the EVM. With 7560, we split the transaction into two parts, and the validation part is also Solidity code that runs on-chain, but it's still a single transaction that is split into the validation part and the execution part, and an error in the validation part means that the transaction is not valid. It's not included on-chain and reverted; it just cannot be included in a block. So if you're familiar with ERC-4337, the most complex path a transaction can possibly take is to have gas sponsoring and to deploy a smart contract as part of the first transaction interaction. So these are all execution paths in account abstraction. And this is what it looks like in 7560, meaning that for a transaction type, we add a number of fields, and what happens during this transaction flow is: first step, the user creates a transaction and sends it to the block builder. As part of the transaction, a smart contract gets deployed. The paymaster gets queried on whether it agrees to pay gas for this transaction. Then the account is queried to see if it accepts this transaction as valid, checking the signature and everything. Then the transaction gets actually executed and reaches the target contract. And if the paymaster wanted to, it also gets notified that transaction execution has finished. However, what needs to be said: RIP-7560 is not meant to be included in layer 1 in its current form. The RIP process itself was started for features that are common between various Layer 2s, but don't necessarily target Layer 1. It provides us a lot of flexibility because we don't need unanimous agreement of all core developers. It's an opt-in process where rollups can decide to pick up features and implement them on their networks, and it's very feasible and logical for some RIPs that get adopted to evolve into EIPs. So what prevents the 7560 approach from being part of a mainnet EIP?
Well, a huge part of it is that it defines validation as Solidity methods. We define Solidity methods, and we say that if this method returns correctly, then the transaction is valid. It is a little problematic because the EVM is supposed to be language agnostic, and methods are just part of the Solidity programming language. It's not such a big deal for layer 2s because almost all of them already have some kind of precompiles that are defined fully in Solidity, so they already do this. Another thing is that EOF, the Ethereum EVM object format, introduces deployment-time code validation, and account abstraction could greatly benefit from code validation. However, without EOF definitions, we would not be able to do it with method selectors. And it can be a problem that your validation code is a part of your contract that can be called by other contracts; in some scenarios that can lead to vulnerabilities. So a reminder: in EOF, the contract is split into parts. Legacy contracts have one blob that includes all the code and data of your contract. With EOF, contracts are split into the header, the code section, and the data section. What we are suggesting with EIP-7701 is to also split the code section into parts that have roles assigned to them. So the contract would have EOF validation code, execution code, and any other code, and we can verify the code of the validation section before deploying such contracts. And this code doesn't have to be observable on-chain from within EOF, but it is still executed as part of transaction validation. So if you look again at all the flows, the flow remains exactly the same. It's just that instead of calling specific functions, the EVM executes certain predefined sections of your EOF contracts. And this allows us to get away from these magic method selectors to a more mainnet-level solution. So now it's time to talk about the challenges of account abstraction. People have been talking about it for 10 years, and it's still not on mainnet. This is because it's actually hard. If you see somebody talking about validation scope in account abstraction, you immediately think about this picture. The main problems that account abstraction creates are the cross-dependencies between transaction validations, and the complexity you get in building blocks efficiently and maintaining a decentralized peer-to-peer mempool efficiently. So let me try to explain these problems. The cross-transactional dependencies look like this. You have transaction 4. It modifies the state, and it makes transaction 5 invalid. So when you received transaction 5 individually, it seemed valid to you because A was equal to 0. But now you started building a block, you include transaction 4 first, and now A equals 1 and it's not a valid transaction. In order to work around this issue in general, we just have to split the transaction into two parts and have the validations run separately from executions, in their separate place in the block. So these are still three account abstraction transactions, but their validation parts are separated from them, and they run before any execution code starts running. Now you may ask: but what if validations invalidate each other? What if the validation code changes state that another validation uses? In general, that would be possible. And in order to prevent that, we need to sandbox the validation code to prevent it from doing certain things that it should not be allowed to do. So what are those things?
It's accessing other people's storage, and accessing environment opcodes. Environment opcodes are block number, timestamp, base fee, everything that may change between validation and execution or between phases. And other people's code is code in other contracts, unless it is in a mapping mapped to your address. On layer 2s, it's also any stateful precompile. Doing this is illegal; all other things are allowed in validation. And that allows us to do many great things: you can use tokens, you can transfer tokens, you can do all of that in validation functions. There are other small complexities. One example is that you don't need to invalidate a transaction to make it hard for a block builder to build a block with account abstraction. One good example is unused gas, and using unused gas as an attack vector. For example, you're building a block, you include five transactions, and you see that you still have a lot of available space because transaction 4 requested a 10 million gas limit but didn't use it, so you start adding another transaction to your block. And what happens is, transaction 4 saw the change in the state and started using more gas. Now you have a recursive chicken-and-egg problem, because now transaction 6 doesn't fit your block and you need to exclude it, and you are back to square one. We do solve this by introducing a gas charge on unused gas. But I'm just using it to showcase the kind of problems we need to solve when implementing account abstraction natively. Another thing is maintaining an efficient mempool: to receive a huge number of transactions simultaneously and validate them, you need to parallelize their validation. So assume here's a block builder. It has six CPU cores, and it's performing validation of six transactions in parallel. It runs them individually, meaning they don't access each other's state. And if it finds a transaction that is not valid individually, it gets excluded from block building. The next step for the block builder is to validate all transactions that remain in its mempool and build a valid block. If we were not able to separate the scope of the validation code in one of these transactions, what could happen is a mass invalidation event, when one transaction changes some state and makes all other transactions in your block invalid. And that provides a huge DDoS vector for mempool participants and block builders, because we don't want them propagating invalid blocks. Now, developers who are interested, especially if you're working on layer 2s: what can you do to make native account abstraction happen? First, do get in touch with us on any of our channels, Discord, Telegram, and let's talk about native account abstraction. Do read and get familiar with both RIP-7560 and EIP-7701; we are looking for feedback on them. And you can also dive into the code; this is a link to our reference implementation of RIP-7560. There is also a RollCall event for the RIP process, I guess everybody knows it. Just join it, add RIP-7560 to the agenda, and discuss it with other layer 2s. And let's start building it. So here are our websites, and that's it for now. Thank you. Thank you so much, Alex. Let me start with questions. The first one is: if 4337 bundlers don't yet support the aggregation portion of the spec, how can we be ready to make it part of the protocol? Yeah, so aggregation is a part of 4337. It will be part of native account abstraction eventually. We have a draft EIP for that.
However, it's a complex topic in itself. Aggregated signatures are complex, and there has been little adoption of aggregation yet. It does present additional challenges in the context of native account abstraction, but I think we will overcome them. And again, our approach is to make all these changes very, very modular. We don't want to make one mega account abstraction spec and implement it in one fork. We want to make basically as many EIPs as reasonably possible, so the chains can adopt them meaningfully, but one by one. Next one: how critical and how centralized is the bundler? So in ERC-4337, the bundler is critical. Not very centralized if you use it correctly: if you're using the mempool, you don't depend on any centralized bundler. With 7560, the role of the bundler changes. It becomes more of an assistant to the block builder, and then it's as decentralized as the underlying network. There is no added centralization vector on the bundlers. Does that make sense? Yeah, okay. Thank you. Next one: we have 4337, 7560, and 7702. Can you describe how they all play nicely with each other and how we can avoid, once again, fragmentation in this space, in this case with regard to account abstraction solutions? Right. So 7702 does not pose any fragmentation challenges whatsoever, because this is just a new feature of the EVM, and I assume it will get supported pretty widely, and it's also an addition to the account abstraction roadmap. With 4337 and 7560, we keep the user flow so similar that there can be very minimal friction in terms of fragmentation, because if you have two accounts and their differences are so minimal, I assume most wallets would support both, and both of them will coexist peacefully for the period between now and when 7701 is implemented on mainnet. So they are all still part of the same ecosystem; it's not fragmented. It's just different flavors of the same thing, if that makes sense. What's your recommended roadmap for MetaMask to achieve the end goal of account abstraction? Yeah, so for MetaMask and other EOA wallets, it would be great if they started looking into account abstraction. They can start with 7702, with getting all users to also include some kind of code in their EOAs. It can be very simple, like recovery or gas sponsoring, but crucially they will get production experience of having and managing a fleet of smart contract wallets, because up until now, most wallets have only been managing the private-key part and did not look at users' wallets as smart contracts. And after that, there is no reason for MetaMask not to use ERC-4337 wallets. For people who are starting now, you probably don't need to steer them into using 7702 if they are newcomers. You can just get them to deploy an upgradeable proxy smart contract wallet. And as long as it's an upgradeable proxy, they can evolve together with account abstraction, with versioning and all that. Right? What do you mean by the \"other code\" section in 7701? Yeah, this is referring to this slide. I don't have slides. So you can have any number of code sections in an EOF account.
So any code that does not have a role assigned to it, as we described it, is just an external code, so it can be called by, like, other contracts or other addresses can just call into this code and if the code is assigned a role you cannot call into this code it has to be executed as part of a eap 7701 transaction right it's uh so this is the difference between like other code and a code with an assigned purpose in terms of account obstruction. Now, the last question. What's the point of separating validation and execution in a theorem's account obstruction if conflicts can still arise during execution, even after validation under restricted rules? Yeah, great question. The problem is if the execution conflicts with each other, you can get a state you didn't initially want. For example, you can get a transaction to revert, but the transaction is still valid. And another thing is execution is not limited, not sandboxed. It can be a 20 million gas operation, and it can write all the storage in the world. Validation is limited and sandboxed. So we extract all the validations because we assume they are small functions, relatively, like I don't want to say pure, but clean functions that only access accounts on storage and if they were to collide that would make block invalid. The difference between the reverted transaction in block and in the valid block is the difference between a block builder being DDoSed and somebody having to redo his action.", "eventId": "devcon-7", - "slot_start": 1731410400000, - "slot_end": 1731411000000, + "slot_start": 1731562200000, + "slot_end": 1731564000000, "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1vPpKjEWNW5rkIevCpxSX6qLuE5usbq91oz2FVQk6gWw", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1sZ2P4U7wWwVav4ska4SCGMtylu-lx2sWw0ymD92gTtY", + "resources_slides": "https://drive.google.com/file/d/1f6xZ-3MnyS8zHxF7QZ4PxZY3KGHNTrkv/view", "speakers": [ - "jeff-emmett" + "alex-forshtat" ] }, "vector": [ @@ -529336,6 +527844,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -529343,7 +527852,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -530117,12 +528625,13 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -530130,6 +528639,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -530192,7 +528702,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -530214,7 +528723,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -530227,7 +528735,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -530277,7 +528784,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -530301,6 +528807,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -530312,7 +528819,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -530505,8 +529011,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -530649,8 +529153,6 @@ 0, 0, 2, - 0, - 0, 2, 0, 0, @@ -530663,52 +529165,48 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "native-account-abstraction-in-pectra-rollups-and-beyond-combining-eof-eip-7702-and-rip-7560", - "sourceId": "7AWG3A", - "title": "Native Account Abstraction in Pectra, rollups and beyond: combining EOF, EIP-7702 and RIP-7560", - "description": "Account Abstraction has rightfully become one of the most discussed topics in the Ethereum ecosystem.\r\nThe upcoming Pectra upgrade is set to be the first one to improve EOAs by including EIP-7702.\r\nBut can EIP-7702 alone achieve \"Account Abstraction\"?\r\n\r\nWe will discuss the challenges and benefits of EIP-7702, and break down the team's vision for achieving \"complete\" Native Account Abstraction with RIP-7560/EIP-7701 and how it differs from ERC-4337 + EIP-7702.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Expert", + 
"id": "native-implementation-of-ephemery-testnet-on-besu-and-teku-client-pairs", + "sourceId": "EAABPS", + "title": "Native Implementation of Ephemery Testnet on Besu & Teku Client Pairs", + "description": "This presentation covers the work done to enable native support for Ephemery on Teku and Besu clients. Ephemery is a short-lived, resettable testnet designed to automatically revert to its genesis state after a defined period, making it ideal for intensive, short-term testing without concerns over issues like insufficient testnet funds, inactive validators, or state bloat that long-running testnets often face.", + "track": "[CLS] EPF Day", + "type": "Lightning Talk", + "expertise": "Intermediate", "audience": "Engineering", - "featured": true, + "featured": false, "doNotRecord": false, "tags": [ - "In-protocol Account Abstraction", - "Rollups", - "Account Abstraction", - "eip-7702", - "Account Abstraction", - "In-protocol Account Abstraction", - "Rollups" + "Consensus", + "Developer Infrastructure", + "User Experience" ], "keywords": [ - "Native Account Abstraction", - "RIP-7560", - "EIP-7702" + "Client Development", + "Teku", + "Besu", + "Ephemery" ], - "duration": 1521, + "duration": 847, "language": "en", - "sources_swarmHash": "442776890274122c7546b739cfa176e5bc49f849f150eed1b8cc1acf78398aa2", - "sources_youtubeId": "FYanFF-yU6w", + "sources_swarmHash": "4be7f94081d144df637bc9d6a640778b80c300ec6a7e9d2689a3fc17055bfd24", + "sources_youtubeId": "Rpg9irPn3jI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673867c81b0f83434dd66c44", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673867c81b0f83434dd66c44.vtt", - "transcript_text": " . Hello everyone. My name is Alex. I work on account abstraction and today I will follow your talk with a deep dive into the future of native account abstraction and our plans for it. So for the purpose of this talk, I suggest that we all agree that we want native account abstraction, the way for account abstraction is to enshrine it in layer tools in the mainnet. And we need to answer the following questions before we go into it. First, we need to know which part of native account abstraction is already happening in the next Ethereum hard fork. We need to see why it is not enough and what is still missing for us to achieve the account abstraction endgame. I want to explain how we plan to achieve it and also explain how other companies and teams can participate in this effort and honestly look at the possible alternatives to doing what we are proposing. So, a quick recap of where we stand with account abstraction right now, if somebody was not involved into it. So, the first account abstraction proposal is ERC-4367. It solves account abstraction without making consensus changes on the Ethereum protocol. It allowed us, purely out of consensus, to provide account abstraction solution. And it did solve a vast majority of cases. And it has been released more than a year ago. The mainnet and the mempool have been launched. So it's no longer a new project. It has been used for a year by a very serious project. EAP-7702 is a very important proposal because it's the first time mainnet is getting some account abstraction features. This EAP allows, as you know, EOS to role-play for a time as smart accounts, and it's scheduled for the next hard fork. You have mentioned RIP 7560. 
This is our proposal to enshrine the design of ERC-4337 as part of Layer 2 consensus, and it has been implemented, and there is a devnet-ready implementation. And now we are also proposing EIP-7701. This is somewhat similar to RIP-7560, but it is trying to be less opinionated, using fewer parts of the protocol, and it targets Ethereum layer one, and it relies on EOF to do so. It's at an early draft stage, and we ask everybody to provide their feedback on it. So a little bit more on EIP-7702. This is how the account looks once you update it with 7702. You still have the private key, but you also have smart contract code. So it changes the behavior of current EOAs, allowing them to have code as well. And this fully solves the execution part of account abstraction. Your account can do multiple operations at the same time, do intents, whatever it is. It does not, however, solve the security part of account abstraction. Because you still have a private key, you still have an EOA, you still have 12 words that can override your smart contract wallet. And there also needs to be another EOA that creates a transaction to use your authorization. The upside of this is that it works great with ERC-4337, and such accounts can be part of the account abstraction ecosystem and get gas abstraction and many other features. So one question you can ask is: great, so next we'll just wait for the rest of account abstraction to be enshrined in Ethereum mainnet. Well, if you look at the specs for the next three hard forks, this is a list of EIPs that are considered or scheduled for inclusion in the next three hard forks. This is a long list. These changes will take quite a lot of time. And so if we were to just wait to introduce account abstraction on layer one, this could take a very long time. And also, it's a high bar to clear in terms of production-tested specifications and a full spec roadmap, and it requires unanimous consensus among core Ethereum developers to do such a feature. It doesn't mean that we will just wait for these things to happen. There is a lot of activity we can do on layer twos, who are eager to innovate with account abstraction right now. Another alternative is just to keep using ERC-4337 forever. So can we keep using it? Well, kind of, yes. It's good enough in many cases, but it's very much not perfect. The main downside is it still relies on EOAs to act as bundlers. So you have an account abstraction solution, but you still need EOAs for that. We also create a lot of complexity by implementing a lot of parallel technical stacks: parallel mempools, parallel bundlers, and modifications to the node. And as layer twos want to innovate, they start implementing their own native account abstraction solutions. There are chains that did that. And it is a problem because it breaks compatibility. And also there are still new EIPs that introduce new features to Ethereum. One big example is inclusion lists; you mentioned FOCIL. And these EIPs don't benefit account abstraction users and accounts, by the way. So let's zoom into the flow of a single user operation in ERC-4337. What happens is the user signs and creates a user operation, and the user has to provide it to a bundler server.
The bundler server then collects other user operations, bundles them together, and provides the bundle as a transaction to the block builder. It has to use a conditional API, meaning that it performs a validation and provides a condition for this transaction to be valid. And then the block builder can include this as a call on chain. With RIP-7560, we make all these superstructures that we had with the bundler, the conditional API, and the entry point contract part of the consensus protocol. So it very much simplifies and flattens out the complexity. For the user, they sign this transaction, they broadcast it to the mempool, and the protocol takes care of the rest. The complexity becomes part of the protocol, but again, it's simplified. So how it works is that already now, all transactions that we broadcast to the mempool and include on chain have validation code. However, this code is not Solidity EVM code; it's, for example, Go code that a block builder has. We validate signatures, nonces, balances, gas limits, base fees, and these checks are done in Go as part of consensus, and then the execution is done on the EVM. With 7560, we split the transaction into two parts, and the validation part is also Solidity code that runs on-chain, but it's still a single transaction that is split into the validation part and the execution part, and an error in the validation part means that the transaction is not valid. It's not included on-chain and reverted; it just cannot be included in a block. So if you're familiar with ERC-4337, the most complex path a transaction can possibly take is to have gas sponsoring and to deploy a smart contract as part of the first transaction interaction. So these are all execution paths in account abstraction. And this is what it looks like in 7560, meaning that for a transaction type, we add a number of fields, and what happens during this transaction flow is: first step, the user creates a transaction and sends it to the block builder. As part of the transaction, a smart contract gets deployed. The paymaster gets queried on whether it agrees to pay gas for this transaction. Then the account is queried to see if it accepts this transaction as valid, checking the signature and everything. Then the transaction gets actually executed and reaches the target contract. And if the paymaster wanted to, it also gets notified that transaction execution has finished. However, what needs to be said: RIP-7560 is not meant to be included in layer 1 in its current form. The RIP process itself was started for features that are common between various Layer 2s, but don't necessarily target Layer 1. It provides us a lot of flexibility because we don't need unanimous agreement of all core developers. It's an opt-in process where rollups can decide to pick up features and implement them on their networks, and it's very feasible and logical for some RIPs that get adopted to evolve into EIPs. So what prevents the 7560 approach from being part of a mainnet EIP? Well, a huge part of it is that it defines validation as Solidity methods. We define Solidity methods, and we say that if this method returns correctly, then the transaction is valid. It is a little problematic because the EVM is supposed to be language agnostic, and methods are just part of the Solidity programming language.
It's not such a big deal for layer 2s because almost all of them already have some kind of precompiles that are defined fully in Solidity, so they already do this. Another thing is that EOF, the Ethereum EVM object format, introduces deployment-time code validation, and account abstraction could greatly benefit from code validation. However, without EOF definitions, we would not be able to do it with method selectors. And it can be a problem that your validation code is a part of your contract that can be called by other contracts; in some scenarios that can lead to vulnerabilities. So a reminder: in EOF, the contract is split into parts. Legacy contracts have one blob that includes all the code and data of your contract. With EOF, contracts are split into the header, the code section, and the data section. What we are suggesting with EIP-7701 is to also split the code section into parts that have roles assigned to them. So the contract would have EOF validation code, execution code, and any other code, and we can verify the code of the validation section before deploying such contracts. And this code doesn't have to be observable on-chain from within EOF, but it is still executed as part of transaction validation. So if you look again at all the flows, the flow remains exactly the same. It's just that instead of calling specific functions, the EVM executes certain predefined sections of your EOF contracts. And this allows us to get away from these magic method selectors to a more mainnet-level solution. So now it's time to talk about the challenges of account abstraction. People have been talking about it for 10 years, and it's still not on mainnet. This is because it's actually hard. If you see somebody talking about validation scope in account abstraction, you immediately think about this picture. The main problems that account abstraction creates are the cross-dependencies between transaction validations, and the complexity you get in building blocks efficiently and maintaining a decentralized peer-to-peer mempool efficiently. So let me try to explain these problems. The cross-transactional dependencies look like this. You have transaction 4. It modifies the state, and it makes transaction 5 invalid. So when you received transaction 5 individually, it seemed valid to you because A was equal to 0. But now you started building a block, you include transaction 4 first, and now A equals 1 and it's not a valid transaction. In order to work around this issue in general, we just have to split the transaction into two parts and have the validations run separately from executions, in their separate place in the block. So these are still three account abstraction transactions, but their validation parts are separated from them, and they run before any execution code starts running. Now you may ask: but what if validations invalidate each other? What if the validation code changes state that another validation uses? In general, that would be possible. And in order to prevent that, we need to sandbox the validation code to prevent it from doing certain things that it should not be allowed to do. So what are those things? It's accessing other people's storage, and accessing environment opcodes. Environment opcodes are block number, timestamp, base fee, everything that may change between validation and execution or between phases. And other people's code is code in other contracts, unless it is in a mapping mapped to your address.
On layer 2s, it's also any stateful precompile. Doing this is illegal; all other things are allowed in validation. And that allows us to do many great things: you can use tokens, you can transfer tokens, you can do all of that in validation functions. There are other small complexities. One example is that you don't need to invalidate a transaction to make it hard for a block builder to build a block with account abstraction. One good example is unused gas, and using unused gas as an attack vector. For example, you're building a block, you include five transactions, and you see that you still have a lot of available space because transaction 4 requested a 10 million gas limit but didn't use it, so you start adding another transaction to your block. And what happens is, transaction 4 saw the change in the state and started using more gas. Now you have a recursive chicken-and-egg problem, because now transaction 6 doesn't fit your block and you need to exclude it, and you are back to square one. We do solve this by introducing a gas charge on unused gas. But I'm just using it to showcase the kind of problems we need to solve when implementing account abstraction natively. Another thing is maintaining an efficient mempool: to receive a huge number of transactions simultaneously and validate them, you need to parallelize their validation. So assume here's a block builder. It has six CPU cores, and it's performing validation of six transactions in parallel. It runs them individually, meaning they don't access each other's state. And if it finds a transaction that is not valid individually, it gets excluded from block building. The next step for the block builder is to validate all transactions that remain in its mempool and build a valid block. If we were not able to separate the scope of the validation code in one of these transactions, what could happen is a mass invalidation event, when one transaction changes some state and makes all other transactions in your block invalid. And that provides a huge DDoS vector for mempool participants and block builders, because we don't want them propagating invalid blocks. Now, developers who are interested, especially if you're working on layer 2s: what can you do to make native account abstraction happen? First, do get in touch with us on any of our channels, Discord, Telegram, and let's talk about native account abstraction. Do read and get familiar with both RIP-7560 and EIP-7701; we are looking for feedback on them. And you can also dive into the code; this is a link to our reference implementation of RIP-7560. There is also a RollCall event for the RIP process, I guess everybody knows it. Just join it, add RIP-7560 to the agenda, and discuss it with other layer 2s. And let's start building it. So here are our websites, and that's it for now. Thank you. Thank you so much, Alex. Let me start with questions. The first one is: if 4337 bundlers don't yet support the aggregation portion of the spec, how can we be ready to make it part of the protocol? Yeah, so aggregation is a part of 4337. It will be part of native account abstraction eventually. We have a draft EIP for that. However, it's a complex topic in itself. Aggregated signatures are complex, and there has been little adoption of aggregation yet. It does present additional challenges in the context of native account abstraction, but I think we will overcome them. And again, our approach is to make all these changes very, very modular.
We don't want to make one mega account abstraction spec and implement it in one fork. We want to make basically as many EIPs as reasonably possible, so the chains can adopt them meaningfully, but one by one. Next one: how critical and how centralized is the bundler? So in ERC-4337, the bundler is critical. Not very centralized if you use it correctly: if you're using the mempool, you don't depend on any centralized bundler. With 7560, the role of the bundler changes. It becomes more of an assistant to the block builder, and then it's as decentralized as the underlying network. There is no added centralization vector on the bundlers. Does that make sense? Yeah, okay. Thank you. Next one: we have 4337, 7560, and 7702. Can you describe how they all play nicely with each other and how we can avoid, once again, fragmentation in this space, in this case with regard to account abstraction solutions? Right. So 7702 does not pose any fragmentation challenges whatsoever, because this is just a new feature of the EVM, and I assume it will get supported pretty widely, and it's also an addition to the account abstraction roadmap. With 4337 and 7560, we keep the user flow so similar that there can be very minimal friction in terms of fragmentation, because if you have two accounts and their differences are so minimal, I assume most wallets would support both, and both of them will coexist peacefully for the period between now and when 7701 is implemented on mainnet. So they are all still part of the same ecosystem; it's not fragmented. It's just different flavors of the same thing, if that makes sense. What's your recommended roadmap for MetaMask to achieve the end goal of account abstraction? Yeah, so for MetaMask and other EOA wallets, it would be great if they started looking into account abstraction. They can start with 7702, with getting all users to also include some kind of code in their EOAs. It can be very simple, like recovery or gas sponsoring, but crucially they will get production experience of having and managing a fleet of smart contract wallets, because up until now, most wallets have only been managing the private-key part and did not look at users' wallets as smart contracts. And after that, there is no reason for MetaMask not to use ERC-4337 wallets. For people who are starting now, you probably don't need to steer them into using 7702 if they are newcomers. You can just get them to deploy an upgradeable proxy smart contract wallet. And as long as it's an upgradeable proxy, they can evolve together with account abstraction, with versioning and all that. Right? What do you mean by the \"other code\" section in 7701? Yeah, this is referring to this slide. I don't have slides. So you can have any number of code sections in an EOF account. Any code that does not have a role assigned to it, as we described it, is just external code, so other contracts or other addresses can call into it. If the code is assigned a role, you cannot call into it; it has to be executed as part of an EIP-7701 transaction. So this is the difference between other code and code with an assigned purpose in terms of account abstraction. Now, the last question.
What's the point of separating validation and execution in a theorem's account obstruction if conflicts can still arise during execution, even after validation under restricted rules? Yeah, great question. The problem is if the execution conflicts with each other, you can get a state you didn't initially want. For example, you can get a transaction to revert, but the transaction is still valid. And another thing is execution is not limited, not sandboxed. It can be a 20 million gas operation, and it can write all the storage in the world. Validation is limited and sandboxed. So we extract all the validations because we assume they are small functions, relatively, like I don't want to say pure, but clean functions that only access accounts on storage and if they were to collide that would make block invalid. The difference between the reverted transaction in block and in the valid block is the difference between a block builder being DDoSed and somebody having to redo his action.", + "sources_streamethId": "67347bee9dbb7a90e163940f", "eventId": "devcon-7", - "slot_start": 1731562200000, - "slot_end": 1731564000000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1sZ2P4U7wWwVav4ska4SCGMtylu-lx2sWw0ymD92gTtY", - "resources_slides": null, + "slot_start": 1731484800000, + "slot_end": 1731485700000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/18GeJQc_Z-ecQcsBsDbtRfawOuvBvptEDPby_7BDdqUU", + "resources_slides": "https://drive.google.com/file/d/1cgYKFUdZHK8ss3hDYnFg-G6oL5j3F0Fn/view", "speakers": [ - "alex-forshtat" + "glory-justin" ] }, "vector": [ @@ -530716,7 +529214,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -530728,6 +529225,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -531465,6 +529963,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -531475,6 +529974,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -531505,8 +530005,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -531682,10 +530180,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -531816,7 +530310,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -532026,11 +530519,11 @@ 0, 0, 0, + 2, 0, 0, 0, 2, - 2, 0, 0, 0, @@ -532048,42 +530541,47 @@ }, { "session": { - "id": "native-implementation-of-ephemery-testnet-on-besu-and-teku-client-pairs", - "sourceId": "EAABPS", - "title": "Native Implementation of Ephemery Testnet on Besu & Teku Client Pairs", - "description": "This presentation covers the work done to enable native support for Ephemery on Teku and Besu clients. Ephemery is a short-lived, resettable testnet designed to automatically revert to its genesis state after a defined period, making it ideal for intensive, short-term testing without concerns over issues like insufficient testnet funds, inactive validators, or state bloat that long-running testnets often face.", - "track": "[CLS] EPF Day", - "type": "Lightning Talk", + "id": "navigating-developer-liability-in-open-source-code", + "sourceId": "EXNLU9", + "title": "Navigating Developer Liability in Open-Source Code", + "description": "In software development, open-source code has become a cornerstone of innovation and collaboration. However, with this comes the issue of developer liability. As seen by the Tornado Cash case, developers and users can be held liable for how open-source code is used, showing the need for developers to be aware of, and navigate, the legal landscape to mitigate potential risks. 
This session will demystify the legal implications for developers contributing to and using open-source code projects.", + "track": "Coordination", + "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "Consensus", - "Developer Infrastructure", - "User Experience" + "DevEx", + "Open Source Software", + "Regulation", + "developer", + "liability", + "DevEx", + "Open Source Software", + "Regulation" ], "keywords": [ - "Client Development", - "Teku", - "Besu", - "Ephemery" + "developer", + "liability" ], - "duration": 847, + "duration": 1396, "language": "en", - "sources_swarmHash": "4be7f94081d144df637bc9d6a640778b80c300ec6a7e9d2689a3fc17055bfd24", - "sources_youtubeId": "Rpg9irPn3jI", + "sources_swarmHash": "725daf103ad2d9e99dbbfb3874409fc3934d26162f1afaa8aacebb484caaaa25", + "sources_youtubeId": "APCQJY953Q8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67347bee9dbb7a90e163940f", + "sources_streamethId": "6736f97a74749a4b899f37a4", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731484800000, - "slot_end": 1731485700000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/18GeJQc_Z-ecQcsBsDbtRfawOuvBvptEDPby_7BDdqUU", - "resources_slides": null, + "slot_start": 1731651000000, + "slot_end": 1731652800000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1FCTkULbE1nJ5N4av3cRDnv1nW2exLL3rZv06S06zjGU", + "resources_slides": "https://drive.google.com/file/d/1TuD-E-NGpOXRS5IlB6qGqgdV4NEvHZ5G/view", "speakers": [ - "glory-justin" + "eva-wong" ] }, "vector": [ @@ -532098,10 +530596,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -532571,32 +531065,11 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -532843,7 +531316,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -532854,7 +531326,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -532960,6 +531431,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -532968,6 +531440,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -533035,6 +531508,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -533281,6 +531755,38 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -533385,23 +531891,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, 0, 0, 0, @@ -533411,6 +531900,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -533423,52 +531916,52 @@ }, { "session": { - "id": "navigating-developer-liability-in-open-source-code", - "sourceId": "EXNLU9", - "title": "Navigating Developer Liability in Open-Source Code", - "description": "In software development, open-source code has become a cornerstone of innovation and collaboration. However, with this comes the issue of developer liability. As seen by the Tornado Cash case, developers and users can be held liable for how open-source code is used, showing the need for developers to be aware of, and navigate, the legal landscape to mitigate potential risks. 
This session will demystify the legal implications for developers contributing to and using open-source code projects.", - "track": "Coordination", - "type": "Talk", + "id": "navigating-stablecoin-yields-and-risks", + "sourceId": "YT9SMK", + "title": "Navigating Stablecoin Yields and Risks", + "description": "This panel brings DeFi experts together to discuss stablecoin risks, including economic risks related to stabilisation methods, technical risks of smart contracts, and regulatory challenges. We will discuss solutions that can help mitigate risks in this rapidly evolving space and the challenges of promoting risk-driven decisions over trend-driven ones.", + "track": "Cryptoeconomics", + "type": "Panel", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "DevEx", - "Open Source Software", - "Regulation", - "developer", - "liability", - "DevEx", - "Open Source Software", - "Regulation" - ], "keywords": [ - "developer", - "liability" + "Stablecoin", + "DeFi" + ], + "tags": [ + "Frameworks", + "Best Practices", + "defi", + "Best Practices", + "Frameworks" ], - "duration": 1396, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "0162dccab9a5b86cc3b69290b4f0d831598fc81773c7ecd199c88267fcfe7814", + "sources_youtubeId": "AsJS4vv0J8Y", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736f97a74749a4b899f37a4", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", + "speakers": [ + "ariah-klages-mundt", + "tammy-yang", + "alessandro-buser", + "colin-platt" + ], "eventId": "devcon-7", - "slot_start": 1731651000000, - "slot_end": 1731652800000, + "slot_start": 1731488400000, + "slot_end": 1731492000000, "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1FCTkULbE1nJ5N4av3cRDnv1nW2exLL3rZv06S06zjGU", - "resources_slides": null, - "speakers": [ - "eva-wong" - ] + "resources_presentation": "https://docs.google.com/presentation/d/15OlMPy7qIjacZlozudJLl0FrCp0kPt_kx5nIRNHipwE", + "resources_slides": "" }, "vector": [ 0, 0, + 6, 0, 0, 0, @@ -533478,7 +531971,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -533953,11 +532445,9 @@ 0, 0, 6, - 0, - 0, - 0, - 0, - 0, + 6, + 6, + 6, 0, 0, 0, @@ -534316,9 +532806,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -534393,7 +532880,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -534412,6 +532898,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -534641,7 +533129,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -534785,9 +533272,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -534803,42 +533290,38 @@ }, { "session": { - "id": "navigating-stablecoin-yields-and-risks", - "sourceId": "YT9SMK", - "title": "Navigating Stablecoin Yields and Risks", - "description": "This panel brings DeFi experts together to discuss stablecoin risks, including economic risks related to stabilisation methods, technical risks of smart contracts, and regulatory challenges. We will discuss solutions that can help mitigate risks in this rapidly evolving space and the challenges of promoting risk-driven decisions over trend-driven ones.", - "track": "Cryptoeconomics", - "type": "Panel", + "id": "neuroai-for-ai-safety", + "sourceId": "ANUNJW", + "title": "NeuroAI for AI safety", + "description": "Powerful unaligned AIs pose risks to humans. This talk will explore how neuroscience-inspired AI–or NeuroAI–can lead to a deeper understanding of the human brain, and help us build more secure AI. 
I’ll connect these ideas to d/acc, arguing that neuroAI can play an enabling role in creating technologies that are inherently defense-favoring and promote human well-being.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Academic", "featured": false, "doNotRecord": false, + "tags": [], "keywords": [ - "Stablecoin", - "DeFi" - ], - "tags": [ - "Frameworks", - "Best Practices", - "defi", - "Best Practices", - "Frameworks" + "d/acc" ], + "duration": 476, "language": "en", - "speakers": [ - "ariah-klages-mundt", - "tammy-yang", - "alessandro-buser", - "colin-platt" - ], + "sources_swarmHash": "0f032086c3904fa7333685c2ecd4f5fe0ff0a750e1ffc2fea31c8898de4951fb", + "sources_youtubeId": "5Wm5rR_L32g", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673578c69dbb7a90e1b6afc1", "eventId": "devcon-7", - "slot_start": 1731488400000, - "slot_end": 1731492000000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/15OlMPy7qIjacZlozudJLl0FrCp0kPt_kx5nIRNHipwE" + "slot_start": 1731556680000, + "slot_end": 1731557160000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1c6dtMFBwrLngeeenxxoRO7mPchxToNPT-x38ox27h0o", + "resources_slides": "https://drive.google.com/file/d/1QJGFrKubZn_dDlpbpLCHtzpxjZ1CbkVn/view", + "speakers": [ + "patrick-mineault" + ] }, "vector": [ - 0, 0, 6, 0, @@ -535324,14 +533807,13 @@ 0, 0, 0, - 6, - 6, - 6, - 6, 0, 0, 0, 0, + 6, + 0, + 0, 0, 0, 0, @@ -535620,7 +534102,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -535694,7 +534175,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -535780,7 +534260,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -536156,7 +534635,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -536166,6 +534644,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -536174,36 +534653,37 @@ }, { "session": { - "id": "neuroai-for-ai-safety", - "sourceId": "ANUNJW", - "title": "NeuroAI for AI safety", - "description": "Powerful unaligned AIs pose risks to humans. This talk will explore how neuroscience-inspired AI–or NeuroAI–can lead to a deeper understanding of the human brain, and help us build more secure AI. I’ll connect these ideas to d/acc, arguing that neuroAI can play an enabling role in creating technologies that are inherently defense-favoring and promote human well-being.", + "id": "neurons-to-networks-whole-brain-emulation", + "sourceId": "ZMP7AG", + "title": "Neurons to Networks: Whole Brain Emulation", + "description": "The pursuit of whole brain emulation (WBE) represents one of humanity's most ambitious scientific endeavors, requiring unprecedented coordination between neuroscience, computer science, and institutional frameworks. 
This talk examines the evolving landscape of WBE research through the lens of institutional support mechanisms, with particular focus on the pioneering role of the Foresight Institute in fostering early discourse around brain emulation technologies (Fellowships, Prizes, Grants)", "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Academic", + "expertise": "Beginner", + "audience": "Research", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [ - "d/acc" + "keywords": [], + "tags": [ + "DeSci" ], - "duration": 476, "language": "en", - "sources_swarmHash": "0f032086c3904fa7333685c2ecd4f5fe0ff0a750e1ffc2fea31c8898de4951fb", - "sources_youtubeId": "5Wm5rR_L32g", + "sources_swarmHash": "2b833cbe82a2cdc4ecf891423a6de6042b4b61c5725a4c3152630f81f792fcf4", + "sources_youtubeId": "Q5o_IDX9H8U", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673578c69dbb7a90e1b6afc1", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "niamh" + ], "eventId": "devcon-7", - "slot_start": 1731556680000, - "slot_end": 1731557160000, + "slot_start": 1731555480000, + "slot_end": 1731555960000, "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1c6dtMFBwrLngeeenxxoRO7mPchxToNPT-x38ox27h0o", - "resources_slides": null, - "speakers": [ - "patrick-mineault" - ] + "resources_presentation": "https://docs.google.com/presentation/d/10jF6UddyGhtID4JHs8yJOU2RqS0OrJMOgHWiqU5tBx0", + "resources_slides": "https://drive.google.com/file/d/1IzxXQxixFVzZx9Y1fb_LHA7JiMFwX4Wc/view" }, "vector": [ 0, @@ -537088,10 +535568,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -537523,6 +536000,7 @@ 2, 0, 0, + 2, 0, 0, 0, @@ -537533,38 +536011,40 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0 ] }, { "session": { - "id": "neurons-to-networks-whole-brain-emulation", - "sourceId": "ZMP7AG", - "title": "Neurons to Networks: Whole Brain Emulation", - "description": "The pursuit of whole brain emulation (WBE) represents one of humanity's most ambitious scientific endeavors, requiring unprecedented coordination between neuroscience, computer science, and institutional frameworks. 
This talk examines the evolving landscape of WBE research through the lens of institutional support mechanisms, with particular focus on the pioneering role of the Foresight Institute in fostering early discourse around brain emulation technologies (Fellowships, Prizes, Grants)", + "id": "neurotech-humanitys-next-frontier", + "sourceId": "GMSXUV", + "title": "Neurotech - humanity’s next frontier", + "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Research", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, + "tags": [], "keywords": [], - "tags": [ - "DeSci" - ], + "duration": 617, "language": "en", - "speakers": [ - "niamh" - ], + "sources_swarmHash": "363f6e1847ca2d59e02168e9772ee69e6a04064044d084783d5277e0122d51ec", + "sources_youtubeId": "aM62xYINTx4", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673574789dbb7a90e1a6c27d", "eventId": "devcon-7", - "slot_start": 1731555480000, - "slot_end": 1731555960000, + "slot_start": 1731555000000, + "slot_end": 1731555480000, "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/10jF6UddyGhtID4JHs8yJOU2RqS0OrJMOgHWiqU5tBx0" + "resources_presentation": "https://docs.google.com/presentation/d/17GDo2qkBsW9cNEfQVEMckFKyyYZZ0KEwY1Wo37pv0iM", + "resources_slides": "", + "speakers": [ + "juan-benet" + ] }, "vector": [ 0, @@ -538452,27 +536932,21 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -538898,14 +537372,15 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "neurotech-humanitys-next-frontier", - "sourceId": "GMSXUV", - "title": "Neurotech - humanity’s next frontier", + "id": "neurotechnology-opportunities-and-challenges", + "sourceId": "EJZNQX", + "title": "Neurotechnology: Opportunities and Challenges", "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", @@ -538915,21 +537390,23 @@ "doNotRecord": false, "tags": [], "keywords": [], - "duration": 617, + "duration": 709, "language": "en", - "sources_swarmHash": "363f6e1847ca2d59e02168e9772ee69e6a04064044d084783d5277e0122d51ec", - "sources_youtubeId": "aM62xYINTx4", + "sources_swarmHash": "852eaee2e37a13f88b9cbb6db73a2435d4ae6ac9829466f8639a54b6942172fc", + "sources_youtubeId": "Q5o_IDX9H8U", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673574789dbb7a90e1a6c27d", + "sources_streamethId": "673575df9dbb7a90e1ac4199", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673575df9dbb7a90e1ac4199.vtt", + "transcript_text": " Because you've had three talks so far. Okay, cool. 
Right, so kia ora, my name is Niamh, I am from Foresight Institute, I'm Chief of Innovation there, and I care a lot about whole brain emulation. So we are a non-profit in San Francisco, we're a 501(c)(3), and we were co-founded by Eric Drexler, who's the godfather of nanotechnology, and Christine Peterson, who coined the term open source. We're 40 years old, we're currently an all-woman team, and we're looking to accelerate whole brain emulation. So we have five key focus areas, nanotech, neurotech, biotech, space tech, and computation, with an overarching track called existential hope, in which we hope to build out flourishing futures. As chief of innovation and strategy, I oversee the fellowships, the prizes, and the grants, and I also build out future programs, two of which we will be discussing here today. One is a lo-fi whole brain emulation mouse brain prize, which we'll be launching next year, as well as our whole brain emulation fast grants and grand prize, because we are looking to accelerate this field dramatically. So what is whole brain emulation? You've heard about it from Juan, but essentially it's the uploading of the brain onto the platform. Why do we want to do this? Because with the, you know, short timelines, we think that this is a really great way that we might be able to coordinate and cooperate rather than just, yeah, evolving essentially with AIs, which is what we want to do. We also know that neurotech is super cool and that it's going to help us with modern day science and medicine, cognitive enhancement, space tech, who doesn't want to jump in a spaceship right now and travel two million light years away? I do. I think it's kind of sad that I'm limited to my own physical being right now. And I think also that whole brain emulation is possibly going to help us with better understanding consciousness. Obviously, just like whole brain emulation has never been done before, the human brain has never been mapped before, consciousness itself has also never really been defined properly. So we've got a lot to cover, and this is why it really is the new frontier. So I joined Foresight two years ago. One of the first projects that I really built out was our first whole brain emulation workshop. This here was a pretty big deal because it was the very first in the world. And what we did is we brought together the original authors of the roadmap and asked them, alongside AI alignment researchers and neuroscientists, like, why has this stagnated? The report was written 20 years ago. What's happened? Is this still relevant? And what bottlenecks do we need to overcome? The biggest thing for them was money. And it was also because money is really hard to get when there's a lot of stigma involved with whole brain emulation. It's kind of seen as a little bit weird, a little bit icky, even though as we've just listed, there's a lot of benefits to actually exploring this. And so that's what I can do and that's how I can help them. As you can see here, it's blown up at Foresight. Our community is like three times bigger than it has been and just keeps growing. We actually had to reject a bunch of people, which is pretty sad to do from a workshop. We are already giving out, with our AI safety grants, money to help people that are working in neurotech for AI, so safer AI with neurotech.
These are some of our grantees. I don't know if you've read it yet, but Mark posted on LessWrong. It's very, very good, so please take the time to read it. And this is Catalin's first whole brain emulation wet lab for mouse brains. It's a pretty big deal because it's the very first in Europe, and we helped fund that. So we are doing all that we can to progress things, and we're building out as we go. Why is our community worth supporting? Because they're pretty cool, and they're pretty good at what they do, and they're all radical moonshots. So we've now had two Nobel Prize winners come out of our Feynman Prizes. As you can see, we've got Sir Fraser Stoddart out here. He submitted an essay, and the exact same essay nine years later went on to win a Nobel Prize. And then this year, we also had David Baker win a Nobel Prize with the same work that he submitted 20 years earlier. So a little bit more of a long game there, but we're with him all the way. And as you might see, Jean Hébert, who's a pretty big deal in longevity, the Amaranth Foundation named his work one of the cornerstones, and it's about essentially replacement therapies for ageing brain tissue. So we're all on the cusp there. Great fellows, including some of our core community. I see Michael Andregg in the audience. He's working on hi-fi approaches to mouse brains at the moment. He's very good at hardware, so, you know, chat to him if you're interested. Please do. I've got some incredible fellows. This guy here, Akash, is working in India using wearables to better understand scent, which is pretty unmapped at the moment. But also these BCIs are directly capturing EEG data and training AI so that we have better diagnostic tooling for cancer, which is pretty cool. Sumner Norman is working on ultrasound for BCIs with his technology and his FRO. Not only has he mapped the human brain as best as it has been yet with ultrasound, he's also managed to turn ultrasound, which is normally like the size of a fridge, into a teeny tiny chip, which is this size. And two weeks ago, I stood in my living room with this chip and found my own heart valve beating on my own TV screen with it, which is pretty huge. I mean, this is kind of what happens when you start exploring things, because you start seeing how innovation can just, like, fly, and that's pretty great for humanity. So there are a lot of enabling technologies already pushing toward whole brain emulation. If you haven't seen yet, we've just mapped the fruit fly brain for the first time. That's 138,000 neurons, which is a pretty big deal. You should watch the video in Science if you haven't already. So that's why we think it's possible. We are launching next year a mouse brain lo-fi emulation prize. That will be a million dollars. A, that incentivises researchers to start working in this field. It also just helps progress things altogether. We think the timeline for this to be won is two to three years, but actually, simply through positive competition, this might be won sooner than that, which would be even greater, because when we're looking at shortening timelines, we need to do all that we can to find progress, and so that's what's happening there. We are hoping to launch a whole-brain emulation fast grants and grand prize. So similar to Metaculus grants and the COVID fast grants, we know that we can stimulate an industry this way.
We can help people with their pre-R&D proofs so that they can then go to the VCs with the proof and actually get the money to build it out. But we need a little bit of help first. And so that's where we're hoping to lean in and be able to actually create space for this. And obviously, like, $20 million is, like, the minimum that we're working with right now. If we can grow that further, then we can obviously, like, help more people and accelerate this even further. So that's, like, a big deal for us. Also, a $20 million prize for whole brain emulation of a human brain is definitely going to get people up and at 'em. That's what we think. So come chat to me, because I know that we're pretty short on time, sorry if I went a little bit long, if you'd like to know more about what we're doing and also want to get involved with whole brain emulation. There are a lot of risks, but we do want to do it the right way, and we think we have the community and the means to be that. Yeah, thank you. Thank you, Niamh. Again, if you'd like to go outside and ask her any questions, please do. Next up, we have our first remote speaker of the day, Milan Cvitkovic, he's building a startup in neurotech right now, so can't be with us today, but I would love to bring Milan onto the screen. We'll be ready to go. Thanks so much, Janine. Let's see, can I be heard? I heard a yes. Okay, excellent. Awesome. Well, I'm very sad I couldn't be there in person. The pictures have been absolutely amazing that I've been seeing, but it's an absolute pleasure to be here virtually. My name, as Janine said, is Milan Cvitkovic. I'm the co-founder of Integral, a startup based in San Francisco, building deep brain implants to treat severe neurological and psychiatric disease. But I'm not here today on behalf of my company, but rather as an advocate for the entire space of neurotechnology development. I'm often asked to talk about why I think neurotechnology is so important, but you all are very lucky that you have already heard Juan and have Patrick later in this session, who are making that case much more compellingly than I'm sure that I would. So I will just reiterate that I think neurotechnology is indeed a critical technology to ensure humanity's successful transition to an AI world. But moreover, even in a world where AI didn't exist, I would contend that neurotechnology is definitionally the most transformative sector of technological progress for enhancing the human experience and improving human well-being, inasmuch as the definitions of experience and well-being are that they are things produced by brains. Instead, though, I was asked to talk about opportunities and challenges. So let's get into that, and we'll see if my AV works. Did that slide change? I think that's a yes. All right. So for those who aren't familiar, let me just give a far too brief and absolutely non-exhaustive context for the state of the field of neurotechnology as of 2024. So a neurotechnology is any tool that directly, exogenously, so not thinking, but somehow externally, observes or manipulates the state of biological nervous systems, and in particular the human brain is often the one we care about, although animal communication is obviously an exciting prospect. The definition that I just gave there encompasses a huge range of technologies and also goals that we might want to achieve with them. With respect to the many goals, Juan already went over some of this. And to be clear, I'm not intending anyone to be able to read the list on the left.
I made it comically small to make a point, which is one of the actually kind of biggest opportunities and or challenges in the space, which is unlike a field like longevity or climate technology, where at least at a high level, I think there's some clarity and agreement on what the goal of the field is. Neurotechnology is a little bit more like the early days of the internet or speaking outside my expertise compared to those listening for sure. But what I imagined in the early days of the Ethereum ecosystem were like, which is everyone was really excited about building this platform, this fundamental technology, but it wasn't clear at the outset what all the killer use cases would be or even what would be possible or even what will be desirable. And neurotechnology is a little bit like that. It's something of a marketing problem for the field. We can talk about a lot of exciting use cases, but it doesn't pull the imagination to any one vision. So that's both an opportunity and a challenge, depending on how you look at things. On the right hand side, there are a number of existing and emerging technologies that we can use for observing and manipulating the brain. I'll just highlight some important ones and really not go into too much detail here at all. There's not time.", "eventId": "devcon-7", - "slot_start": 1731555000000, - "slot_end": 1731555480000, + "slot_start": 1731555960000, + "slot_end": 1731556680000, "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/17GDo2qkBsW9cNEfQVEMckFKyyYZZ0KEwY1Wo37pv0iM", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1SnBZ54kfiM59nvSu08JQKHZSM7N-GjLiVE_3-95veIw", + "resources_slides": "https://drive.google.com/file/d/1zrzBFrutxefs6lbXLtltvt8vEzrjlv3N/view", "speakers": [ - "juan-benet" + "milan-cvitkovic" ] }, "vector": [ @@ -540243,11 +538720,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -540269,41 +538741,49 @@ }, { "session": { - "id": "neurotechnology-opportunities-and-challenges", - "sourceId": "EJZNQX", - "title": "Neurotechnology: Opportunities and Challenges", - "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "", - "audience": "Engineering", + "id": "next-generation-amms-eliminating-lvr", + "sourceId": "8DCP9K", + "title": "Next Generation AMMs - Eliminating LVR", + "description": "Loss-Versus-Rebalancing (LVR) is the most significant form of MEV, yet it has the fewest solutions addressing it. LVR remains a significant challenge for AMMs. This session delves into a comprehensive analysis of how CoW AMM addresses the problem of LVR through its unique batch mechanism. 
Drawing from 9 months of empirical data, the talk will explore the effectiveness of CoW AMM in mitigating LVR and offer insights into the impact of LVR resistant design on trading outcomes and market efficiency", + "track": "Cryptoeconomics", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 709, + "tags": [ + "MEV", + "AMMs", + "lvr", + "AMMs", + "MEV" + ], + "keywords": [ + "LVR" + ], + "duration": 1516, "language": "en", - "sources_swarmHash": "852eaee2e37a13f88b9cbb6db73a2435d4ae6ac9829466f8639a54b6942172fc", - "sources_youtubeId": "Q5o_IDX9H8U", + "sources_swarmHash": "0f34f3bc88fee6ea2aa68cadc3132cdc7973a7cd04c404abb8c9b80574351b71", + "sources_youtubeId": "hOrDqlGcmJE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673575df9dbb7a90e1ac4199", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673575df9dbb7a90e1ac4199.vtt", - "transcript_text": " Because you've had three talks so far. Okay, cool. Right, so kia ora, my name is Niamh, I am from Foresight Institute, I'm Chief of Innovation there, and I care a lot about whole brain emulation. So we are a non-profit in San Francisco, we're a 501c3 and we were co-founded by Eric Drexler who's the godfather of nanotechnology and Christine Pedersen who coined the term open source. We're 40 years old, we're currently an all-woman team and we're looking to accelerate whole brain emulation. So we have five key focus areas, nanotech, neurotech, biotech, space tech, and computation with an overarching track called existential hope, in which we hope to build out flourishing futures. As chief of innovation and strategy, I oversee the fellowships, the prizes, and the grants, and I also build out future programs, two of which we will be discussing here today. One is a lo-fi whole brain emulation mouse brain prize, which we'll be launching next year, as well as our whole brain emulation fast grants and grand prize, because we are looking to accelerate this field dramatically. So what is whole brain emulation? You've heard about it from Juan, but essentially it's the uploading of the brain onto the platform. Why do we want to do this? Because with the, you know, short timelines, we think that this is a really great way that we might be able to coordinate and cooperate rather than just, yeah, evolving essentially with AIs, which is what we want to do. We also know that neurotech is super cool and that it's going to help us with modern day science and medicine, cognitive enhancement, space tech, who doesn't want to jump in a spaceship right now and travel two million light years away? I do. I think it's kind of sad that I'm limited to my own physical being right now. And I think also that whole brain emulation is possibly going to help us with better understanding consciousness. Obviously, just like whole brain emulation is possibly going to help us with better understanding consciousness. Obviously, just like whole brain emulation has never been done before, the human brain has never been mapped before, consciousness itself has also never really been defined properly. So we've got a lot to cover, and this is why it really is the new frontier. So I joined Foresight two years ago. One of the first projects that I really built out was our first whole brain emulation workshop. This here was a pretty big deal because it was the very first in the world. 
And what we did is we brought together the original authors of the roadmap and asked them alongside AI alignment researchers and neuroscientists, like, why is this stagnated? The report was written 20 years ago. What's happened? Is this stagnated? The report was written 20 years ago, what's happened? Is this still relevant? And what bottlenecks do we need to overcome? The biggest thing for them was money. And it was also because money is really hard to get when there's a lot of stigma involved with whole brain emulation. It's kind of seen as a little bit weird, a little bit icky, even though as we've just listed, there's a lot of benefits to actually exploring this. And so that's what I can do and that's how I can help them. As you can see here, it's blown up at Foresight. Our community is like three times bigger than it has been and just keeps growing. We actually had to reject a bunch of people which is pretty sad to do from a workshop. We are already giving out with our AI safety grants money to help people that are working in our neurotech for AI, so safer AI with neurotech. These are some of our grantees. I don't know if you haven't read yet, but Mark posted on LessWrong. It's very, very good, so please take the time to read it. And this is Catalin's first whole brain emulation wet lab for mouse brains. It's a pretty big deal because it's the very first in Europe, and we help fund that with that. So we are doing all that we can to progress things, and we're building out as we go. Why is our community worth somewhat supporting? Because they're pretty cool, and they're pretty good at what they do, and they're all radical moonshots. shots. So we've now had two Nobel Prize winners come out of our Feynman Prizes. As you can see, we've got Sir Fraser Stott out here. He submitted an essay, the exact same essay nine years later went on to win a Nobel Prize. And then this year, we also had Nick Baker win a Nobel Prize with the same work that he submitted 20 years earlier. So a little bit more of a long game there, but we're with him all the way. And as you might see, Jean Herbet, who's a pretty big deal in longevity, the Amaranth Foundation named his work One of the Cornerstones, and it's about essentially replacement therapies for ageing brain tissue. So we're all on the cusp there. Great fellows, including some of our core community. I see Michael Andregs in the audience. He's working on hi-fi approaches to mouse brains at the moment. He's very good at hardware, so, you know, chat to him if you're interested. Please do. I've got some incredible fellows. This guy here, Akash, is working in India using wearables to better understand scent, which is pretty unmapped at the moment. But also these BCIs are directly capturing EEG data and training AI so that we have better diagnostic tooling for cancer, which is pretty cool. Samner Norman is working on ultrasound for BCIs with his technology and his FRO. Not only has he mapped the human brain as best as it has been yet with ultrasound, he's also managed to turn ultrasound, which is normally like the size of a fridge, into a teeny tiny chip, which is this size. And two weeks ago, I stood in my living room with this chip and found my own heart valve beating on my own TV screen with it, which is pretty huge. I mean, this is kind of what happens when you start exploring things, because you start seeing how innovation can just like fly, and that's pretty great for humanity. 
So there are a lot of enabling technologies already pushing toward whole brain emulation. If you haven't seen yet, we've just mapped the fruit fly brain for the first time. That's 138,000 neurons, which is a pretty big deal. You should watch the video on science if you haven't already. So that's why we think it's possible. We are launching next year a mouse brain lo-fi emulation prize. That will be a million dollars. A, that incentivises researchers to start working in this field. It also just helps progress things all together. We think the timeline for this to be one is two to three years, but actually, simply through positive competition, this might be one sooner than that, which would be even greater because when we're looking at shortening timelines, we need to do all that we can to find progress, and so that's what's happening there. We are hoping to launch a whole-brain emulation fast grants and grand prize. So similar to Metaculous grants and the COVID fast grants, we know that we can stimulate an industry this way. We can help people with their pre-R&D proofs so that they can then go to the VCs with the proof and actually get the money to build it out. But we need a little bit of help first. And so that's where we're hoping to lean in and be able to actually create space for this. And obviously, like, $20 million is, like, the minimum that we're working with right now. If we can grow that further, then we can obviously, like, help more people and accelerate this even further. So that's, like, a big deal for us. Also, a $20 million prize for whole brain emulation of a human brain is definitely going to get people up in Adam. That's what we think. So come chat to me because I know that we're pretty short on time. Sorry if I went a little bit long. If you'd like to know more about what we're doing and also want to get involved with whole brain emulation. There are a lot of risks but we do want to do it the right way and we think we have the community and the means to be that. Yeah, thank you. Thank you, Neve. Again, if you'd like to go outside and have anyone ask questions, please do. Next up, we have our first remote speaker of the day, Milan Czikovic, he's building a startup in Neurotech right now, so can't be with us today, but I would love to bring Milind onto the screen. We'll be ready to go. Thanks so much, Janine. Let's see, can I be heard? I heard a yes. Okay, excellent. Awesome. Well, I'm very sad I couldn't be there in person. The pictures have been absolutely amazing that I've been seeing, but it's an absolute pleasure to be here virtually. My name, as Janine said, is Milan Svitkovich. I'm the co-founder of Integral, a startup based in San Francisco, building deep brain implants to treat severe neurological and psychiatric disease. But I'm not here today on behalf of my company, but rather as an advocate for the entire space of neurotechnology development. I'm often asked to talk about why I think neurotechnology is so important, but you all are very lucky that you have already heard Juan and have Patrick later in this session who are making that case much more compellingly than I'm sure that I would. So I will just reiterate that I think neurotechnology is indeed a critical technology to ensure humanity's successful transition to an AI world. 
But moreover, even in a world where I didn't exist, I would contend that neurotechnology is definitionally the most transformative sector of technological progress for enhancing the human experience and improving human well-being in as much as the definitions of experience and well-being are that they are things produced by brains. Instead, though, I was asked to talk about opportunities and challenges. So let's get into that, and we'll see if my AV works. Did that slide change? I think that's a yes. All right. So those who aren't familiar, let me just give a far too brief and absolutely non-exhaustive context for the state of the field of neurotechnology as of 2024. So a neurotechnology is any tool that directly exogenously, so not thinking, but somehow externally observes or manipulates the state of biological nervous systems, and in particular the human brain is often the one we care about, although animal communication is obviously an exciting prospect. The definition that I just gave there encompasses a huge range of technologies and also goals that we might want to achieve with them. With respect to the many goals, Juan already went over some of this. And to be clear, I'm not intending anyone to be able to read the list on the left. I made it comically small to make a point, which is one of the actually kind of biggest opportunities and or challenges in the space, which is unlike a field like longevity or climate technology, where at least at a high level, I think there's some clarity and agreement on what the goal of the field is. Neurotechnology is a little bit more like the early days of the internet or speaking outside my expertise compared to those listening for sure. But what I imagined in the early days of the Ethereum ecosystem were like, which is everyone was really excited about building this platform, this fundamental technology, but it wasn't clear at the outset what all the killer use cases would be or even what would be possible or even what will be desirable. And neurotechnology is a little bit like that. It's something of a marketing problem for the field. We can talk about a lot of exciting use cases, but it doesn't pull the imagination to any one vision. So that's both an opportunity and a challenge, depending on how you look at things. On the right hand side, there are a number of existing and emerging technologies that we can use for observing and manipulating the brain. I'll just highlight some important ones and really not go into too much detail here at all. 
There's not time.", + "sources_streamethId": "6735a84e9dbb7a90e155c38e", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731555960000, - "slot_end": 1731556680000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1SnBZ54kfiM59nvSu08JQKHZSM7N-GjLiVE_3-95veIw", - "resources_slides": null, + "slot_start": 1731564000000, + "slot_end": 1731565800000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1Zivx1-urETlnczibMYsiNyH4-ey3zg3vSAD7YDHJeJk", + "resources_slides": "https://drive.google.com/file/d/1-wvMOXn0NNLpn1rX7wXgANIWlzn52dDP/view", "speakers": [ - "milan-cvitkovic" + "anna-george" ] }, "vector": [ 0, - 6, 0, + 6, 0, 0, 0, @@ -541051,6 +539531,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -541131,6 +539612,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -541470,6 +539952,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -541607,18 +540090,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, 2, 0, 0, @@ -541631,60 +540107,59 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "next-generation-amms-eliminating-lvr", - "sourceId": "8DCP9K", - "title": "Next Generation AMMs - Eliminating LVR", - "description": "Loss-Versus-Rebalancing (LVR) is the most significant form of MEV, yet it has the fewest solutions addressing it. LVR remains a significant challenge for AMMs. This session delves into a comprehensive analysis of how CoW AMM addresses the problem of LVR through its unique batch mechanism. Drawing from 9 months of empirical data, the talk will explore the effectiveness of CoW AMM in mitigating LVR and offer insights into the impact of LVR resistant design on trading outcomes and market efficiency", - "track": "Cryptoeconomics", + "id": "next-generation-based-rollups-a-practical-approach-to-unifying-ethereum", + "sourceId": "GHVK8E", + "title": "Next Generation Based Rollups: A Practical Approach to Unifying Ethereum", + "description": "I plan to speak on the concept of based sequencing (based rollups). I want to not only introduce the concept but also explain recent developments (what I like to call next generation based rollups). This includes based preconfirmations, fast->realtime proving, customizable composability, practical synchronous composability, among others. I will introduce I also plan to provide a brief summary to my Bankless Summit talk on ETH value accrual in the presence of based rollups.", + "track": "Layer 2", "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "MEV", - "AMMs", - "lvr", - "AMMs", - "MEV" + "Fragmentation", + "Frameworks", + "Layer 2s" ], "keywords": [ - "LVR" + "based rollups", + "sequencing", + "composability" ], - "duration": 1516, + "duration": 1378, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "ec4e2a052bdc5fc188946c250ffacdba021eaa61b2efc79b21737c18ae7d1f3a", + "sources_youtubeId": "Ier_f5V4_ow", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735a84e9dbb7a90e155c38e", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "673839411b0f83434dfbaecb", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673839411b0f83434dfbaecb.vtt", + "transcript_text": " . Hello, DevCon. 
All right, so we're going to talk about next generation based rollups today. There are some based rollups that are next generation. There are some based rollups that are last generation. You guys just learned about some. I'm going to tell you more about how you can become a next generation based rollup, kind of what the definition is, and then a little bit more about how I see the future of based rollups evolving. Cool, so in this first section we're just going to go through some of the traditional based rollup research, talk about the based rollups that have been, you know, explored in the past, some of the designs that have been proposed, and then we're gonna understand some of their flaws, some of their weaknesses. So one of the very first designs, which is used by Taiko today on mainnet, is total anarchy based sequencing. This is like kind of letter of the law, exactly what Justin Drake proposed last year, right? And it's very simple, very basic, right? It's called total anarchy because it uses an election system that is permissionless and allows anyone, that's the anarchy bit, to propose a block. So one key point here is that the sequencer is not elected beforehand. You don't know who the sequencer will be beforehand. You might know the Ethereum proposer because you can look at the Ethereum lookahead, we know 32 slots in advance, but the sequencer of the based rollup might not be the proposer. In Taiko's case, it is Taiko Labs for the majority of blocks. One other thing to note is that layer 2 block proposing is completely permissionless, right? There's no permissions involved. The way that this ends up playing out in practice is that we have a bunch of proposers in Taiko's system that give their blocks to the builder, that would be Titan, Wintermute, et cetera. And they would take these blocks, order them in the layer one block, each block proposal is a layer one transaction, and then send this completed block to the proposer through a relay and through the MevBoost auction, right? And then eventually the proposer would propose this to Ethereum once they've selected the block with the highest bid. So it's up to the proposer and the builder to choose which block created by these layer 2 sequencers or proposers actually gets onto the chain. Now, one design that aims to solve a few of the problems of the previous design is vanilla based sequencing. So Limechain, George Spasov, put out this excellent bit of research that explained how we can actually solve a few of the foundational problems with total anarchy based sequencing, the Taiko model. So the first thing that we try with vanilla based sequencing is a simple sequencer election, right? That gives us the ability to choose a sequencer based on some rule, have a primary rule, and then we have a fallback as well. So the fallback in vanilla based sequencing's case is just total anarchy. So there's still some total anarchy here. And the primary sequencing is based on some election mechanism that is not clearly defined and kind of left to implementation in the initial research. So the other thing that vanilla based sequencing includes is kind of very basic, primitive support for delegation, which is allowing the Ethereum proposer to give the rights to their based rollup block to an external builder without directly going through MevBoost. It's like external to the MevBoost auction.
Vanilla based sequencing also included basic support for pre-confirmations, which gives the, you know, we heard about it last talk, but it gives the proposer the ability to promise things about the block that they will eventually propose. It does require some sort of delegation or some increased hardware constraints on the proposer. And so the design for pre-confirmations has kind of wide-reaching implications for centralization vectors for stake and also for pre-confirmations in real time. Now, the other thing that vanilla based sequencing introduced that really caught my eye was revenue generation from a percent of congestion fees. And you'll hear Justin Drake talk about this all the time. You'll hear me talk about this. Based rollups can capture revenue from congestion trivially, right? These base fees, if you're doing an EIP-1559 EVM, right, they are represented on-chain and are kind of understandable from within the EVM, within the state transition function. So you can view things about the fees, you can take these and you can do things with them, right? You can direct them to a treasury, you can give some of them to the proposer, or you could return them back to users. So, kind of the problems with these that are actually really big problems: Taiko, live on mainnet, is extremely inefficient. All forks of Taiko without major changes will continue to be extremely inefficient. One other problem with kind of total anarchy and vanilla based sequencing is that there's kind of no built-in composability. Any composability is on a layer above the sequencing. And that's really unfortunate because whenever you have to build kind of another layer, you introduce complexity and centralization vectors of some sort. In the case of based rollups, that's quite possibly around pre-confirmations, which means we could have centralized Ethereum stake, which we will want to avoid. The other thing is MEV revenue. So lots of people still have this misconception that based rollups leak MEV revenue, and that's true for traditional based rollups, because they do not use an auction of any sort to kind of get an oracle on the MEV. So if we look into layer one MEV research, we'll see things like execution tickets and execution auctions that are all designed to get some oracle price, essentially, of the MEV in a block. Now, the other thing to note about based rollups, like I mentioned before, is inefficiency. So Taiko has about a $10,000 a day cost to do their sequencing. Before blobs, they had about $100,000 a day. This is a lot of money, clearly, and the kind of thing where you need a massive amount of users and network effects to actually be sustainable. And that's something we obviously want to avoid. But there is a solution. For some of these Taiko forks, we've been looking at doing slower block times. So Taiko now has a block time of 33 seconds instead of 12 seconds, which is of course Ethereum's block time. That, you know, allows their cost to come down to about a third, which is good for them. But it's also really bad for users. You get a worse user experience. So the way we can patch over this is with pre-confirmations. Just throw some pre-confs on top. The downside of that is that without doing your pre-confirmation designs in a clever way, you're going to end up with limited benefits. So your users are going to get very limited availability of pre-confirmations and of course limited composability. So unless you're using a shared pre-confirmation protocol, you're not going to have composability.
So we start breaking a lot of things when we add pre-confirmations. The other thing, in, you know, specifically Taiko's case, is that you lose composability with layer one, because pre-confirmations happen, you know, over the course of an entire epoch, perhaps, and during that time, layer one state could change a ton. And so the kind of current layer one proposer is very different from who's actually sequencing the rollup. That's not shared sequencing. That's not based. But we can do better. Traditional designs are not good enough. We have, over the past two years, we have new research. We have new teams. We have a lot more data about based rollups, because we have one live, and we have a lot better technology. So one of the big things that we'll be talking about in the next section is ZK, using SNARKs to save costs and improve efficiency. So let's kind of take a step back, let's think from first principles: with all the data, with all the research, and with all the teams that we have now, what are the things that we really want to get out of based sequencing? The first thing is, in my mind, the most important, which is synchronous atomic composability. And this is a buzzword for sure. What I think that means is atomic cross-chain contract calls that are sufficient for flash loans. And the reason for that is because flash loans are a foundational tool for doing atomic arbitrage between domains. So once you have cross-chain contract calls, you can do arbitrage. You can do flash loans for arbitrage. And this gives you the ability to essentially arbitrage prices on different domains, giving users kind of the best prices across chains. You can also use that for doing, like, a swap that uses multiple chains' liquidity. If I'm on Arbitrum, I could directly use Ethereum layer 1 liquidity without waiting for a service provider or some collateral. Now, that's pretty straightforward. The other thing to look at is a seamless user experience and developer experience. So anyone who's ever tried making an asynchronous application will know that it's ten times easier, I'm not joking, to build a synchronous application. If it's asynchronous, you have to deal with the case where something goes wrong and you have a desync. Now, the other thing is of course network effects, right? Once we have efficient synchronous composability between chains, we have the ability to build network effects on one chain and use that on another. And because Ethereum already has massive network effects, that means based rollups can use those network effects, that liquidity, those users, protocols, et cetera, to directly improve the user and developer experience on their rollup. Now, the other thing that we want, of course, all the time, is faster things and cheaper things. On the faster side, we're looking at good pre-confirmations that don't break things and that are fulfilled 100% of the time. Frequent block proposing. The more we talk to Ethereum, the more we can compose with Ethereum. Custom frontends. So this is kind of changing the Uniswap-style swap interface to something that actually represents what a pre-confirmation might look like. So that's a green check mark, some confetti of some sort. It doesn't seem like a lot technically, but it's actually really important to improving the user experience materially. One thing is getting rid of the approve-then-swap flow. You can get rid of that completely with pre-confirmations. Another thing is of course cheaper things, which is better execution.
So just writing your smart contracts better and not making the mistakes that we've made in the past when we were rushing to get code out. Aggregate everything. There's a slide on that, so I'll wait. And of course, efficiency of all shapes and sizes. Now, we want to do all of these things, and both of these things, I guess, without sacrificing decentralization and censorship resistance, liveness, and sustainability. So this is where, you can get faster and cheaper than layer one: you can do it on a centralized sequencer layer two. What you can't do is faster and cheaper with decentralization, censorship resistance, liveness, and sustainability, without some form of based sequencing, is my opinion. So we know what we want, it's not that complicated, we want fast things, we want cheap things, and we want everything to feel like one chain for developers and for users. Getting there: next generation based sequencing. This is the subject of these three companies up here, this is kind of the industry that we're in, RISE, Spire Labs, and Taiko Gwyneth. And we're taking a very practical approach to unifying Ethereum. So we're trying to avoid the kind of discussions where we get into the ivory tower semantics and actually build tangible, useful products that are good for users and developers. So let's get into the specifics. This is an intermediate talk, so we're going to get a little bit technical here. We're going to share literally everything. So when it comes to things we share, if the answer is no, then you're probably lying. We have deposits, proposing, blobs, proving, network effects, assets, contract calls, off-chain infrastructure, security, economies of scale, et cetera, et cetera, et cetera. So shared deposits, that's AggLayer. These are teams that are working to reduce the costs of posting ZK proofs to mainnet by aggregating ZK proofs. It also has the excellent benefit that you can do interop between layer twos without ever touching layer one. So you save a lot of costs with that as well. Shared proposing saves a lot of costs and enables atomic composability, again, between based rollups. Then, if we can share blobs: so if you're a based rollup and you're frequently checkpointing or posting to Ethereum, you're gonna be purchasing a blob for DA in most cases. And, well, they're pretty big, and most based rollups do not use 100% of the blob space in a single blob. So there's a lot of empty space. And there's a whole bunch of teams, Spire's kind of leading some of the research on this side, to figure out how we can have multiple rollups use the same blob. So we build some cryptoeconomic system to make everything a little more efficient and a lot cheaper for based rollups. We also look at things like shared proving to save costs, shared network effects. So we want the canonical ledger of last resort for every asset to be on layer one. We want fungibility between these. That's something we get with shared deposits, but it's a big priority in and of itself. Contract calls, of course, we want to do contract calls, we want to batch those, you know, that's pretty obvious. Security, that makes sense. And then of course economies of scale in everything we do. So one of these kinds of tools is pre-confirmations.
Over here in number one, we have a centralized sequencer that gives out pre-confirmations. This is what every layer two does today except for Taiko. And in number two, we have some proposed models for based pre-confirmations where the layer one proposer, I mean, you know, the guy running on a Raspberry Pi with a dial-up, is the sequencer, and they're the one directly giving out pre-confirmations and directly interacting with the people who are requesting pre-confirmations. So obviously a pretty huge bandwidth and compute resources cost. And number three is the design being explored today, which is some form of delegation, that's that purple line, to a gateway who actually gives out pre-confirmations to users. And then they create some builder constraints, some set of rules that builders must build blocks that comply to, right? So the gateway is acting somewhat like a relay in this case, but with a little bit more constraint around the builders. So we still have an auction taking place, very similar to MevBoost. And of course the builder gives the block to our Ethereum proposer, who goes and gives that out to layer one. Now, the other thing that we want is MEV retention, censorship resistance being included here, and of course doing this all with pre-confs, which turns out to be a really difficult problem, but we have some excellent research from the Ethereum layer one about execution tickets. So we've kind of extended this to layer two on based rollups. We have these things that look very similar to execution tickets. We distribute these in an auction, and then you must burn a ticket to propose a layer 2 block, although we do some of the burning and kind of registration early in the epoch and off-chain, optimistically, so that it's not an expensive thing to do on-chain. And of course with this, we can throw in some no-delay forced inclusion. So every rollup today has a drift where you can deposit, make a deposit transaction on layer one, and then you might wait a few layer one blocks before that's included in layer two. In a based rollup's case, you can pretty straightforwardly build no-delay forced inclusion right into a based rollup, which gives you excellent censorship resistance equivalent to that of Ethereum. Now, one other important thing about next-generation based rollups is checkpointing. So whenever we, Taiko calls it proposing, by the way, but whenever we want to kind of put our sequence onto layer one, we need to make a checkpoint here. And this is the little red flag on my diagrams, but what we're actually doing is making kind of a point of atomicity. A layer one EVM transaction is atomic. You can't revert part of it. And because of this, you can do cross-chain contract calls based on that atomicity. You can also checkpoint together. That's this bottom left diagram, where we share lots of checkpoints in one, which saves a ton of costs. The other thing we can do, of course, is other things while we checkpoint. So we can use layer one liquidity during our checkpoint, make this available on layer twos. We can also, you know, save the states of things on layer one. We could change the state of a layer one contract to simulate layer one execution, but actually doing the execution on layer two. Cool.
So one important kind of piece of technology, and probably the most important for the based rollup teams to compete on and be the first to, is validity proving, and fast validity proving and shared deposits. So there's a whole bunch of ZK teams working on this. Succinct and RISC Zero are two examples. And the goal is to take fast validity proofs for a whole bunch of layer twos, combine these, aggregate these into one fast ZK proof, and then put this onto Ethereum in a shared deposits bridge contract that has funds for lots and lots of rollups. Now, this has security concerns. You have to trust your ZK, so we probably want multi-proving. And it's also, today, really slow. So the biggest competitive advantage of based rollups in the future will be how fast your proving is. So this is the magic of based sequencing that we've been working on at Spire, to some extent, of course. And that is using a censorship resistance committee to sacrifice liveness for censorship resistance. You can read more about this in our litepaper. And then forward based sequencing, which is using a sequence posted in a layer one slot to affect a future layer one sequence. This enables you to do based sequencing and traditional shared sequencing at the same time, which is obviously super powerful. So if anyone from Espresso is in the room, take a look at this. Cool. So after we've done all this, what do we get? Harmony, and more specifically, deconstructing economies of scale to enable parallel innovation while maintaining network effects. I've said this in all of my talks so far this week, and it's so important, right? We don't want economies of scale to introduce centralization risks, and we want to promote parallel innovation as much as possible. The other thing, one of our important goals for the user and developer side, is a monolithic experience, right? Nothing monolithic in practice. We want to have the ability for users and developers to feel like they're on one chain, just to get that clearly better user experience. And, of course, doing all this while establishing infinite expressivity, giving builders the direct ability to customize whatever they want about their rollup, including execution environments, gas fees, et cetera. Infinite gardens, break walls. Next generation based rollups actually solve problems. Stay based. Oh, here's a quote from Heraclitus Drake: stay based, for one man's liquidity is another man's liquidity. Thank you. Okay, thank you a lot. So we have a few questions. So, first question: what can Taiko do to become a next generation based rollup? Well, first of all, upgrade their bridge contract so it can share proposals and deposits and ZK proofs with other rollups, and then add actually good pre-confirmations. Yeah, they have a lot to do. Next question: as someone new to the space, I find it hard to factor out the marketing. How does your approach compare to Espresso? Sounds similar, at least. That's a good question. So Espresso is building a shared sequencer, and they're building a form of what they call based Espresso, which is not actually based sequencing. They just call it that. It's a marketing thing. And the goal of Espresso is to unify layer twos. The goal of next-generation based rollups is to unify Ethereum and layer twos, so we're involving Ethereum in the equation. But to get there, you can do shared sequencing.
You can even involve the layer one sequencer, but that doesn't mean anything unless you do this checkpointing, unless you do this atomic synchronous composability. So based Espresso will not allow you to do cross-chain contract calls. Okay, thank you. Do we have any other questions? We still have time. So, this is something that every based rollup must do, which is reorg with Ethereum. So, in the case of an Ethereum reorg, your based rollup must always reorg. It doesn't have to change anything, but it will reorg. And this is something that is very hard to fix in protocol. We need an Ethereum upgrade to reduce the risk of reorgs or decrease time to finality. But there are a lot of other protocol things we can do. If you get lots of proposers opted in, in the lookahead, you can just ask them not to reorg, have them put up some collateral, and you'll be able to reduce the risk of reorgs. You can also patch over it on the user experience side. You can have a solver or somebody take on duration risk. The user never has to experience a reorg. And then Spire's also working on an RPC that's custom, kind of retrofitted, to deal with common reorgs. One thing I would add is that based rollups will reorg as much as Ethereum does now. So I don't know how much you use Ethereum, but if you do a lot of DeFi or trading, you've still probably never experienced a reorg that has affected your life. So I wouldn't expect that it's a massive user experience problem. Yeah, we would have to reorg. Okay, thanks. And then, sorry, just another question. I thought the cross-chain contract call one was very interesting, but I still can't imagine how it works across, let's say, a bunch of different chains that all operate at a different speed. So could you maybe provide some practical examples with a bit more detail? Come talk to me afterwards, because that's a longer discussion. So all based rollups have a batching time of 12 seconds and a block time of theoretically less. This means that the composability you do between them can only happen at batching time, because that's the only time we have atomicity and synchronicity. We can compose every 12 seconds, we can do a cross-chain contract call every 12 seconds, and then within that boundary we can't do any composability that is enforced by cryptography.
We can still do intents or other kinds of interop.", "eventId": "devcon-7", - "slot_start": 1731564000000, - "slot_end": 1731565800000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1Zivx1-urETlnczibMYsiNyH4-ey3zg3vSAD7YDHJeJk", - "resources_slides": null, + "slot_start": 1731642600000, + "slot_end": 1731644400000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1Ftf3rfy0W2vOu0uKzcm-Qyqhd_eURotVsS5HzTB9jFw", + "resources_slides": "https://drive.google.com/file/d/1NC_7AWntfiYe_4I6veFUfkNF24gUZAYo/view", "speakers": [ - "anna-george" + "mteam" ] }, "vector": [ 0, 0, - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -542430,12 +540905,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -542474,6 +540943,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -542496,6 +540966,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -542511,7 +540982,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -542535,6 +541005,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -542852,7 +541323,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -542995,7 +541465,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -543008,48 +541477,48 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "next-generation-based-rollups-a-practical-approach-to-unifying-ethereum", - "sourceId": "GHVK8E", - "title": "Next Generation Based Rollups: A Practical Approach to Unifying Ethereum", - "description": "I plan to speak on the concept of based sequencing (based rollups). I want to not only introduce the concept but also explain recent developments (what I like to call next generation based rollups). This includes based preconfirmations, fast->realtime proving, customizable composability, practical synchronous composability, among others. I will introduce I also plan to provide a brief summary to my Bankless Summit talk on ETH value accrual in the presence of based rollups.", - "track": "Layer 2", + "id": "non-native-arithmetic-via-crt-codes", + "sourceId": "B7CJU8", + "title": "Non-Native Arithmetic via CRT Codes", + "description": "Non-native arithmetic is an important and costly operation in SNARKs. It is essential for proving validity of general cryptographic data like RSA signatures, non-native elliptic curve arithmetic like secp256r1, and general SNARK proof composition. We investigate a new approach to prove non-native integer arithmetic using Residue Number Systems and a batch proximity test for Chinese Remainder Theorem (CRT) codes, as well as surprising connections to STARK soundness.", + "track": "Applied Cryptography", "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "expertise": "Expert", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Fragmentation", - "Frameworks", - "Layer 2s" + "Cryptography", + "SNARK", + "Zero-Knowledge" ], "keywords": [ - "based rollups", - "sequencing", - "composability" + "Coding Theory", + "Math" ], - "duration": 1378, + "duration": 1400, "language": "en", - "sources_swarmHash": "ec4e2a052bdc5fc188946c250ffacdba021eaa61b2efc79b21737c18ae7d1f3a", - "sources_youtubeId": "Ier_f5V4_ow", + "sources_swarmHash": "ee183d21ef8cc107dffe649816f123f9afc2ef6ecc7f2906454a6c588c5bdf3c", + "sources_youtubeId": "OucSo6xjlBQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673839411b0f83434dfbaecb", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673839411b0f83434dfbaecb.vtt", - "transcript_text": " . Hello, DevCon. All right, so we're going to talk about next generation base rollups today. 
There are some based rollups that are next generation. There are some based rollups that are last generation. You guys just learned about some. I'm going to tell you more about how you can become a next generation based rollup, kind of what the definition is, and then a little bit more about how I see the future of based rollups evolving. Cool, so in this first section we're just going to go through some of the traditional based rollup research, talk about the based rollups that have been, you know, explored in the past, some of the designs that have been proposed, and then we're going to understand some of their flaws, some of their weaknesses. So one of the very first designs, the one used by Taiko today on mainnet, is total anarchy based sequencing. This is kind of letter-of-the-law, exactly what Justin Drake proposed last year, right? And it's very simple, very basic, right? It's called total anarchy because it uses an election system that is permissionless and allows anyone, that's the anarchy bit, to propose a block. So one key point here is that the sequencer is not elected beforehand. You don't know who the sequencer will be beforehand. You might know the Ethereum proposer, because you can look at the Ethereum lookahead, we know 32 slots in advance, but the sequencer of the based rollup might not be the proposer. In Taiko's case, it is Taiko Labs for the majority of blocks. One other thing to note is that layer 2 block proposing is completely permissionless, right? There are no permissions involved. The way that this ends up playing out in practice is that we have a bunch of proposers in Taiko's system that give their blocks to the builder, that would be Titan, Wintermute, et cetera. And they would take these blocks, order them in the layer one block, each block proposal is a layer one transaction, and then send this completed block to the proposer through a relay and through the MevBoost auction, right? And then eventually the proposer would propose this to Ethereum once they've selected the block with the highest bid. So it's up to the proposer and the builder to choose which block created by these layer 2 sequencers or proposers actually gets onto the chain. Now, one design that aims to solve a few of the problems of the previous design is vanilla based sequencing. So Limechain's George Spasov put out this excellent bit of research that explained how we can actually solve a few of the foundational problems with total anarchy based sequencing, the Taiko model. So the first thing that we try with vanilla based sequencing is a simple sequencer election, right? That gives us the ability to choose a sequencer based on some rule, have a primary rule, and then we have a fallback as well. So the fallback in vanilla based sequencing's case is just total anarchy. So there's still some total anarchy here. And the primary sequencing is based on some election mechanism that is not clearly defined and kind of left to implementation in the initial research. So the other thing that vanilla based sequencing includes is kind of very basic, primitive support for delegation, which is allowing the Ethereum proposer to give the rights to their based rollup block to an external builder without directly going through MevBoost. It's like external to the MevBoost auction. Vanilla based sequencing also included basic support for pre-confirmations, which gives the, you know, we heard about it in the last talk, but it gives the proposer the ability to promise things about the block that they will eventually propose. 
It does require some sort of delegation or some increased hardware constraints on the proposer. And so the design for pre-confirmations has kind of wide-reaching implications for centralization vectors for stake and also for pre-confirmations in real time. Now, the other thing that vanilla based sequencing introduced that really caught my eye was revenue generation from a percent of congestion fees. And you'll hear Justin Drake talk about this all the time. You'll hear me talk about this. Based rollups can capture revenue from congestion trivially, right? These base fees, if you're doing an EIP-1559 EVM, right, they are represented on-chain and are kind of understandable from within the EVM, within the state transition function. So you can view things about the fees, you can take these and you can do things with them, right? You can direct them to a treasury, you can give some of them to the proposer, or you could return them back to users. So, the problems with these are actually really big problems. Taiko, live on mainnet, is extremely inefficient. All forks of Taiko without major changes will continue to be extremely inefficient. One other problem with kind of total anarchy and vanilla based sequencing is that there's kind of no built-in composability. Any composability is on a layer above the sequencing. And that's really unfortunate, because whenever you have to build kind of another layer, you introduce complexity and centralization vectors of some sort. In the case of based rollups, that's quite possibly around pre-confirmations, which means we could have centralized Ethereum stake, which we will want to avoid. The other thing is MEV revenue. So lots of people still have this misconception that based rollups leak MEV revenue, and that's true for traditional based rollups, because they do not use an auction of any sort to kind of get an oracle price on MEV. So if we look into layer one MEV research, we'll see things like execution tickets and execution auctions that are all designed to get some oracle price, essentially, of the MEV in a block. Now the other thing to note about based rollups, like I mentioned before, is inefficiency. So Taiko has about a $10,000-a-day cost to do their sequencing. Before blobs, they had about $100,000 a day. This is a lot of money, clearly, and the kind of thing where you need a massive amount of users and network effects to actually be sustainable. And that's something we obviously want to avoid. But there is a solution. For some of these Taiko forks, we've been looking at doing slower block times. So Taiko now has a block time of 33 seconds instead of 12 seconds, which is of course Ethereum's block time. That, you know, allows their cost to come down to about a third, which is good for them. But it's also really bad for users. You get a worse user experience. So the way we can patch over this is with pre-confirmations. Just throw some pre-confs on top. The downside of that is that without doing your pre-confirmation designs in a clever way, you're going to end up with limited benefits. So your users are going to get very limited availability of pre-confirmations and of course limited composability. So unless you're using a shared pre-confirmation protocol, you're not going to have composability. So we start breaking a lot of things when we add pre-confirmations. 
The other thing is that, you know, specifically in Taiko's case, you lose composability with layer one, because pre-confirmations happen, you know, over the course of an entire epoch, perhaps, and during that time, layer one state could change a ton. And so the kind of current layer one proposer is very different from who's actually sequencing the rollup. That's not shared sequencing. That's not based. But we can do better. Traditional designs are not good enough. We have, over the past two years, we have new research. We have new teams. We have a lot more data about based rollups, because we have one live, and we have a lot better technology. So one of the big things that we'll be talking about in the next section is ZK, using SNARKs to save costs and improve efficiency. So let's kind of take a step back, let's think from first principles with all the data, with all the research, and with all the teams that we have now: what are the things that we really want to get out of based sequencing? The first thing is, in my mind, the most important, which is synchronous atomic composability. And this is a buzzword for sure. What I think that means is atomic cross-chain contract calls that are sufficient for flash loans. And the reason for that is because flash loans are a foundational tool for doing atomic arbitrage between domains. So once you have cross-chain contract calls, you can do arbitrage. You can do flash loans for arbitrage. And this gives you the ability to essentially arbitrage prices on different domains, giving users kind of the best prices across chains. You can also use that for doing, like, a swap that uses multiple chains' liquidity. If I'm on Arbitrum, I could directly use Ethereum layer 1 liquidity without waiting for a service provider or some collateral. Now, that's pretty straightforward. The other thing to look at is a seamless user experience and developer experience. So anyone who's ever tried making an asynchronous application will know that it's ten times easier, I'm not joking, to build a synchronous application. If it's asynchronous, you have to deal with the case where something goes wrong and you have a desync. Now the other thing is, of course, network effects, right? Once we have the efficient synchronous composability between chains, we have the ability to build network effects on one chain and use that on another. And because Ethereum already has massive network effects, that means based rollups can use those network effects, that liquidity, users, protocols, et cetera, to directly improve the user and developer experience on their rollup. Now, the other thing that we want, of course, all the time, is faster things and cheaper things. On the faster side, we're looking at good pre-confirmations that don't break things and that are fulfilled 100% of the time. Frequent block proposing. The more we talk to Ethereum, the more we can compose with Ethereum. Custom frontends. So this is kind of changing the Uniswap-style swap interface to something that actually represents what a pre-confirmation might look like. So that's a green check mark, some confetti of some sort. It doesn't seem like a lot technically, but it's actually really important to improving the user experience materially. One thing is getting rid of the approve-and-then-swap flow. You can get rid of that completely with pre-confirmations. Another thing is, of course, cheaper things, which is better execution. 
So just writing your smart contracts better and not making the mistakes that we've made in the past when we were rushing to get code out. Aggregate everything. There's a slide on that, so I'll wait. And of course, efficiency of all shapes and sizes. Now, we want to do all of these things, and both of these things, I guess, without sacrificing decentralization and censorship resistance, liveness, and sustainability. So this is where you can get faster and cheaper than if you go all layer one: you can do it on a centralized sequencer layer two. What you can't do is faster and cheaper with decentralization, censorship resistance, liveness, and sustainability without some form of based sequencing, in my opinion. So we know what we want, it's not that complicated: we want fast things, we want cheap things, and we want everything to feel like one chain for developers and for users. Getting there: next generation based sequencing. This is the subject of these three companies up here, this is kind of the industry that we're in: RISE, Spire Labs, and Taiko Gwyneth. And we're taking a very practical approach to unifying Ethereum. So we're trying to avoid the kind of discussions where we get into the ivory tower semantics, and actually build tangible, useful products that are good for users and developers. So let's get into the specifics. This is an intermediate talk, so we're going to get a little bit technical here. We're going to share literally everything. So when it comes to things we share, like, if the answer is no, then you're probably lying. We have deposits, proposing, blobs, proving, network effects, assets, contract calls, off-chain infrastructure, security, economies of scale, et cetera, et cetera, et cetera. So shared deposits, that's AggLayer. These are teams that are working to reduce costs of posting ZK proofs to mainnet by aggregating ZK proofs. It also has the excellent benefit that you can do interop between layer twos without ever touching layer one. So you save a lot of costs with that as well. Shared proposing saves a lot of costs and enables atomic composability, again, between based rollups, if we can share blobs. So if you're a based rollup and you're frequently checkpointing or posting to Ethereum, you're going to be purchasing a blob for DA in most cases. And, well, they're pretty big, and most based rollups do not use 100% of the blob space in a single blob. So there's a lot of empty space. And there's a whole bunch of teams, Spire's kind of leading some of the research on this side, figuring out how we can have multiple rollups use the same blob. So we build some cryptoeconomic system to make everything a little more efficient and a lot cheaper for based rollups. We also look at things like shared proving to save costs, and shared network effects. So we want the canonical ledger of last resort for every asset to be on layer one. We want fungibility between these. That's something we get with shared deposits, but it's a big priority in and of itself. Contract calls, of course: we want to do contract calls, we want to batch those, you know, that's pretty obvious. Security, that makes sense. And then, of course, economies of scale in everything we do. So one of these kinds of tools is pre-confirmations. 
We have, over here in number one, a centralized sequencer that gives out pre-confirmations. This is what every layer two does today except for Taiko. And in number two, we have some proposed models for based pre-confirmations where the layer one proposer, I mean, you know, the guy running on a Raspberry Pi with dial-up, is the sequencer, and they're the one directly giving out pre-confirmations and directly interacting with the people who are requesting pre-confirmations. So obviously a pretty huge bandwidth and compute resources cost. And number three is the design being explored today, which is some form of delegation. That's that purple line to a gateway who actually gives out pre-confirmations to users. And then they create some builder constraints, some set of rules that builders' blocks must comply with, right? So the gateway is acting somewhat like a relay in this case, but with a little bit more constraint around the builders. So we still have an auction taking place, very similar to MevBoost. And of course the builder gives the block to our Ethereum proposer, who goes and gives that out to layer one. Now the other thing that we want to do is MEV retention, censorship resistance being included here, and of course doing this all with pre-confs, which turns out to be a really difficult problem, but we have some excellent research from the Ethereum layer one about execution tickets. So we've kind of extended this to layer two on based rollups. We have these things that look very similar to execution tickets. We distribute these in an auction, and then you must burn a ticket to propose a layer 2 block, although we do some of the burning and kind of registration early in the epoch and off-chain, optimistically, so that it's not an expensive thing to do on-chain. And of course with this, we can throw in some no-delay forced inclusion. So every rollup today has a drift where you can make a deposit transaction on layer one, and then you might wait a few layer one blocks before that's included in layer two. In a based rollup's case, you can pretty straightforwardly build no-delay forced inclusion right into the rollup, which gives you censorship resistance equivalent to that of Ethereum. Now, one other important thing about next-generation based rollups is checkpointing. So whenever we, Taiko calls this proposing, by the way, but whenever we want to kind of put our sequence onto layer one, we need to make a checkpoint here. And this is the little red flag on my diagrams, but what we're actually doing is making kind of a point of atomicity. A layer one EVM transaction is atomic. You can't revert part of it. And because of this, you can do cross-chain contract calls based on that atomicity. You can also checkpoint together. That's this bottom-left diagram, where we share lots of checkpoints in one, which saves a ton of costs. The other thing we can do, of course, is other things while we checkpoint. So we can use layer one liquidity during our checkpoint and make this available on layer twos. We can also, you know, save the states of things on layer one. We could change the state of a layer one contract to simulate layer one execution, but actually do the execution on layer two. Cool. 
So one important piece of technology, and probably the most important for the based rollup teams to compete on and be the first to, is fast validity proving and shared deposits. So there's a whole bunch of ZK teams working on this. Succinct and RISC Zero are two examples. And the goal is to take fast validity proofs for a whole bunch of layer twos, combine these, aggregate these into one fast ZK proof, and then put this onto Ethereum in a shared deposits bridge contract that has funds for lots and lots of rollups. Now, this has some security concerns. You have to trust your ZK, so we probably want multi-proving. And it's also, today, really slow. So the biggest competitive advantage of based rollups in the future will be how fast your proving is. So this is the magic of based sequencing that we've been working on at Spire to some extent, of course. And that is using a censorship resistance committee to sacrifice liveness for censorship resistance. You can read more about this in our litepaper. And then forward-based sequencing, which is using a sequence posted in a layer one slot to affect a future layer one sequence. This enables you to do based sequencing and traditional shared sequencing at the same time, which is obviously super powerful. So if anyone from Espresso is in the room, take a look at this. Cool. So after we've done all this, what do we get? Harmony, and more specifically, deconstructing economies of scale to enable parallel innovation while maintaining network effects. I've said this in all of my talks so far this week, and it's so important, right? We don't want economies of scale to introduce centralization risks, and we want to promote parallel innovation as much as possible. The other thing, one of our important goals for the user and developer side, is a monolithic experience, right? Nothing monolithic in practice, but we want users and developers to feel like they're on one chain, just to get that clearly better user experience. And, of course, doing all this while establishing infinite expressivity, giving builders the direct ability to customize whatever they want about their rollup, including execution environments, gas fees, et cetera. Infinite gardens, break walls. Next-generation based rollups actually solve problems. Stay based. Oh, here's a quote from Heraclitus Drake. Stay based, for one man's liquidity is another man's liquidity. Thank you. Okay, thank you a lot. So we have a few questions. So, first question, what can Taiko do to become a next generation based rollup? Well, first of all, upgrade their bridge contract so it can share proposals and deposits and ZK proofs with other rollups, and then add actually good pre-confirmations. Yeah, they have a lot to do. Yeah, next question. As someone new to the space, I find it hard to factor out the marketing. How does your approach compare to Espresso? Sounds similar, at least. That's a good question. So Espresso is building a shared sequencer, and they're building a form of what they call based Espresso, which is not actually based sequencing. They just call it that. It's a marketing thing. And the goal of Espresso is to unify layer twos. The goal of next-generation based rollups is to unify Ethereum and layer twos, so we're involving Ethereum in the equation. But to get there, you can do shared sequencing. 
You can even involve the layer one sequencer, but that doesn't mean anything unless you do this checkpointing, unless you do this atomic synchronous composability. So base espresso will not allow you to do cross-chain contract calls. Okay, thank you. the same thing. So, I think that's the main thing. I think that's the main thing. So, I think that's the main thing. So, I think that's the main thing. Thank you. Do we have any other questions? We still have time. So, this is something that every base rollup must do, which is reorg with Ethereum. So, in the case of an Ethereum reorg, then your base rollup must always reorg. It doesn't have to change anything, but it will reorg. And this is something that is very hard to fix in protocol. We need an Ethereum upgrade to reduce the risk of reorg or decrease time to finality. But there are a lot of other protocol things we can do. If you get lots of proposers opted in, in the look ahead, you can just ask them not to reorg, have them put up some collateral, and you'll be able to reduce the risk of reorgs. You can also paste over it on the user experience side. You can have a solver or somebody take a duration risk. The user never has to experience a reorg. And then Spyro's also working on an RPC that's custom, kind of retrofit, to deal with common reorgs. One thing I would add is that base drops will reorg as much as Ethereum does now. So I don't know how much you use Ethereum, but if you do a lot of DeFi or trading, you've still probably never experienced a reorg that has affected your life. So I wouldn't expect that it's a massive user experience problem. Yeah, we would have to reorg. Okay, thanks. And then, sorry, just another question. I thought cross-chain contract one was very interesting, but I still can't imagine how it works across, let's say, a bunch of different chains that all operate at a different speed. So could you maybe provide some practical examples with a bit more details? Come talk to me afterwards, because that's a longer discussion. So all base rollups have a batching time of 12 seconds and a block time of theoretically less. This means that the composability you do between them can only happen at batching time because that's the only time we have atomicity and synchronicity. We can compose every 12 seconds, we can do a cross-chain contract call every 12 seconds, and then within that boundary We can't do any composability that is enforced by cryptography. We can still do intents or other kinds of interop.", + "sources_streamethId": "6735cfae9dbb7a90e109be5d", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735cfae9dbb7a90e109be5d.vtt", + "transcript_text": " Coming to my talk. In writing the talk, I found that I think it probably makes sense to mostly go over what non-native arithmetic is and build up to the question. Because this is sort of an idea I've been casually thinking about for a while and I don't have like, you know, definitive answer to yet. So, yeah. Did I push the button or how does that? Is this working? The clicker? Oh, wait, it is working. Okay. Okay, so, for people who don't know, what is the context for all of this so non-native arithmetic is something that comes up in snarks which are succinct non-interactive arguments of knowledge may have heard of like zero knowledge proofs and this is the same kind of thing so in a snark, the prover, wants to convince the verifier that they know some witness. 
And they want to do this by generating a proof without interacting with the verifier, so non-interactively. And the proof should be small. And if it's zero knowledge, it keeps the witness secret. Yeah, and sorry for the formatting. I did the slides in Beamer and had to import them to Google Slides, so it might be a little weird. But, yeah, so the way that we usually write this is there's some C, which is like a circuit that has some public input X and some private input W, and the protocol allows the prover to convince the verifier that they know W by sending a proof. So, for example, right, you could have some, like, chain state X, which is public, some transaction W that is private, and you want to prove that you know W for a given chain state. And so not all transactions will be valid, and so that's what C checks. Yeah, so the important takeaway from this for our topic today is that C is defined, typically, over a field. So a field is like a place where we can do arithmetic, addition, multiplication, etc. And that field will be defined modulo some prime P. The field is sometimes sort of like a free parameter, depending on the proof system. You can pick the field freely. And sometimes it's not. So if you're using like a STARK, you can sort of, in principle, use any field, with just some caveats. But for like an elliptic curve-based proof system, it's usually fixed as part of the curve, which, again, depending on the particulars, is something you probably can't control. So the question that motivates non-native arithmetic is: what happens if we want to prove a statement about some arithmetic that lies in a different field than the one that we're defining our relation over? So, right, suppose you want to verify a signature that is defined over some other curve, externally to your proof system, right? You want to verify a Bitcoin transaction or something inside of BN254. And in that case, both of your fields will be fixed and you can't, you know, change them. And they're different. So you have to simulate one field inside of the other. And yes, we need to emulate or simulate the non-native field inside of the field that our relation is defined over. So just as a simple example of why this doesn't work, right? Suppose we wanted to check something like 4 times 5 mod 7. You can check that, I think all the arithmetic there is right, and 4 times 5 is 6 mod 7. But then if our proof system is defined mod 5, then directly carrying out the computation will yield the wrong result. So they're fundamentally different spaces for doing arithmetic. And so you need to simulate the one inside of the other. So what can we do? The situation is not hopeless. Like, in principle, right, this is a solvable problem, because you can encode any NP relation into an arithmetic circuit over any field. You know, you can encode it into binary digits and check that the digits are binary and encode the bitwise operations. And so this is just sort of like an existence proof. But this is, you know, very slow. We would prefer to do things more efficiently than this. And intuitively, you'd kind of expect it to be possible to do something better, because fields are similar, right? They wrap around, but for small values, they behave kind of like the integers. You're just adding numbers, and if you don't exceed the modulus, then maybe we can exploit that somehow. That's what we're going to try and do. So the first observation is that to check modular arithmetic, 
we just have to check integer arithmetic, because if you want to check something mod R, this is the same thing as just checking some integer relation with a quotient. And in some cases, this is actually sort of enough by itself. So depending on the relative sizes of the fields, it might be possible to do this. Like, if you check that A, B, C, and Q are all small, you can just check this relation, as written here, directly in the larger field. And the reason this works is because it will never wrap around the larger field. So if all the sizes of everything are small, or provably small, then if it's equal to 0 mod p, it has to hold mod r. You can convince yourself of this, I think. It's not that strange, right? Because if something has magnitude less than p and it holds mod p, that means it's 0 mod p. And since it has magnitude less than p, the only thing that's 0 mod p with magnitude less than p is 0. And so this works sort of like up to the square root of the size of the field, basically. And that's coming from the fact that we're checking a multiplication. If you were checking a degree 3 thing, then you'd want a cube root or something like that. But what if our field's not small, or our modulus is not small? Maybe it's even bigger than the prime that we're working over. Then you can't expect your encodings of the small values in the larger field to behave well. In fact, it might not be possible at all. If your elements are larger than the field that you're encoding your relation into, you can't even commit to a big value in a single field element. So this technique, I think originally, or at least I first heard about it, from Aztec, but there's this ancient theorem, the Chinese remainder theorem, that allows us to work around this problem. And so what that says is if you want to work in a big modulus, it's sufficient to work in two smaller moduli that divide the larger one, or that multiply to give the larger one, if they're co-prime. And it's sort of like both sides of that are rings, right? So you can take a value, and instead of working mod AB, you work mod A, and you work mod B. And these two rings are isomorphic. And so people call it a residue number system, and you can represent a large number, mod AB, by encoding all of its residues mod different small primes that divide AB. Here I've written it with two, but you can sort of recursively apply this equivalence to include arbitrary numbers of co-prime divisors. And so remember, we were working mod P, and we had some R, and our R might be too big. But now we can pick whatever modulus we want, right? We can pick a bunch of small primes. So it's always large enough to recover the value that we're operating over. And so long as everything kind of remains small, that Q is supposed to be a product. So long as everything remains small, this works. And like before, how we didn't want to wrap around P, here we don't want to wrap around M. But here M is a free parameter. It's not fixed by the proof system. Okay. So now, like, a slight digression to explain the idea. Reed-Solomon codes are an error-correcting code scheme that is ubiquitous in SNARKs. If you've heard of STARKs or FRI, this is, you know, essential to how they work. And the basic idea is we have some message that we treat as a vector of dimension K, and we encode it as a polynomial, as the coefficients of a polynomial, and then we evaluate that polynomial at more points. 
So remember, a polynomial of degree k minus 1 is determined by k points, and by evaluating it at more points, you're adding redundant information. And so, like in the original conception of Reed-Solomon codes, this was to deal with the fact that some of the information might be corrupted. So even if some of the data is maliciously altered, you can still recover the original message. Certainly given all of the points, you can recover it. So given a polynomial, if we evaluate it at some set of points, this is equivalent to reducing the polynomial mod this kind of degree-one ideal, X minus r_i. And so this insight allows us to generalize Reed-Solomon codes to other domains, ideal domains. And, for example, algebraic geometry codes, which replace polynomials and evaluations with functions on some curve and divisors, can be cast in this language as well. And this is also how we're going to introduce CRT codes; here CRT is still the Chinese remainder theorem. So the idea of a CRT code is: rather than encoding our message as a polynomial, we encode it as an integer. And then, rather than evaluating the polynomial at multiple points, we take the integer mod a bunch of small primes. And so you can think of these mod-small-primes as analogous to evaluating a polynomial at a point. It's not exactly the same, right? Because the ring you get from reducing mod different primes is not the same. But the ring you get from evaluating at a point is always the same. It's not really important for our purposes. Yeah, so if you have enough primes, so that the product of all the primes exceeds the bound of your original sort of space of integers that you're encoding into, then even if some of the residues are wrong, in the same way that with a Reed-Solomon code you can correct errors, with a CRT code you can also correct errors. And this encoding is the same as a residue number system. So you're taking an integer, you're reducing it mod a bunch of small primes, you're keeping the residues, and you can just work with those. And for those who are familiar, there's a lot of similarities here with how, you know, STARKs work and other kinds of algebraic proof-type stuff. Okay, so this slide I just screenshotted, because it was horribly mangled by the formatting. So the sketch of the protocol is: suppose we want to verify some non-native arithmetic. So we have some system of polynomial equations, the f's in the different x's, which are integers, and the f's have degree d, and all the integers are bounded. So in the original version of this, we'd be checking arithmetic mod some other prime, and you can encode that as an integer relation. And so the primes are fixed in advance, and you have some M, which is larger than the kind of maximum value achievable by F. As written, it's a little wrong, because you probably want to account for the additions, but it's something like that. And then you encode your integers modulo the small primes, as we described, and also provide quotients for the evaluations of these functions. I guess as written, those should all be zero, so maybe there's a slight confusion there. But yeah, so you prove your encoding of all of the integers is sort of within its appropriate range, and then you choose a random subset of the primes to test the relations over. And for simplicity, assuming the primes are all, like, roughly the same size, then you can calculate the success probability of a dishonest prover pretty straightforwardly. 
You know, it's just the probability of having like some code word, right, some set of residues that does not correspond to a small integer, or that's like wrong in some position, and then, yeah, anyway. Yeah, so it's similar to how starks work. Okay. Yeah, okay. So this is the interesting stuff. As described in the previous protocol, we're testing each encoding of X is close to a small integer directly. You're just taking your set of residues and checking that it is close to a small integer or an integer within the right bound. This used to be how fry-based proofs did things, but it turns out that there's a much more efficient batch proximity test. The proximity test here is you're testing the closeness of the set of residues to its space of correct messages. And what people do now with Starks is take a random linear combination of encoded columns or encoded integers and test that the random linear combination is close to a code word. So instead of testing each one independently, you can just take a random linear combination and test that one thing. So the question is, can we do the same thing here with CRT codes? So I think so, but it's very different, right? Because in the Reed-Solomon setting you have polynomials and you're working over a field and you're taking a random linear combination over the field here we have a set of integers and we're taking a random linear combination of these integers but there's no obvious base field in the same way that there would be for polynomials to take a random linear combination over. So instead we would pick a random integer linear combination to take of the integers. And this has a lot of interesting issues. So for example, this is not sensitive to encodings of small rational numbers. If you encode it like one half, then there's like a one in two chance that a random linear combination will cause the one half to go away. And you might think, well, we could pick primes to multiply by, but that doesn't really work. So it is in some sense fundamentally different, but I think that it's possible to do this. The question is just like how good is it and if those things can be worked around. So for example, for working mod R, this is kind of fine because small rationals are also valid sort of to reduce mod R for large R, but there's lots of details to be worked out. So, yeah, just some overview of the things I haven't talked about in this talk. So I mentioned a little bit that CRT codes, the notion of distance is a little bit subtle, especially if the primes are not all, like, the same size. They can't sometimes always be, I mean, right, like every prime is of a different size, so you have to account for that. It's not like a fundamental problem, it's just a little more complicated. As well, I mentioned this decoding to rationals rather than integers, which in some applications is fine. It might not always be fine, so I have to work that out as well. And then another thing I haven't talked about, but that could actually be worth considering in practice, is all of this generalizes from integers to number fields. And in number fields, you have a sort of different situation where the first point, you actually can have primes that are like of the same size, right? If you have some prime over rationals, integers that splits over number fields, then that might yield like a more convenient thing to work over. And in some cases, this sort of works. And then you have to ask, how do you define the size of things and so on? But just more technicalities, I think. 
And then one other interesting thing worth considering is, so we moved to the integers, and we were no longer able to take random linear combinations over an extension of the base field. But what happens if we work over Starks with polynomials, and instead of taking a random linear combination over the base field, we took a random linear combination of columns by random polynomials, not over an extension field. In this way, it's sort of analogous to taking an integer-linear combination of integers, and it has a lot of the same problems, but it would allow us to avoid ever working with extension fields, potentially. So I think that would be an interesting direction to explore if this work ends up making sense. And that is all I have. Thank you. All right. Thank you so much, Liam. We do have a question here. Could you repeat how you calculate which primes are small enough to use for creating a CRT encoding? So I guess there's two ways that I'm not sure what the question is referring to but For encoding into like a residue number system the primes can be any primes That you you usually choose them to be small because working mod small primes is much more efficient. This question could also be talking about the example that I gave where you can simulate arithmetic mod a square root sized number in a larger field. And that is just coming from the fact that the relation that you're calculating over can't exceed the field you're operating in. So you fix some field that you're actually working over, and then if your field that you're simulating is small enough, then everything kind of just works out, as long as everything remains smaller than the characteristic of the larger field. All right, perfect. We do have a little bit of time for questions, so let's give it another 10 seconds to see if we have any questions. What is the size of size 2 to the 128 sized space. All right, perfect. Thank you so much for the amazing talk, Liam. So we will resume at 5 for our next talk.", "eventId": "devcon-7", - "slot_start": 1731642600000, - "slot_end": 1731644400000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1Ftf3rfy0W2vOu0uKzcm-Qyqhd_eURotVsS5HzTB9jFw", - "resources_slides": null, + "slot_start": 1731576600000, + "slot_end": 1731578400000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/15NH3bC1NnjmkyRycEK1VaWR9dgZMJsH0PJMf-OTgOyA", + "resources_slides": "https://drive.google.com/file/d/1roAOatZGYwzShqJODF3ECwVwVJNYEUOT/view", "speakers": [ - "mteam" + "liam-eagen" ] }, "vector": [ @@ -543060,10 +541529,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -543813,6 +542282,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -543847,43 +542318,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -543909,7 +542343,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -544121,6 +542554,37 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -544367,12 +542831,12 @@ 0, 0, 0, - 2, 0, 0, 0, 2, 0, + 2, 0, 0, 0, @@ -544389,43 +542853,45 @@ }, { "session": { - "id": "non-native-arithmetic-via-crt-codes", - "sourceId": "B7CJU8", - "title": "Non-Native Arithmetic via CRT Codes", - "description": "Non-native arithmetic is an important and costly operation in SNARKs. 
It is essential for proving validity of general cryptographic data like RSA signatures, non-native elliptic curve arithmetic like secp256r1, and general SNARK proof composition. We investigate a new approach to prove non-native integer arithmetic using Residue Number Systems and a batch proximity test for Chinese Remainder Theorem (CRT) codes, as well as surprising connections to STARK soundness.", - "track": "Applied Cryptography", + "id": "onchain-capital-allocation-from-current-mechanisms-to-future-possbilities", + "sourceId": "BEWPLY", + "title": "Onchain Capital Allocation: From current mechanisms to future possbilities", + "description": "Capital allocation, from paying bills to complex organizational funding, often suffers from inefficiencies and lack of transparency. Web3 has the potential to revolutionize this by enabling more efficient, effective, and transparent capital distribution. By addressing coordination failures and introducing new onchain strategies, crypto could transform how society allocates resources.\r\n\r\nGitcoin founder Kevin Owocki will articulate this design space in this 20 minute talk.", + "track": "Coordination", "type": "Talk", - "expertise": "Expert", + "expertise": "Intermediate", "audience": "Research", - "featured": false, + "featured": true, "doNotRecord": false, - "tags": [ - "Cryptography", - "SNARK", - "Zero-Knowledge" - ], "keywords": [ - "Coding Theory", - "Math" + "Mycofi" + ], + "tags": [ + "Quadratic Voting", + "Public good", + "Regenerative Applications", + "mycofi", + "Public good", + "Quadratic Voting", + "Regenerative Applications" ], - "duration": 1400, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "cd812bc83c5fc28fa42061376bf270b2ffff2f325328e88579375f3fd212fc7b", + "sources_youtubeId": "3R7ehJ6OGw8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735cfae9dbb7a90e109be5d", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735cfae9dbb7a90e109be5d.vtt", - "transcript_text": " Coming to my talk. In writing the talk, I found that I think it probably makes sense to mostly go over what non-native arithmetic is and build up to the question. Because this is sort of an idea I've been casually thinking about for a while and I don't have like, you know, definitive answer to yet. So, yeah. Did I push the button or how does that? Is this working? The clicker? Oh, wait, it is working. Okay. Okay, so, for people who don't know, what is the context for all of this so non-native arithmetic is something that comes up in snarks which are succinct non-interactive arguments of knowledge may have heard of like zero knowledge proofs and this is the same kind of thing so in a snark, the prover, wants to convince the verifier that they know some witness. And they want to do this by generating a proof without interacting with the verifier, so non-interactively. And the proof should be small. And if it's zero knowledge, it keeps the witness secret. Yeah, and sorry for the formatting. I did the slides in Beamer and had to import them to Google Slides, so it might be a little weird. But, yeah, so the way that we usually write this is there's some C, which is like a circuit that has some public input X and some private input W, and the protocol allows the prover to convince the verifier that they know W by sending a proof. 
So, for example, right, you could have some, like, chain state X, which is public, some transaction W that is private, and you want to prove that you know W for a given chain state. And so not all transactions will be valid, and so that's what C checks. Yeah, so the important takeaway from this for our topic today is that C is defined, typically, over a field. So a field is like a place where we can do arithmetic, addition, multiplication, etc. And that field will be defined modulo some prime P. The field is sometimes sort of like a free parameter, depending on the proof system. You can pick the field freely. And sometimes it's not. So if you're using like a STARK, you can sort of, in principle, use any field, with just some caveats. But for like an elliptic curve-based proof system, it's usually fixed as part of the curve, which, again, depending on the particulars, is something you probably can't control. So the question that motivates non-native arithmetic is: what happens if we want to prove a statement about some arithmetic that lies in a different field than the one that we're defining our relation over? So, right, suppose you want to verify a signature that is defined over some other curve, externally to your proof system, right? You want to verify a Bitcoin transaction or something inside of BN254. And in that case, both of your fields will be fixed and you can't, you know, change them. And they're different. So you have to simulate one field inside of the other. And yes, we need to emulate or simulate the non-native field inside of the field that our relation is defined over. So just as a simple example of why this doesn't work, right? Suppose we wanted to check something like 4 times 5 mod 7. You can check that, I think all the arithmetic there is right, and 4 times 5 is 6 mod 7. But then if our proof system is defined mod 5, then directly carrying out the computation will yield the wrong result. So they're fundamentally different spaces for doing arithmetic. And so you need to simulate the one inside of the other. So what can we do? The situation is not hopeless. Like, in principle, right, this is a solvable problem, because you can encode any NP relation into an arithmetic circuit over any field. You know, you can encode it into binary digits and check that the digits are binary and encode the bitwise operations. And so this is just sort of like an existence proof. But this is, you know, very slow. We would prefer to do things more efficiently than this. And intuitively, you'd kind of expect it to be possible to do something better, because fields are similar, right? They wrap around, but for small values, they behave kind of like the integers. You're just adding numbers, and if you don't exceed the modulus, then maybe we can exploit that somehow. That's what we're going to try and do. So the first observation is that to check modular arithmetic, we just have to check integer arithmetic, because if you want to check something mod R, this is the same thing as just checking some integer relation with a quotient. And in some cases, this is actually sort of enough by itself. So depending on the relative sizes of the fields, it might be possible to do this. Like, if you check that A, B, C, and Q are all small, you can just check this relation, as written here, directly in the larger field. And the reason this works is because it will never wrap around the larger field. 
So if all the sizes of everything are small, or provably small, then if it's equal to 0 mod p, it has to hold mod r. You can convince yourself of this, I think. It's not that strange, right? Because if something has magnitude less than p and it holds mod p, that means it's 0 mod p. And since it has magnitude less than p, the only thing that's 0 mod p with magnitude less than p is 0. And so this works sort of like up to the square root of the size of the field, basically. And that's coming from the fact that we're checking a multiplication. If you were checking a degree 3 thing, then you'd want a cube root or something like that. But what if our field's not small, or our modulus is not small? Maybe it's even bigger than the prime that we're working over. Then you can't expect your encodings of the small values in the larger field to behave well. In fact, it might not be possible at all. If your elements are larger than the field that you're encoding your relation into, you can't even commit to a big value in a single field element. So this technique, I think originally, or at least I first heard about it, from Aztec, but there's this ancient theorem, the Chinese remainder theorem, that allows us to work around this problem. And so what that says is if you want to work in a big modulus, it's sufficient to work in two smaller moduli that divide the larger one, or that multiply to give the larger one, if they're co-prime. And it's sort of like both sides of that are rings, right? So you can take a value, and instead of working mod AB, you work mod A, and you work mod B. And these two rings are isomorphic. And so people call it a residue number system, and you can represent a large number, mod AB, by encoding all of its residues mod different small primes that divide AB. Here I've written it with two, but you can sort of recursively apply this equivalence to include arbitrary numbers of co-prime divisors. And so remember, we were working mod P, and we had some R, and our R might be too big. But now we can pick whatever modulus we want, right? We can pick a bunch of small primes. So it's always large enough to recover the value that we're operating over. And so long as everything kind of remains small, that Q is supposed to be a product. So long as everything remains small, this works. And like before, how we didn't want to wrap around P, here we don't want to wrap around M. But here M is a free parameter. It's not fixed by the proof system. Okay. So now, like, a slight digression to explain the idea. Reed-Solomon codes are an error-correcting code scheme that is ubiquitous in SNARKs. If you've heard of STARKs or FRI, this is, you know, essential to how they work. And the basic idea is we have some message that we treat as a vector of dimension K, and we encode it as a polynomial, as the coefficients of a polynomial, and then we evaluate that polynomial at more points. So remember, a polynomial of degree k minus 1 is determined by k points, and by evaluating it at more points, you're adding redundant information. And so, like in the original conception of Reed-Solomon codes, this was to deal with the fact that some of the information might be corrupted. So even if some of the data is maliciously altered, you can still recover the original message. Certainly given all of the points, you can recover it. So given a polynomial, if we evaluate it at some set of points, this is equivalent to reducing the polynomial mod this kind of degree-one ideal, X minus r_i. 
And so this insight allows us to generalize Reed-Solomon codes to other domains, ideal domains. And for example, algebraic geometry codes, which replace polynomials and evaluations with functions of some curve and divisors can be cast in this language as well and this is also how we're going to introduce CRT codes here CRT is still Chinese remainder theorem. So the idea of a CRT code is rather than encoding our message as a polynomial, we encode it as an integer. And then, rather than evaluating the polynomial at multiple points, we take the integer, mod, a bunch of small primes. And so you can think of these mod small primes as analogous to evaluating a polynomial at a point. It's not exactly the same, right? Because the ring you get from reducing mod different primes is not the same. But the ring you get from evaluating at a point is always the same. It's not really important for our purposes. Yeah, so if you have enough primes so that the product of all the primes exceeds the bound of your original sort of space of integers that you're encoding into, then even if some of the primes are wrong, in the same way that with the Reed-Solomon code you can correct errors, with the CRT code you can also correct errors. And this encoding is the same as a residue number system. So you're taking an integer, you're reducing it mod a bunch of small primes, you're keeping the residues and you can just work with those. And for those who are familiar, there's a lot of similarities here with how, you know, Starks work and other kinds of algebraic proof type stuff. Okay, so this slide I just screenshotted because it was horribly mangled by the formatting. So the sketch of the protocol is, suppose we want to verify some non-native arithmetic. So we have some system of polynomial equations, the f in different x's, which are integers, and the f's have degree d, and all the integers are bounded. So in the original version of this, we'd be checking arithmetic mods some other prime, and you can encode that as an integer relation. And so the primes are fixed in advance, and you have some M, which is larger than the kind of maximum value achievable by F. As written, it's a little wrong because you probably want to account for the additions, but it's something like that. And then you encode your integers, model the small primes, as we described, and also provide quotients for the evaluations of these functions. I guess as written, those should all be zero, so maybe there's a slight confusion there. But yeah, so you prove your encoding of all of the integers is sort of within its appropriate range, and then you choose a random subset of the primes to test the relations over. And for simplicity, assuming the primes are all like roughly the same size, then you can calculate the success probability of a dishonest prover pretty straightforwardly. You know, it's just the probability of having like some code word, right, some set of residues that does not correspond to a small integer, or that's like wrong in some position, and then, yeah, anyway. Yeah, so it's similar to how starks work. Okay. Yeah, okay. So this is the interesting stuff. As described in the previous protocol, we're testing each encoding of X is close to a small integer directly. You're just taking your set of residues and checking that it is close to a small integer or an integer within the right bound. This used to be how fry-based proofs did things, but it turns out that there's a much more efficient batch proximity test. 
The proximity test here is you're testing the closeness of the set of residues to its space of correct messages. And what people do now with STARKs is take a random linear combination of encoded columns, or encoded integers, and test that the random linear combination is close to a codeword. So instead of testing each one independently, you can just take a random linear combination and test that one thing. So the question is, can we do the same thing here with CRT codes? So I think so, but it's very different, right? Because in the Reed-Solomon setting you have polynomials, and you're working over a field, and you're taking a random linear combination over the field. Here we have a set of integers, and we're taking a random linear combination of these integers, but there's no obvious base field, in the same way that there would be for polynomials, to take a random linear combination over. So instead we would pick a random integer linear combination to take of the integers. And this has a lot of interesting issues. So for example, this is not sensitive to encodings of small rational numbers. If you encode something like one half, then there's a one in two chance that a random linear combination will cause the one half to go away. And you might think, well, we could pick primes to multiply by, but that doesn't really work. So it is in some sense fundamentally different, but I think that it's possible to do this. The question is just how good is it, and whether those things can be worked around. So for example, for working mod R, this is kind of fine, because small rationals are also valid sort of to reduce mod R for large R, but there's lots of details to be worked out. So, yeah, just some overview of the things I haven't talked about in this talk. So I mentioned a little bit that with CRT codes, the notion of distance is a little bit subtle, especially if the primes are not all, like, the same size. They can't always be, I mean, right, every prime is of a different size, so you have to account for that. It's not like a fundamental problem, it's just a little more complicated. As well, I mentioned this decoding to rationals rather than integers, which in some applications is fine. It might not always be fine, so I have to work that out as well. And then another thing I haven't talked about, but that could actually be worth considering in practice, is that all of this generalizes from integers to number fields. And in number fields, you have a sort of different situation where, on the first point, you actually can have primes that are of the same size, right? If you have some prime over the rationals, or the integers, that splits over a number field, then that might yield a more convenient thing to work over. And in some cases, this sort of works. And then you have to ask, how do you define the size of things, and so on? But just more technicalities, I think. And then one other interesting thing worth considering is, so we moved to the integers, and we were no longer able to take random linear combinations over an extension of the base field. But what happens if we work over STARKs with polynomials, and instead of taking a random linear combination over the base field, we took a random linear combination of columns by random polynomials, not over an extension field? In this way, it's sort of analogous to taking an integer linear combination of integers, and it has a lot of the same problems, but it would allow us to avoid ever working with extension fields, potentially.
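The "one half" caveat above can be seen directly: the residue vector of the rational 1/2 (the inverse of 2 modulo each prime) is a well-formed codeword that decodes to no bounded integer, yet multiplying it by a random integer coefficient turns it into a genuine integer encoding exactly when the coefficient is even, i.e. with probability about one half. A toy check, with hypothetical parameters:

```python
import random

primes = [101, 103, 107, 109, 113]

# Residue vector of 1/2: the inverse of 2 modulo each prime. A valid-looking
# codeword, but it does not correspond to any small integer.
half = [pow(2, -1, p) for p in primes]

escapes, trials = 0, 10_000
for _ in range(trials):
    c = random.randrange(1 << 16)              # random integer coefficient
    combo = [(c * h) % p for h, p in zip(half, primes)]
    if c % 2 == 0:
        # An even coefficient times 1/2 is the honest integer c // 2, so the
        # combination is indistinguishable from a valid integer encoding.
        assert combo == [(c // 2) % p for p in primes]
        escapes += 1
print(escapes / trials)  # ~0.5: a single batched combination misses 1/2 half the time
```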
So I think that would be an interesting direction to explore if this work ends up making sense. And that is all I have. Thank you. All right. Thank you so much, Liam. We do have a question here. Could you repeat how you calculate which primes are small enough to use for creating a CRT encoding? So I guess there's two ways that I'm not sure what the question is referring to but For encoding into like a residue number system the primes can be any primes That you you usually choose them to be small because working mod small primes is much more efficient. This question could also be talking about the example that I gave where you can simulate arithmetic mod a square root sized number in a larger field. And that is just coming from the fact that the relation that you're calculating over can't exceed the field you're operating in. So you fix some field that you're actually working over, and then if your field that you're simulating is small enough, then everything kind of just works out, as long as everything remains smaller than the characteristic of the larger field. All right, perfect. We do have a little bit of time for questions, so let's give it another 10 seconds to see if we have any questions. What is the size of size 2 to the 128 sized space. All right, perfect. Thank you so much for the amazing talk, Liam. So we will resume at 5 for our next talk.", - "eventId": "devcon-7", - "slot_start": 1731576600000, - "slot_end": 1731578400000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/15NH3bC1NnjmkyRycEK1VaWR9dgZMJsH0PJMf-OTgOyA", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "liam-eagen" - ] + "kevin-owocki" + ], + "eventId": "devcon-7", + "slot_start": 1731391200000, + "slot_end": 1731393000000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1-hdTt4ELigY4Pe3nCr4vnQFCDtQaHLB_e-UaHGdXucE", + "resources_slides": "https://drive.google.com/file/d/1xd41vnH6elifzDdtu1ulcLNd5K8lNcxT/view" }, "vector": [ 0, @@ -544438,14 +542904,8 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, 0, + 6, 0, 0, 0, @@ -544715,6 +543175,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -544924,7 +543385,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -545191,8 +543651,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -545293,6 +543751,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -545314,6 +543773,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -545391,6 +543851,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -545464,7 +543925,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -545606,6 +544066,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -545742,10 +544203,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 2, 0, @@ -545764,37 +544225,49 @@ }, { "session": { - "id": "onchain-capital-allocation-from-current-mechanisms-to-future-possbilities", - "sourceId": "BEWPLY", - "title": "Onchain Capital Allocation: From current mechanisms to future possbilities", - "description": "Capital allocation, from paying bills to complex organizational funding, often suffers from inefficiencies and lack of transparency. Web3 has the potential to revolutionize this by enabling more efficient, effective, and transparent capital distribution. 
By addressing coordination failures and introducing new onchain strategies, crypto could transform how society allocates resources.\r\n\r\nGitcoin founder Kevin Owocki will articulate this design space in this 20 minute talk.", - "track": "Coordination", + "id": "onchain-is-the-next-online", + "sourceId": "CXZ7UT", + "title": "Onchain is the next online", + "description": "The goal is to bring the world into a global onchain economy that increases innovation, creativity, and freedom — and that's only possible on a decentralized platform that’s super easy to use. In this talk, Jesse Pollak, Creator of Base, can share his insights on why building for simplicity is so important for the Ethereum ecosystem, and what he’s learned from building the fastest-growing L2.", + "track": "Usability", "type": "Talk", - "expertise": "Intermediate", - "audience": "Research", - "featured": true, + "expertise": "Beginner", + "audience": "Developer", + "featured": false, "doNotRecord": false, - "keywords": [ - "Mycofi" - ], "tags": [ - "Quadratic Voting", - "Public good", - "Regenerative Applications", - "mycofi", - "Public good", - "Quadratic Voting", - "Regenerative Applications" + "Layer 2s", + "Account Abstraction", + "Paymaster", + "creators", + "Account Abstraction", + "Layer 2s" ], - "language": "en", - "speakers": [ - "kevin-owocki" + "keywords": [ + "Account Abstraction", + "Layer 2s", + "UX", + "Wallets", + "Developer Tools" ], + "duration": 1552, + "language": "en", + "sources_swarmHash": "978b7fc60439de724ea5e51af3ba22045297010a8db8f3a42e0913f9fdbcc179", + "sources_youtubeId": "olXwQyMrDqQ", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6736e6c61b0f83434d10c862", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736e62c1b0f83434d0b798d.vtt", + "transcript_text": " Tanya Cushman Reviewer Reviewer's Name Good morning, everyone. How's everyone today? Good! My name is Jeff Lau, and I'm a co-founder of ENS. Traditionally, this talk is done by Nick Johnson, but sadly, he did not make it here today out to Thailand, so instead, I will be talking to you instead. The last time I did the E&S, State of E&S talk was in 2018 at Prague. Who here was at DEF CON 4 Prague? Wow, wow. That's actually more than I thought. At that time, five years ago, we were exploring the permanent registrar and migrating from the Vickrey auction to the commit reveal registration of today. Since last time, so much has changed, and I'll be talking about some of those things today. So who here has heard of ENS? Right, that's pretty much everyone. That's good. That's good. Vitalik said this in 2024. ENS is our most successful non-financial application on Ethereum. And I'll go into a little bit of what ENS is for the very few people out there who don't know what it is. So ENS is for the very few people out there who don't know what it is. So ENS is a user experience protocol. And that means we put users first. And that has multiple meanings. We endeavor to create the best user experience to make the blockchain easier to navigate. We want our users to be in control of their identity, creating true, trustless ownership. And the protocol is built to be incredibly control of their identity, creating true, trustless ownership. And the protocol is built to be credibly neutral and permissionless, so anyone, anywhere can use ENS. ENS's humble beginning started with trying to solve one of the innate weaknesses of blockchain. 
Everything is a long number in hexadecimal, and it's completely unreadable. So really, how are we supposed to scale to billions of users if the first thing they do when they create a wallet is see this? ENS helps you turn that into something humans understand. A name. The simplicity of a name is familiar to all of us. As soon as we are born, we are given a name, and it forms part of our identity. Whether that large number, sometimes called a hash, represents your identity on Ethereum, a decentralized website, or something else, a name is an order of magnitude more meaningful to people. And ENS serves humans, not machines. ENS has several core principles that make our protocol user-centric. It's decentralized, meaning no single person or entity controls the levers and buttons that govern the protocol. It's permissionless, so anyone can register a name and create an identity without having to state who you are or where you're from. And lastly, the protocol itself is built to be credibly neutral. This means we're doing our best to not have codified, privileged actors in the system. Okay, definitions out of the way. Let's talk about the year so far, and we've had a really great one. If you put all of the ENS registration durations end-to-end, you'd have to start 9 million years ago. In the late Miocene epoch, Earth was a wild realm of change. Sabertooth tigers stalked sprawling grasslands while early elephants thundered across open savannas. And despite an abundance of large, sharp teeth, nobody was trying to sell Dentacoin to anyone. Now more seriously, though, it illustrates we're doing pretty damn well. Here are the basic stats. Nearly 2 million .eth names. 900,000 users, of which nearly 200,000 have set up decentralized profiles for themselves, which you can view in Ethereum Follow Protocol. But ENS is not only about .eth. A truer picture of ENS's popularity has to take into account the enormous number of names using one of our many, many partner integrations. And many of these are completely trustless and cost-free. Linea, Base, and Uni all have released subdomain registrars this year. Linea has them completely trustless on Linea L2, using state proofs on L1 for CCIP read verification. Base has them also stored on L2, but uses signature verification. And uni.eth has them completely off chain and verified using a signature. This really shows the progressive decentralization of subdomains, which can start simple and gradually be made trustless using L2s and a state proof with CCIP read. And lastly, the 1,000-pound gorilla, Coinbase. As of today, Coinbase reports they've issued over 12 million CB.ID names to their users. This marks the largest deployment of ENS names to end users since we launched, really demonstrating the value of rolling out ENS infrastructure to absolutely everyone. And that has really been the theme of the past year or more: bringing ENS to more people, more affordably, and with less know-how required. CCIP read and off-chain names have been the foundation of that and have led to a number of hugely successful integrations. Almost 16 million active names, more than 14 million more than the .eth names that exist, around 10 million users, and over 400,000 decentralized profiles. .eth only paints a small picture of the success of ENS. But if you include all of our many partner integrations, you'll really see how wide-ranging ENS truly is. But ENS is not only about subdomains. Our success is how widely integrated we are, allowing names to be used out in the wild.
Integrations are one of the most important success metrics for ENS. Imagine owning a Visa credit card, but no one accepts Visa. This is the problem we need to tackle by bringing in large integrations that allow the use of ENS among their existing users, allowing ENS name resolution to exist outside of our echo chamber. And this year has been a fantastic year for integrations, and I'd like to tell you about a few of them today. But first, while not an integration specifically, I'd like to quickly highlight gasless DNSSEC, one of our biggest technological advances in ENS to date. We've supported DNS top-level domains inside ENS almost since day one. But importing one came with a substantial fee. Gasless DNSSEC combines our trustless DNS import functionality with the gateway architecture of CCIP read and makes it possible to make nearly any DNS name function inside ENS without transactions or gas fees whatsoever. And one of the first to use the power of gasless DNSSEC was GoDaddy. GoDaddy added built-in support for every name registered through them, removing the usability barrier of understanding how to set up DNSSEC, and you can set up your DNS name to work in ENS using GoDaddy in just a few clicks. GoDaddy has over 80 million names registered with them and 20 million customers, and just this one integration on its own vastly improves the network effect of ENS. In collaboration with 3DNS, .box is the first truly blockchain-native, fully ENS-enabled DNS TLD. .box registrations are all recorded on Ethereum, and every .box name is automatically ENS-enabled. It's not just domain-related companies that are integrating us, though. Bitwise, a traditional finance company who released their Ethereum ETF product this year, has created ENS subnames for each of the addresses that hold the ETH backing the ETF. This is a great use case, and it hopefully makes tracking custody of on-chain assets more transparent and more auditable. Payment providers have started to adopt ENS too, most notably PayPal and Venmo. They rolled out their ENS support for their crypto wallets to all US users early this year. Venmo has 90 million US users, and PayPal has over 400 million accounts globally. And this really shows how ENS is super wide-ranging, reaching millions and millions of users. And lastly, can you really say you're a successful protocol until Google integrates you? Today you can search your .eth name on Google and it will automatically show you your balance and your resolved address. Google is the gateway to the internet, and ENS has become important enough to be integrated into the most used search engine. So what do these large integrations bring to ENS? Well, subdomains mean more users, more people using ENS for the first time, and a broadening of the social network ENS creates. Large integrations like PayPal, Venmo, and Google mean there are more places for ENS to be used, making ENS fundamentally more useful. In the same vein, we've also been pushing forward L2 UX with some EIPs in the pipeline, the first of which is ERC-7785, which will enshrine ENS as the namespace to allow discovery of new L2s. In the future, we hope to push further EIPs to help with the fractionalization of L2 addresses and address this problem head-on. ENS works on these problems because, of course, selfishly it helps ENS proliferate. But unselfishly, it also helps the entire Ethereum ecosystem as well. Born out of Ethereum, we as ENS have never forgotten our roots and all the values that have come out of them.
Ethereum is not just in our name, but in the DNA of ENS itself. For seven years, primary names have only existed on L1, which limits their use to people who are willing to pay Ethereum gas fees. Being able to set a primary name on any chain, effectively for free, will be a huge unlock for the Web3 UX. And this is the first example of a more flexible contract that will be seen more broadly in ENS v2, which was announced in May, and it marks the first time that any ENS contract will be officially deployed to L2. And I'm proud to announce that primary names are coming to L2 and are available on testnets today. Reverse registrars, the smart contracts that power L2 primary names, are live today on a handful of testnets and should be coming to mainnet by the end of this year. And to be clear, there's nothing for non-developers to do at this time, but if you're a developer, you can go to testnets today and play around. Now, things have really changed in the last seven years. I mean, look at our old 2017 logo. L2s are the norm now, and smart contract accounts are commonplace. Gas prices are fluctuating, and it's not sustainable to stay on L1 anymore. In May, we talked about moving to L2, driven by several key reasons. The first is reducing the overall cost of interacting with ENS. The second is reducing the number of transactions in all possible situations. And the last is we want to let all users transact from the chain of their choice without the need for the on and off ramps that can make L2 so painful. With these UX goals in mind, we are aiming to make a true improvement to ENS UX without the usual trade-offs. With that, on Monday at Friends Day, we announced our plans to build our own chain, which we're calling Namechain. Namechain is built on a few core things that are really important to us at ENS. Today, ZK EVMs are the only way to get transaction finality in a reasonable amount of time. And because ENS is one of the only applications that requires reading state from L1, we need the fastest finality we can to ensure that everything can be read from other L2s. ZK EVMs use ZK proofs to prove the correctness of a state transition on an L2 and commit this to L1. The ENS resolution process can then use CCIP read to verify the data from Namechain. Contrasting this to an optimistic rollup, the industry standard, that would require us to wait seven days before our name can be in use. The second is, of course, the stack must be completely open source, allowing it to be easily audited, edited, and forked if required. And lastly, we care about the credible neutrality and decentralization of ENS, and maintaining that is a priority. We're exploring whether shared sequencing or based sequencing makes sense for us at Namechain. As for decentralization, it is non-negotiable for us. We're going to be building L2 to L2 bridging right into the protocol, so you can do things like commit whilst you bridge and pay from your preferred L2. This will allow you to start your ENS journey from any L2, lowering the barrier to entry when you buy your first name. And this is one of the primary reasons we want to launch our own chain versus deploying to another public chain. Being able to control the entire stack from protocol to governance means that this rollup is here to serve ENS and naming. This is super important to us, as we at ENS are committed to being L2 agnostic. Namechain is the L2 we're building. But we're not just building an L2.
ENS v2 is a complete ground-up redesign of ENS, and only one component of that is having a dedicated L2. It's designed to be more flexible, more accessible, and more affordable, so we continue the momentum started with CCIP read and off-chain names and expand its utility to every user and application that needs it. So how are we going to do that? Well, first, we've redefined how names are registered and recorded with a completely redesigned registry. Second, .eth names will be recorded and by default hosted on our dedicated L2, Namechain. And finally, a user-driven migration with backwards compatibility. Let's explore these in a bit more detail. .eth on Namechain means that all registrations and renewals will no longer live on Ethereum L1, but instead on Namechain. Storing them on Namechain means that registration and renewal gas will be significantly lower. Although Ethereum has its conveniences, it's not a long-term solution for gas prices on L1. Having registration on Namechain means we can ensure registration gas costs are as close to zero as possible. However, even though .eth names will move to Namechain, name resolution largely remains untouched and begins from Ethereum itself. Let me reiterate that. ENS resolution, reading from ENS names, remains anchored on L1. And that is a fundamental difference between ENS and other protocols moving to L2. We do not fractionalize our protocol. We connect it together and remain as one. And this is how we remain L2 agnostic. L2 support was a consideration from the beginning of the design process of v2, and that means we can also deploy the v2 contracts to L2 networks. Combined with CCIP read, this makes it possible to seamlessly stitch together a coherent namespace from multiple distinct networks and L2s. This architecture reinforces how we are staying L2 agnostic, beginning everything from L1, but only moving .eth to Namechain while supporting ENS on other L2s. OK, let's talk about the complete technical rewrite of the ENS registry. Registries are contracts that store data about a name. The current registry stores the owner and resolver records, and currently all names are stored in a single flat registry. This design is straightforward, but also has a couple of significant drawbacks, one of which is that it's difficult to define custom rules for name issuance and ownership. Another is that because each name exists independently in the registry, when a name is transferred or deleted, all subnames remain unchanged. We're solving both of these problems, while adding more flexibility to the system as a whole, by introducing a new registry design in ENS v2. In v2, each name optionally has its own registry which deals exclusively with its subdomains. There's a root registry which contains all the top-level names. Each top-level name has a registry containing all the second-level names, such as nick.eth, and so forth. Using a hierarchy of registries has a number of advantages. Anyone who owns a name can now supply the registry implementation of their choice, giving full control over how subnames are issued and controlled. This allows all the functionality of the name wrapper and more, whilst preserving the flexibility for name owners to set their own rules, ownership, and even resolution.
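To make the hierarchical-registry idea concrete, here is a toy sketch in Python (not ENS contract code; the names and fields are made up for illustration): each name may own a registry for its subnames, resolution walks the labels from the root, and swapping a name's registry detaches all of its existing subnames in one step.

```python
# Toy model of a hierarchical name registry (illustrative only, not ENS code).
class Registry:
    def __init__(self):
        self.entries = {}  # label -> (owner, subregistry or None, resolver or None)

    def set_subname(self, label, owner, subregistry=None, resolver=None):
        self.entries[label] = (owner, subregistry, resolver)

def resolve(root, name):
    """Walk 'sub.name.eth' label by label from the root registry; return its resolver."""
    registry, resolver = root, None
    for label in reversed(name.split(".")):
        if registry is None or label not in registry.entries:
            return None
        _, registry, resolver = registry.entries[label]
    return resolver

root, eth, nick = Registry(), Registry(), Registry()
root.set_subname("eth", owner="dao", subregistry=eth)
eth.set_subname("nick", owner="nick", subregistry=nick, resolver="0xResolverA")
nick.set_subname("wallet", owner="nick", resolver="0xResolverB")
assert resolve(root, "wallet.nick.eth") == "0xResolverB"

# Replacing nick.eth's registry erases all of its subnames in one step:
eth.set_subname("nick", owner="nick", subregistry=Registry(), resolver="0xResolverA")
assert resolve(root, "wallet.nick.eth") is None
```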
When a name changes hands, or if an owner wants to start from scratch, they can easily replace the registry for the name entirely, erasing all existing subnames, allowing a very easy, fuss-free way of clearing up the hierarchy of your name. Likewise, because each registry exists independently, we can even insert registries at multiple places in the ENS hierarchy. When you're ready to move from a subname to a .eth name of your own, you can bring everything with you while still ensuring your old name functions as it did before. In line with ENS's core principles, moving to v2 is an entirely user-driven and opt-in process. All registrations will be transferred to the new L2 chain, because by its nature, a registry has to be stored all in one location. But the names themselves, their records, and their subnames can continue to exist on Ethereum or any other L2 or off-chain storage solution. After launch, users can migrate their names to v2 with a single transaction. This will move the name from the legacy flat ENS registry to the new hierarchical one. At the same time, they can choose to either leave the name on L1 or migrate it to Namechain. And names that aren't migrated will continue to function indefinitely. The new ENS contracts are programmed to automatically fall back to looking up names in the version 1 registry. This means that for names that are owned by immutable contracts, or for users that can't or won't upgrade, functionality of their name is safeguarded forever. It's been seven years since the inception of ENS. Google, Venmo, PayPal, GoDaddy. Wow, we couldn't have imagined seven years ago that we would get so far. And ENS is Ethereum-aligned and always will be. That's why we always enjoy coming and giving the State of ENS at Devcon, every single year since Devcon 1 or 2. And L2 reverse registrars are on testnet today; it's something we've been working on for a little while now, and we're really trying to bring the interop into Ethereum, and that's really been the theme for this Devcon. And ENS is expanding to Namechain, but remaining on L1 whilst remaining L2 agnostic. And ENS v2 will improve the UX of ENS across the board for years to come. And lastly, this talk was already too long to mention all of the amazing ENS DAO service providers this year. We're not the only ones building ENS. And it would be impossible to expand to a billion users without help. To name a few: first, the Ethereum Follow Protocol is bringing the social profiles of ENS to the masses. NameStone is powering many of the subdomain integrations, and Unruggable is building the generalized gateways that are powering CCIP read. And of course, my ENS Labs team, for building everything that we've seen today and supporting us today and for years to come. And finally, the adventure is not over. If you're looking for a position at ENS, we're always looking for good people. So if you'd like to work for ENS Labs, please check enslabs.org for job postings. Thank you, and enjoy your last day at Devcon. Thank you so much, Jeff, for that. I think that was fantastic. So as you can see here, if you've already asked your questions, that is great. What you can do is you can also upvote a question. So if you really, really want to have it answered, it can move up to the top of the questions. So we're going to start off with the first one. What about privacy? Using ENS is a major privacy leak and can dox a user. Sure. I mean, this is a question we get a lot. For us, ENS is a usability protocol on top of Ethereum and L2s.
And Ethereum and L2s have this issue regardless of ENS. So obviously you need to practice good OPSEC and make sure your address is not linked, and do the things that you do to protect yourself with addresses. Having a name on it doesn't additionally reduce your privacy. So you obviously need to practice that good OPSEC no matter whether you're using ENS or not. All right. And then we've got: Google integration supports .eth domains, but not the corresponding subdomains. Why is that? I mean, if anyone from Google here can do this, it would be fantastic if they could. Obviously .eth is our flagship product. Google really respects that and wants to do some integrations, starting with .eth. But hopefully in the future, we can really push them to do subdomains as well. And if anyone has connections at Google, please talk to us and we'll try and get that in. Okay, remember you can still add some more questions if you feel like none of the ones that you have are on here. We've got: can builders use Namechain for other stuff? Yeah, so as I said, Namechain will be on a ZK EVM most likely, and that means it's EVM compatible, so it's going to be a public chain that anyone can use. We're not going to be permissioning the chain, so you can deploy your own resolvers. All right. Then at the top here we've got: what are some of the problems that stand in our way to increase mainstream adoption of ENS? I mean, I think this applies to all of Ethereum and all of blockchain itself. It's really about improving our wallet user experience, having names and all of these things to replace addresses. And once you do that, you can get more blockchain and Ethereum support across the board, in your cafes, in your paying for things day to day. And then it'll be very easy to kind of improve the mainstream adoption of ENS. I believe ENS is already quite widely supported across blockchain. So it's really about expanding the entire ecosystem itself so ENS can serve a larger ecosystem. All right. Can Namechain be a keystore rollup as well? It is something we talked about, and I don't think it's something we can rule out. I think a keystore rollup would be absolutely great. But obviously there are technical difficulties to the keystore rollup as well. So if we can solve them for other chains, I'm sure we can do it for Namechain as well. All right, we got one that just got upvoted quite hard. How will the ENS token be integrated with Namechain? That is something up for debate right now. I believe the token is currently on L1, and for now it will stay on L1. Obviously, there are discussions about where we would put our DAO and governance contracts, but yeah, that's still up for discussion. And then a few more questions here. How does ENS compare with Unstoppable Domains? What do you guys think? How does ENS compare with Unstoppable Domains? I think we're doing pretty well. I feel like that one was definitely a trap. The next question: when will the ENS manager app support EFP? Oh, we'll have to talk about that, Brantley. All right. What has been the greatest challenge for ENS? The greatest challenge for ENS? Oh, there are many. It probably starts many, many years ago, when people didn't really believe that we were a project worth supporting, but I think we've overcome that now, and it's really like a non-starter if your application doesn't support ENS names.
So I think just overcoming that just shows we're here to stay, and we're here to help all of you builders out here.", "eventId": "devcon-7", - "slot_start": 1731391200000, - "slot_end": 1731393000000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1-hdTt4ELigY4Pe3nCr4vnQFCDtQaHLB_e-UaHGdXucE" + "slot_start": 1731641400000, + "slot_end": 1731643200000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1-gQZPtDYukgyGQgCLVng3phznkejfM-uJlR1MDiF-MQ", + "resources_slides": "https://drive.google.com/file/d/1AvEAd1lBJUlv5pYg3fJwNBUomazD-31r/view", + "speakers": [ + "jesse-pollak" + ] }, "vector": [ 0, @@ -545805,9 +544278,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -546079,7 +544549,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -546297,6 +544766,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -546599,6 +545069,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -546613,6 +545084,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -546657,7 +545129,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -546679,7 +545150,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -546757,7 +545227,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -546973,8 +545442,9 @@ 0, 0, 0, - 2, 0, + 2, + 2, 0, 0, 0, @@ -547115,7 +545585,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -547127,54 +545596,46 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "onchain-is-the-next-online", - "sourceId": "CXZ7UT", - "title": "Onchain is the next online", - "description": "The goal is to bring the world into a global onchain economy that increases innovation, creativity, and freedom — and that's only possible on a decentralized platform that’s super easy to use. In this talk, Jesse Pollak, Creator of Base, can share his insights on why building for simplicity is so important for the Ethereum ecosystem, and what he’s learned from building the fastest-growing L2.", + "id": "open-challenges-in-mini-apps-and-frames", + "sourceId": "TZDRPY", + "title": "Open challenges in Mini-apps and Frames", + "description": "There are a number of open challenges we've run into with trying to make interoperable mini-apps work at Open Frames. I'll run through some of them and what I think it'll take to get great UX via Mini-apps.", "track": "Usability", - "type": "Talk", + "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Developer", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", - "Account Abstraction", - "Paymaster", - "creators", - "Account Abstraction", - "Layer 2s" + "Social", + "UI/UX", + "frames", + "Social", + "UI/UX" ], "keywords": [ - "Account Abstraction", - "Layer 2s", - "UX", - "Wallets", - "Developer Tools" + "frames" ], - "duration": 1552, + "duration": 480, "language": "en", "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_youtubeId": "-LlOzt951z8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736e6c61b0f83434d10c862", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736e62c1b0f83434d0b798d.vtt", - "transcript_text": " Tanya Cushman Reviewer Reviewer's Name Good morning, everyone. How's everyone today? Good! My name is Jeff Lau, and I'm a co-founder of ENS. Traditionally, this talk is done by Nick Johnson, but sadly, he did not make it here today out to Thailand, so instead, I will be talking to you instead. The last time I did the E&S, State of E&S talk was in 2018 at Prague. Who here was at DEF CON 4 Prague? Wow, wow. That's actually more than I thought. 
So if we can solve them for other chains, I'm sure we can do it for Namechain as well. All right, we got one that just got upvoted quite hard. How will the ENS token be integrated with Namechain? That is something up for debate right now. I believe the token is currently on L1, and for now it will stay on L1. Obviously, this is discussions about where we would put our DAO and governance contracts, but yeah, that's still up for discussion. And then a few more questions here. How does ENS compare with unstoppable domains? What do you guys think? How does ENS compare with unstoppable domains? I think we're doing pretty well. I feel like that one was definitely a trap. The next question, when will the ENS manager app support EFP? Oh, we'll have to talk about that, Brantley. All right. What has been the greatest challenge for ENS? The greatest challenge for ENS? Oh, there are many. I think it probably starts many, many years ago, and when people didn't really believe that we were a project worth supporting, but I think we've overcome that now, and it's really like a non-starter if your application doesn't support ENS names. So I think just overcoming that just shows we're here to stay, and we're here to help all of you builders out here.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731641400000, - "slot_end": 1731643200000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1-gQZPtDYukgyGQgCLVng3phznkejfM-uJlR1MDiF-MQ", - "resources_slides": null, + "slot_start": 1731400200000, + "slot_end": 1731400800000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/10NeCTKHHZ_IznsD0BVvBmKLhLozti5XPFkZHUhhk45M", + "resources_slides": "https://drive.google.com/file/d/1MA6kr1rqx0MMROyZjFUwScQiP_APVOfK/view", "speakers": [ - "jesse-pollak" + "david-furlong" ] }, "vector": [ @@ -547980,16 +546441,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -548116,6 +546567,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -548355,34 +546807,35 @@ 0, 0, 0, - 2, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -548494,7 +546947,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -548504,6 +546956,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -548514,45 +546970,40 @@ }, { "session": { - "id": "open-challenges-in-mini-apps-and-frames", - "sourceId": "TZDRPY", - "title": "Open challenges in Mini-apps and Frames", - "description": "There are a number of open challenges we've run into with trying to make interoperable mini-apps work at Open Frames. 
I'll run through some of them and what I think it'll take to get great UX via Mini-apps.", - "track": "Usability", + "id": "open-decentralized-ai", + "sourceId": "WDMSDF", + "title": "Open + Decentralized AI", + "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Social", - "UI/UX", - "frames", - "Social", - "UI/UX" - ], - "keywords": [ - "frames" - ], - "duration": 480, + "tags": [], + "keywords": [], + "duration": 25, "language": "en", - "sources_swarmHash": "e0544223cf89ff9bbe7b382237527d59d6ad4ad2e377f957869ce72df0c49fbe", - "sources_youtubeId": "xoMfU9Gk0xc", + "sources_swarmHash": "e66f035630ef4fca4408ce06903e5ad43ca226ed677f0113ab4083ad4d808fa3", + "sources_youtubeId": "MuJtFwtH5-A", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6735d8799dbb7a90e1dbbd8a", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735d8799dbb7a90e1dbbd8a.vtt", + "transcript_text": " So when we think about open and decentralized, I think, you know, you've got your models, not your mind. I'd like to think that that's the equivalent of not your keys, not your crypto. I think open models will run the world. It will be the infrastructure upon which our societies run. And I think, you know, let's make them awesome and let's think about leveraging all this technology and innovation we're thinking about to do that. 
Because I think open source and decentralized AI has a massive advantage over these centralized ones.", "eventId": "devcon-7", - "slot_start": 1731400200000, - "slot_end": 1731400800000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/10NeCTKHHZ_IznsD0BVvBmKLhLozti5XPFkZHUhhk45M", - "resources_slides": null, + "slot_start": 1731580800000, + "slot_end": 1731581400000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/185D2a1dcM0Mnygg246mzs0j_kcxYkRpeWxUnWh_d0cs", + "resources_slides": "https://drive.google.com/file/d/11kckQWAIdqgnPw4CnF1eXJhpaRNgd-BA/view", "speakers": [ - "david-furlong" + "emad-mostaque" ] }, "vector": [ 0, + 6, 0, 0, 0, @@ -548560,7 +547011,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -549362,7 +547812,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -549483,7 +547932,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -549731,8 +548179,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -549866,6 +548312,7 @@ 0, 0, 0, + 2, 0, 0, 2, @@ -549874,8 +548321,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -549888,43 +548333,28 @@ }, { "session": { - "id": "open-decentralized-ai", - "sourceId": "WDMSDF", - "title": "Open + Decentralized AI", - "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", + "id": "open-source-orchestra-coffee-shop-welcome", + "sourceId": "RKELBQ", + "title": "Open-Source Orchestra coffee shop welcome", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], "keywords": [], - "duration": 25, + "tags": [], "language": "en", - "sources_swarmHash": "e66f035630ef4fca4408ce06903e5ad43ca226ed677f0113ab4083ad4d808fa3", - "sources_youtubeId": "MuJtFwtH5-A", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6735d8799dbb7a90e1dbbd8a", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735d8799dbb7a90e1dbbd8a.vtt", - "transcript_text": " So when we think about open and decentralized, I think, you know, you've got your models, not your mind. I'd like to think that that's the equivalent of not your keys, not your crypto. I think open models will run the world. It will be the infrastructure upon which our societies run. And I think, you know, let's make them awesome and let's think about leveraging all this technology and innovation we're thinking about to do that. 
Because I think open source and decentralized AI has a massive advantage over these centralized ones.", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731580800000, - "slot_end": 1731581400000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/185D2a1dcM0Mnygg246mzs0j_kcxYkRpeWxUnWh_d0cs", - "resources_slides": null, - "speakers": [ - "emad-mostaque" - ] + "slot_start": 1731636000000, + "slot_end": 1731639600000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1DTTbLibZzh-i4lar_fk3TZfYIUaEw5RUPBHEEHYhGG0", + "resources_slides": "" }, "vector": [ - 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -549934,6 +548364,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -550419,7 +548850,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -551256,25 +549686,35 @@ }, { "session": { - "id": "open-source-orchestra-coffee-shop-welcome", - "sourceId": "RKELBQ", - "title": "Open-Source Orchestra coffee shop welcome", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", + "id": "open-source-orchestra-zukaraoke-ktv", + "sourceId": "JBCULT", + "title": "Open Source Orchestra - ZuKaraoke KTV", + "description": "OSO brings karaoke to Devcon!", "track": "Entertainment", "type": "Music", - "expertise": "", - "audience": "Engineering", + "expertise": "Beginner", + "audience": "Hobby", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "keywords": [ + "Music", + "Karaoke" + ], + "tags": [ + "Art", + "Free Speech", + "Social" + ], "language": "en", - "speakers": [], + "speakers": [ + "veronica" + ], "eventId": "devcon-7", - "slot_start": 1731636000000, - "slot_end": 1731639600000, + "slot_start": 1731562200000, + "slot_end": 1731564000000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1DTTbLibZzh-i4lar_fk3TZfYIUaEw5RUPBHEEHYhGG0" + "resources_presentation": "https://docs.google.com/presentation/d/1LRNlRRa-nWIkUZN0OzhHcccD4YwYKnOZT21n-IMTU0Q", + "resources_slides": "" }, "vector": [ 0, @@ -551776,6 +550216,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -552051,6 +550492,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -552131,6 +550573,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -552202,14 +550645,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -552595,14 +551031,13 @@ 2, 0, 0, - 2, - 0, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -552613,34 +551048,40 @@ }, { "session": { - "id": "open-source-orchestra-zukaraoke-ktv", - "sourceId": "JBCULT", - "title": "Open Source Orchestra - ZuKaraoke KTV", - "description": "OSO brings karaoke to Devcon!", - "track": "Entertainment", - "type": "Music", + "id": "open-tech-blockchain-and-settlement", + "sourceId": "NGXHAA", + "title": "Open Tech, Blockchain, and Settlement", + "description": "In this talk, we discuss the what and why of open tech, starting with networking and the internet. 
Using the recurring progression of tech to openness, we explore the critical classes of commitments and settlement that enable blockchain to accelerate open coordination of finances, tech, and society.", + "track": "Coordination", + "type": "Talk", "expertise": "Beginner", - "audience": "Hobby", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Music", - "Karaoke" - ], "tags": [ - "Art", - "Free Speech", - "Social" + "fork", + "Consensus", + "Coordination" ], - "language": "en", - "speakers": [ - "veronica" + "keywords": [ + "Forking" ], + "duration": 1599, + "language": "en", + "sources_swarmHash": "2afaade7fef485377dc90dcd4b79e1ba6952d8647f8ab9c7a51aac698beef1e3", + "sources_youtubeId": "vfAgwVjLonc", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735847d9dbb7a90e1e872a3", "eventId": "devcon-7", - "slot_start": 1731562200000, - "slot_end": 1731564000000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1LRNlRRa-nWIkUZN0OzhHcccD4YwYKnOZT21n-IMTU0Q" + "slot_start": 1731558600000, + "slot_end": 1731560400000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1pAUfWWkDdvSVfjG3UFm9ChrQDu1XE0Ae1vmkkLNxV3A", + "resources_slides": "", + "speakers": [ + "robert-drost" + ] }, "vector": [ 0, @@ -552652,9 +551093,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -553396,34 +551837,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, + 6, 0, 0, 0, @@ -553502,7 +551916,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -553847,6 +552260,22 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -553956,63 +552385,60 @@ 0, 0, 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, 2, 0, 0, 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0 ] }, { "session": { - "id": "open-tech-blockchain-and-settlement", - "sourceId": "NGXHAA", - "title": "Open Tech, Blockchain, and Settlement", - "description": "In this talk, we discuss the what and why of open tech, starting with networking and the internet. 
Using the recurring progression of tech to openness, we explore the critical classes of commitments and settlement that enable blockchain to accelerate open coordination of finances, tech, and society.", - "track": "Coordination", + "id": "opening-ceremony", + "sourceId": "X3JSYF", + "title": "Opening Ceremony", + "description": "Don’t miss the Devcon Opening Ceremony, where we’ll set the stage for an incredible event ahead, with talks from Vitalik Buterin (Founder of Ethereum), Aya Miyaguchi (Executive Director of the Ethereum Foundation), Josh Stark (Ethereum Foundation Leadership), Skylar Weaver (Devcon Team Lead), and more surprise guests.", + "track": "Real World Ethereum", "type": "Talk", - "expertise": "Beginner", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "fork", - "Consensus", - "Coordination" - ], - "keywords": [ - "Forking" - ], - "duration": 1599, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "2afaade7fef485377dc90dcd4b79e1ba6952d8647f8ab9c7a51aac698beef1e3", - "sources_youtubeId": "vfAgwVjLonc", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6735847d9dbb7a90e1e872a3", + "speakers": [ + "skylar-weaver" + ], "eventId": "devcon-7", - "slot_start": 1731558600000, - "slot_end": 1731560400000, + "slot_start": 1731380400000, + "slot_end": 1731381300000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1pAUfWWkDdvSVfjG3UFm9ChrQDu1XE0Ae1vmkkLNxV3A", - "resources_slides": null, - "speakers": [ - "robert-drost" - ] + "sources_youtubeId": "dMLeSMcBskU", + "sources_swarmHash": "b4b199e383bcf161d7da28671901d39434e7456159cd822eaf6ccf1d802635ab", + "resources_presentation": "https://docs.google.com/presentation/d/1VG1PST0liQiPWvaWsw3TB7LQkH7HEEXqam66Ds4rCHw", + "resources_slides": "" }, "vector": [ 0, @@ -554021,15 +552447,12 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -554198,6 +552621,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -554516,7 +552940,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -554771,7 +553194,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -554921,7 +553343,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -555195,7 +553616,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -555333,6 +553753,7 @@ 0, 2, 0, + 0, 2, 0, 0, @@ -555351,29 +553772,26 @@ }, { "session": { - "id": "opening-ceremony", - "sourceId": "X3JSYF", - "title": "Opening Ceremony", - "description": "Don’t miss the Devcon Opening Ceremony, where we’ll set the stage for an incredible event ahead, with talks from Vitalik Buterin (Founder of Ethereum), Aya Miyaguchi (Executive Director of the Ethereum Foundation), Josh Stark (Ethereum Foundation Leadership), Skylar Weaver (Devcon Team Lead), and more surprise guests.", - "track": "Real World Ethereum", - "type": "Talk", - "expertise": "", - "audience": "Engineering", + "id": "opening-circle", + "sourceId": "T7THRV", + "title": "Opening Circle", + "description": "By master Zoe\r\n(Opening Session)\r\n- Nervous system check-in (to communicate safety and help people settle into the space)\r\n- Short check-in: guided meditation, breathwork, and gentle stretches (approx. 
5 minutes) to bring everyone into the present moment\r\n- Intention setting for the conference, guiding participants to align their energy and time with their vision\r\n- Sharing intentions in small groups (3-5 people) to build community connection\r\n- Closing with a gratitude practice\r\n\r\n12 Nov 14:00 - 14:45", + "track": "Entertainment", + "type": "Mixed Formats", + "expertise": "Beginner", + "audience": "Hobby", "featured": false, "doNotRecord": false, "keywords": [], "tags": [], "language": "en", - "speakers": [ - "skylar-weaver" - ], + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731380400000, - "slot_end": 1731381300000, - "slot_roomId": "main-stage", - "sources_youtubeId": "dMLeSMcBskU", - "sources_swarmHash": "b4b199e383bcf161d7da28671901d39434e7456159cd822eaf6ccf1d802635ab", - "resources_presentation": "https://docs.google.com/presentation/d/1VG1PST0liQiPWvaWsw3TB7LQkH7HEEXqam66Ds4rCHw" + "slot_start": 1731394800000, + "slot_end": 1731397500000, + "slot_roomId": "decompression-room", + "resources_presentation": "https://docs.google.com/presentation/d/1n226DY0rUYiKnECT9xm9IZ_yu2qSeuhOfgg63eVqUM0", + "resources_slides": "" }, "vector": [ 0, @@ -555382,13 +553800,10 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, + 6, 0, 0, 0, @@ -555560,7 +553975,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -556694,14 +555108,13 @@ 2, 0, 0, - 2, - 0, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -556712,36 +555125,52 @@ }, { "session": { - "id": "opening-circle", - "sourceId": "T7THRV", - "title": "Opening Circle", - "description": "By master Zoe\r\n(Opening Session)\r\n- Nervous system check-in (to communicate safety and help people settle into the space)\r\n- Short check-in: guided meditation, breathwork, and gentle stretches (approx. 5 minutes) to bring everyone into the present moment\r\n- Intention setting for the conference, guiding participants to align their energy and time with their vision\r\n- Sharing intentions in small groups (3-5 people) to build community connection\r\n- Closing with a gratitude practice\r\n\r\n12 Nov 14:00 - 14:45", - "track": "Entertainment", - "type": "Mixed Formats", - "expertise": "Beginner", - "audience": "Hobby", + "id": "opsec-for-the-dark-forest-or-how-to-avoid-getting-rekt", + "sourceId": "TAEPPF", + "title": "OpSec for the Dark Forest (or how to avoid getting rekt)", + "description": "We will focus on the most important things you need to do to have a good OpSec to survive in the Crypto Dark Forest. 
I will cover: computer, mobile phone, email, telegram, social media, phone numbers, password managers and 2FA strategy, security software & social engineering.\r\nThis is based on many years of experience and in the cases we see daily on SEAL 911.", + "track": "Security", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Privacy", + "Security", + "Hacks", + "2FA", + "dprk", + "2FA", + "Hacks", + "Privacy", + "Security" + ], + "keywords": [ + "OpSec", + "Social Engineering", + "Malware", + "0days", + "DPRK" + ], + "duration": 542, "language": "en", - "speakers": [], + "sources_swarmHash": "0fb90958816f38c563510ad9f68ada525a114c7dbdf95c1534f4a4675a6e902c", + "sources_youtubeId": "nM2BBNlIRe4", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731394800000, - "slot_end": 1731397500000, - "slot_roomId": "decompression-room", - "resources_presentation": "https://docs.google.com/presentation/d/1n226DY0rUYiKnECT9xm9IZ_yu2qSeuhOfgg63eVqUM0" + "slot_start": 1731405600000, + "slot_end": 1731406200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1jLrqWU4lm17NODOESY5ysFcreo3AXNtlq_mO-78OMZY", + "resources_slides": "https://drive.google.com/file/d/1DQLkWajUKovIrzj9DazShb8WX1o9TEXe/view", + "speakers": [ + "pablo-sabbatella" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -557243,6 +555672,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -557489,6 +555919,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -557604,6 +556035,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -557654,6 +556086,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -557740,6 +556173,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -557914,6 +556348,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -558045,7 +556480,7 @@ 0, 0, 0, - 0, + 2, 0, 0, 0, @@ -558058,8 +556493,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -558069,53 +556502,48 @@ }, { "session": { - "id": "opsec-for-the-dark-forest-or-how-to-avoid-getting-rekt", - "sourceId": "TAEPPF", - "title": "OpSec for the Dark Forest (or how to avoid getting rekt)", - "description": "We will focus on the most important things you need to do to have a good OpSec to survive in the Crypto Dark Forest. I will cover: computer, mobile phone, email, telegram, social media, phone numbers, password managers and 2FA strategy, security software & social engineering.\r\nThis is based on many years of experience and in the cases we see daily on SEAL 911.", - "track": "Security", - "type": "Lightning Talk", + "id": "optimism-retro-funding-so-far-so-good-so-what", + "sourceId": "QCMZS8", + "title": "Optimism Retro Funding: So Far, So Good, So What!?", + "description": "So far, over 50M OP has been awarded to projects with no strings attached. So good, another 800M OP is planned for future rounds. So what ... is the impact? My talk will offer an objective, data-driven perspective on the \"so what\" of Optimism's Retro Funding. 
It will include analysis on how different cohorts of projects have performed longitudinally across a variety of growth and quality metrics, while controlling for different funding and market-related effects.", + "track": "Coordination", + "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Privacy", - "Security", - "Hacks", - "2FA", - "dprk", - "2FA", - "Hacks", - "Privacy", - "Security" + "RPGF", + "Collective Intelligence", + "Open Source Software", + "grants", + "Collective Intelligence", + "Open Source Software", + "RPGF" ], "keywords": [ - "OpSec", - "Social Engineering", - "Malware", - "0days", - "DPRK" + "Data Science", + "Impact Measurement", + "Grants" ], - "duration": 542, + "duration": 1542, "language": "en", - "sources_swarmHash": "0fb90958816f38c563510ad9f68ada525a114c7dbdf95c1534f4a4675a6e902c", - "sources_youtubeId": "nM2BBNlIRe4", + "sources_swarmHash": "047944c236f3e1dd0245dbd16955b54f0bc3a72e7dfec5f04b2ab12b56574f74", + "sources_youtubeId": "pz4vGh53qSo", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673358ad3a168eb535865df1", "eventId": "devcon-7", - "slot_start": 1731405600000, - "slot_end": 1731406200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1jLrqWU4lm17NODOESY5ysFcreo3AXNtlq_mO-78OMZY", - "resources_slides": null, + "slot_start": 1731407400000, + "slot_end": 1731409200000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/13Pt_GSxCedQkGTiptcOxzfpSOiZRApdYLaDdfjTzw8A", + "resources_slides": "https://drive.google.com/file/d/1OFjPmwCYJt0dPVSjmkeQ2tJW8Tf3d5PC/view", "speakers": [ - "pablo-sabbatella" + "carl-cervone" ] }, "vector": [ - 6, 0, 0, 0, @@ -558127,6 +556555,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -558866,7 +557295,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -558898,6 +557326,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -558961,6 +557390,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -558982,7 +557412,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -559033,7 +557462,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -559120,11 +557548,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -559138,6 +557561,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -559296,7 +557721,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -559433,8 +557857,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -559451,45 +557875,46 @@ }, { "session": { - "id": "optimism-retro-funding-so-far-so-good-so-what", - "sourceId": "QCMZS8", - "title": "Optimism Retro Funding: So Far, So Good, So What!?", - "description": "So far, over 50M OP has been awarded to projects with no strings attached. So good, another 800M OP is planned for future rounds. So what ... is the impact? My talk will offer an objective, data-driven perspective on the \"so what\" of Optimism's Retro Funding. 
It will include analysis on how different cohorts of projects have performed longitudinally across a variety of growth and quality metrics, while controlling for different funding and market-related effects.", - "track": "Coordination", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Research", + "id": "optimize-zkevm-throughput-series-ii", + "sourceId": "HRDW3R", + "title": "Optimize zkEVM throughput: Series II", + "description": "There are different ways to optimize the zkEVM, the one exposed in this workshop is through optimizing the zkASM (zk assembly) code itself so that it consumes fewer counters for the same execution.\r\nThe first 40min of the workshop is a deep explanation of the zkASM language, instructions, operations, counters, build... And the rest of the time we will be live coding and explaining in detail two optimized core functions of the zkEVM so that attendees can appreciate the before and after optimizing", + "track": "Layer 2", + "type": "Workshop", + "expertise": "Expert", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "RPGF", - "Collective Intelligence", - "Open Source Software", - "grants", - "Collective Intelligence", - "Open Source Software", - "RPGF" + "ZK-EVMs", + "EVM-equivalent", + "ZKP", + "l2", + "EVM-equivalent", + "ZK-EVMs", + "ZKP" ], "keywords": [ - "Data Science", - "Impact Measurement", - "Grants" + "L2" ], - "duration": 1542, + "duration": 4575, "language": "en", - "sources_swarmHash": "047944c236f3e1dd0245dbd16955b54f0bc3a72e7dfec5f04b2ab12b56574f74", - "sources_youtubeId": "pz4vGh53qSo", + "sources_swarmHash": "b05317cd28522296f044ca89921a6b99f8b13dc344f61b21056c1ee972f4682c", + "sources_youtubeId": "EdUfOvoIhNc", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673358ad3a168eb535865df1", + "sources_streamethId": "6735c86b9dbb7a90e164abae", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731407400000, - "slot_end": 1731409200000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/13Pt_GSxCedQkGTiptcOxzfpSOiZRApdYLaDdfjTzw8A", - "resources_slides": null, + "slot_start": 1731571200000, + "slot_end": 1731576600000, + "slot_roomId": "classroom-c", + "resources_presentation": "https://docs.google.com/presentation/d/1j-dXA_XZk45fwe4mOSLfaBUXA0DVQTMQ1GLhESBsAZM", + "resources_slides": "https://drive.google.com/file/d/1ybYlkLTrFXcumfQdFQss_tgZqE37kxSv/view", "speakers": [ - "carl-cervone" + "ignasi-ramos", + "carlos-matallana" ] }, "vector": [ @@ -559500,11 +557925,11 @@ 0, 0, 0, + 6, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -559997,6 +558422,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -560278,10 +558704,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -560319,6 +558741,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -560342,7 +558765,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -560514,8 +558936,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -560571,6 +558991,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -560593,6 +559014,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -560674,6 +559096,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -560810,8 +559233,6 @@ 2, 0, 0, - 0, - 0, 2, 0, 0, @@ -560823,58 +559244,56 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "optimize-zkevm-throughput-series-ii", - "sourceId": "HRDW3R", - "title": "Optimize zkEVM throughput: Series II", - "description": "There are different ways to optimize the zkEVM, the one exposed in this workshop is through optimizing the zkASM (zk assembly) code itself so that it consumes fewer counters for the same 
execution.\r\nThe first 40min of the workshop is a deep explanation of the zkASM language, instructions, operations, counters, build... And the rest of the time we will be live coding and explaining in detail two optimized core functions of the zkEVM so that attendees can appreciate the before and after optimizing", - "track": "Layer 2", - "type": "Workshop", - "expertise": "Expert", - "audience": "Developer", + "id": "optimizing-full-node-costs-with-monitor-tools", + "sourceId": "D9UAVG", + "title": "Optimizing full node costs with monitor tools", + "description": "Running a full node is a fundamental component of participating in a decentralized network. However, the operational cost associated with running a full node can be prohibitively high, even for an archive node, it needs a lot of CPU/Memory and SSD disks. At our organization, we have successfully implemented a cost reduction strategy by using the pprof tool, along with grafana and prometheus in our node infrastructure.", + "track": "Core Protocol", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "ZK-EVMs", - "EVM-equivalent", - "ZKP", - "l2", - "EVM-equivalent", - "ZK-EVMs", - "ZKP" + "Architecture", + "Developer Infrastructure", + "Best Practices", + "service", + "level", + "improvement", + "Architecture", + "Best Practices", + "Developer Infrastructure" ], "keywords": [ - "L2" + "performance optimization", + "service level improvement" ], - "duration": 4575, + "duration": 389, "language": "en", - "sources_swarmHash": "b05317cd28522296f044ca89921a6b99f8b13dc344f61b21056c1ee972f4682c", - "sources_youtubeId": "EdUfOvoIhNc", + "sources_swarmHash": "f6620ea38e11b7ab65c0402392377e73334050231b49731c9ab35dac21d1a8c0", + "sources_youtubeId": "-ZCcYVuEKVM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735c86b9dbb7a90e164abae", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6736f8d01b0f83434dbd1a8a", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736f8d01b0f83434dbd1a8a.vtt", + "transcript_text": " Hello everyone, this is JS Visa and I come from Amber Group. Before this project, I want to introduce Amber Group. Amber Group is a leading digital finance company and in the meantime Amber Group is also holding Amber AC which is launched for the BuildQuest Web3 innovation challenges. And I'm the DA leader of the Amber Group Web3 security team and besides of my job I'm also an independent Ethereum ecosystem contributor and I'm also managing execution clients for Gaia, REST and ELEGAN. And here's a picture about my contribution over the past six years and yeah I'm continually contributing and let's continue contributing. So today I will introduce a small piece of our fund in the history, which is, let me show you what we found in the monitoring panel. You can see there are some abnormal points in the picture, and so we just found that maybe there is something wrong, and so we need to find out what's wrong. It is really very hard to find out which process indicates abnormal points. But we are lucky. We found a project, a process that uses WebSocket to connect with the guest node and pull some data back. So we found this process, then we just pip of it. This is a piece of the proof frame graph and in the picture you can see there's many CPU used to do this module and yeah very much. 
So we found that maybe there's something wrong in the process, maybe in the client or in the server. And luckily, we finally found there's something wrong in the server. So what's the problem? We just wrote some simple code to reproduce the issue. So in the picture, you can see we have two structures. One is the raw result and the other one is the interface result. The raw result uses json.RawMessage and the other one uses an interface. And we also wrote some benchmark functions to just benchmark it. It is really simple. You can just reproduce it with the same code on your machine. And here is the result. In the picture we can see, yeah, let's translate it into the chart. In the chart you can see, in the first one, this uses less CPU, and in the meantime the memory usage is also smaller than the json.RawMessage one. So here's the question. In the subscription, they store the data as json.RawMessage and they use it to marshal and unmarshal again and again. But what we want to improve is just to store it as an interface, and at the end we just return the interface and marshal it into a JSON message. 
But I think because inside the same one to marshal and unmarshal, because every time you marshal it into better understand,", "eventId": "devcon-7", - "slot_start": 1731571200000, - "slot_end": 1731576600000, - "slot_roomId": "classroom-c", - "resources_presentation": "https://docs.google.com/presentation/d/1j-dXA_XZk45fwe4mOSLfaBUXA0DVQTMQ1GLhESBsAZM", - "resources_slides": null, + "slot_start": 1731571800000, + "slot_end": 1731572400000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1DOTMyJmIPI5tdLiG_5PoOmjA44ieroq22BSvZjFN9no", + "resources_slides": "https://drive.google.com/file/d/1lG06XlSxA9fABevhQdaB45fMNMafvdM7/view", "speakers": [ - "carlos-matallana", - "ignasi-ramos" + "jsvisa" ] }, "vector": [ - 0, - 0, - 0, 0, 0, 0, @@ -561302,6 +559721,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -561376,8 +559796,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -561655,6 +560073,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -561677,6 +560096,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -561684,6 +560104,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -561698,7 +560119,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -561912,6 +560332,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -561949,7 +560370,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -561972,7 +560392,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -562055,6 +560474,7 @@ 0, 0, 2, + 2, 0, 0, 0, @@ -562183,16 +560603,13 @@ 0, 0, 0, - 0, - 0, - 0, + 2, 0, 0, 0, 2, 0, 0, - 2, 0, 0, 0, @@ -562208,53 +560625,45 @@ }, { "session": { - "id": "optimizing-full-node-costs-with-monitor-tools", - "sourceId": "D9UAVG", - "title": "Optimizing full node costs with monitor tools", - "description": "Running a full node is a fundamental component of participating in a decentralized network. However, the operational cost associated with running a full node can be prohibitively high, even for an archive node, it needs a lot of CPU/Memory and SSD disks. At our organization, we have successfully implemented a cost reduction strategy by using the pprof tool, along with grafana and prometheus in our node infrastructure.", - "track": "Core Protocol", - "type": "Lightning Talk", + "id": "oracles-for-number-values", + "sourceId": "DBKAJX", + "title": "Oracles for number values", + "description": "We will overview the history and state of research on how to design a cryptoeconomic oracle that outputs a number value. One wants such tools for price oracles, but also for bringing other information on-chain, e.g. the damages to award from an on-chain insurance contract. We will look at approaches ranging from Vitalik's 2014 SchellingCoin proposal to ideas drawing from social choice theory, including based on recent research. 
We will explore tradeoffs including resistance to several attacks.", + "track": "Cryptoeconomics", + "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Architecture", - "Developer Infrastructure", - "Best Practices", - "service", - "level", - "improvement", - "Architecture", - "Best Practices", - "Developer Infrastructure" + "Mechanism design", + "oracle", + "Mechanism", + "design" ], "keywords": [ - "performance optimization", - "service level improvement" + "Oracles" ], - "duration": 389, + "duration": 1538, "language": "en", - "sources_swarmHash": "f6620ea38e11b7ab65c0402392377e73334050231b49731c9ab35dac21d1a8c0", - "sources_youtubeId": "-ZCcYVuEKVM", + "sources_swarmHash": "165e1d88355db8f98f034cb16767ea89002ed28a00155bbcebfa882308c5dca0", + "sources_youtubeId": "qd8mYNBx3-k", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736f8d01b0f83434dbd1a8a", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736f8d01b0f83434dbd1a8a.vtt", - "transcript_text": " Hello everyone, this is JS Visa and I come from Amber Group. Before this project, I want to introduce Amber Group. Amber Group is a leading digital finance company and in the meantime Amber Group is also holding Amber AC which is launched for the BuildQuest Web3 innovation challenges. And I'm the DA leader of the Amber Group Web3 security team and besides of my job I'm also an independent Ethereum ecosystem contributor and I'm also managing execution clients for Gaia, REST and ELEGAN. And here's a picture about my contribution over the past six years and yeah I'm continually contributing and let's continue contributing. So today I will introduce a small piece of our fund in the history, which is, let me show you what we found in the monitoring panel. You can see there are some abnormal points in the picture, and so we just found that maybe there is something wrong, and so we need to find out what's wrong. It is really very hard to find out which process indicates abnormal points. But we are lucky. We found a project, a process that uses WebSocket to connect with the guest node and pull some data back. So we found this process, then we just pip of it. This is a piece of the proof frame graph and in the picture you can see there's many CPU used to do this module and yeah very much. So we found this maybe there's something wrong in the process, maybe in the client or in the server. And luckily, we finally found there's something wrong in the server. So what's the problem? We just write some simple code to reproduce the issue. So in the picture, you can see we have two structures. One is the load result and the other one is the interface result. And the load result which uses JSON.loadMessaging and the other one uses interface. And we also load some benchmark functions to just benchmark it. It is really simple. You can just reply it with the same code in your machine. And here is the result. In the picture we can see, yeah, let's translate into the chart. In the chart you can see, so in the first one, this uses less CPU and in the meantime, so memory usage is also smaller than the load messaging. So here's the question. In the subset, they store the data as JSON.loadMessaging and they use it to mature and unmature again and again. But what we want to improve is just to store it as an interface and on the end we just return the interface and mature it into JSON messaging. 
And so this is a PR way proposal in the Goethe theorem and It is much. Yeah. So fix is really easy. Not very much. You just need to find where's problems and fix it. And add some test case. Yeah. That's all. Thank you. All right. Let's give him a warm round of applause. JS Visa. You speak as fast as a choo-choo train, man. All I heard was JSON file and you're saving. So if I understand correctly, you're using JSON file to save on memory space and that's how you save your costs. Is that correct? Yeah. Okay, good. So I'm not completely an idiot. You know, I don't come from a tech background, so it can be quite challenging for me. But nevertheless, let's quickly look at the screen. We've got two questions for you, JS Visa. So let's look at the first question at the top. What do you dislike about the clients that you worked on? Get Eragon Red. Actually, I select the clients I like. So I like Gail, I like Eragon, and I like Luis. But in the meantime, for some other clients like Zawa, I really dislike them. Okay. All right. I appreciate the honesty. You select clients you like. Fantastic. How much of... Oh, excuse me. One got voted to the top. What other problems did you find in Getz P-Prof? I think maybe a lot of other issues. But in my point is you need to improve it. It depends on your workload. It depends on your company's workload. You need to first measure it and you need to monitor it and then say you can find an issue you want to fix or you want to improve. Alright. We got 30 seconds. You think you can do one more question? Let's try. Quick answer. How much of a performance improvement do you get after the PR? Actually, let me see. Maybe 13%. 13%? That's pretty good. That's pretty good. But it's just for the WebSocket and Notify, says RPC. Okay, you've got 20 seconds. Let's do one more. How come interface is faster than raw message unpassed bytes? I think I'm not really into that one. But I think because inside the same one to marshal and unmarshal, because every time you marshal it into better understand,", + "sources_streamethId": "67370d351b0f83434d27366f", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67370d351b0f83434d27366f.vtt", + "transcript_text": " Hey everybody, so I'm going to talk about oracles for number values. My name is William George, I work for the Kleros cooperative. So what's the goal here? The district called a basic blockchain oracle problem. You've probably all heard about it many times. Blockchains don't have access to information about the off-chain world. If you want them to know something about the off-chain world, you have to tell them that information. A classic example is you might want an oracle for prices of DeFi, of assets for DeFi contracts. You might want other kinds of mechanisms to bring off-chain information onto the chain, maybe the amount of rainfall in some given location for some farm insurance contract. The mechanism used to do this is called an oracle. And sometimes the thing you want the oracle to output is a number, often, in the case of price oracles. Just to give a bit of my motivation and sort of how and why I'm thinking about this problem. So for those of you who aren't familiar, Kleros is a blockchain-based dispute resolution platform. Imagine Alice, the small business owner, hires Bob as the freelancer to provide her some service, build her a website, whatever. 
She puts Bob's payment in some smart contract escrow, and if Alice is happy with Bob's work, she just clicks a button and it's released. If Alice is unhappy, then she raises a dispute. That means that there's like some crowd of users of the Kleros platform, a few of whom are drawn, and they decide who is right. And the right answer to this dispute, the correct resolution, may be a number. It may be some kind of partial settlement. How much should Bob be paid? If you have some kind of decentralized insurance contract, you might have disputes about how much of a compensation someone should get. And then basically you have this off-chain question, you know, like the answer to this off-chain dispute, and you want this, like, oracle that's specialized in dispute resolution. And notice that these questions can be, you know, more subjective, maybe more individual than the kind of price oracle question. And as a result, maybe your, like, oracle design might be slightly different for these slightly different questions. Okay. So now, like, digging really deep, how should we design an oracle? What are, like, the basic ingredients that go into an oracle? Well, on just, like, a fundamental level, you would ask, who is participating in the oracle? Obviously, you're bringing information from the off-chain world onto the chain. Somebody's got to be uploading that information. Who? What is the format of the information they provide? And assuming you have more than one person who's providing information, how do you aggregate their information if they don't provide exactly the same thing? How do you put it together into some kind of collective value that you use as the number in your system? And people probably aren't doing this altruistically. So what's in it for them? Like, are they being rewarded or penalized somehow? And as one makes these choices, what are you trying to optimize for? Well, obviously, you want the oracle to produce good information, fast, cheap. You want it to be resistant to whatever attacks are sort of relevant for your system. I'm going to talk about different choices people have made on these design choices over time, where some of the open ideas are, and how the attacks can vary from one idea to the other. I'll hit on a few. And so far all of this is relevant, like everything I've said could be true for oracles whether they're outputting number values or not. I could have some discrete information that everything I've said so far would be relevant for. And where you really get into the number question, the specifics of the number question, is this question of how precise you want your information to be. How many decimals? And this is very relevant for incentive functions. If you are rewarding people to participate in your oracle, say you want a price oracle, and you say, tell me the price of ETH in USD. Say the price is like $3,200, more or less, and I say $3,201. Am I right? Should I be rewarded? Should I be penalized? So this is a question of, for a number, you can always have a more precise, closer answer. And there's this question of how close you need to be. And over the course of the talk, we'll hit on a few different notions of close that you can build into different kinds of incentive functions. Like maybe the most basic one would be to say, okay, you need to be within X percent of the output value. Within one percent, you're rewarded. Beyond one percent, you're penalized. And that's a simple thing you could do. 
There will be other ideas that people have done. Okay. Now to hit on, you know, the sort of next ingredient that goes into an oracle, this question of vote information and aggregation. So, again, I spend a lot of time thinking about dispute resolution for the Kleros platform. Sometimes the disputes have binary answers or discrete answers that are non-binary. And as a result, I spent a lot of time in research thinking about how you aggregate kind of discrete non-numeric answers together. And this draws frequently from the field of social choice theory, this like academic field of how to design good voting systems. Voting systems that handle vote splitting well, that don't have too much tactical voting. There's all kinds of complicated questions when you're trying to define a voting system. And I bring this up because in the number case, you have similar problems. They have their own spin on these kinds of vote aggregation questions. In some sense, the question of how to aggregate people's slightly different information together gets easier, because now if your participants are submitting numbers, again, maybe the question is what is the price of ETH in USD, and everybody submits a number, now you can do number operations on those numbers. You can take the average, or better yet, you can take the median. So if you think of the numbers people provide to you, like lining them up from highest to lowest, you take the middle value, that's the median. And that's a particularly popular choice for people who have done a number of oracles because it's resistant to outlier effects. If somebody puts some crazy extreme value, that's not going to affect the median too much. So in some sense, this question of like vote aggregation is easier than in the discrete case, but also there are specific challenges like that sort of precision question I had from a few slides ago. Okay, and now hitting on the sort of last ingredient before I get into the choices that different projects have made, this question of who is participating. And I would argue that there's like two basic models you could have here, and maybe you can do some combination of the two, but you can have either a crowd model or a delegate model. And I would say that there are like parallels here to kind of different proof of stake systems, delegated proof of stake versus something that's more like Ethereum-style proof of stake. So the question is, who is doing the thing? Who is providing the information? In the proof of stake systems and the consensus algorithms, who is producing blocks? In an oracle question, who is producing oracle values? With the delegated model sort of taking the point of view that these tasks are hard. Maybe you want an oracle that produces fast values very frequently. You want zero downtime. And to have that kind of reliability, you want a beefy institutional actor that has really high performance hardware somewhere in some data center. And thus, maybe it's not appropriate for random people to be doing the task of providing the oracle information or producing the blocks in the consensus algorithm. So instead you have them vote on delegates who then do the task for you. And, you know, then the crowd model is the opposite of that. You know, everybody's doing the task. I can get 32 ETH, I can spin up a validator, I can be part of proof of stake, or in an oracle, I can be the person that's providing values for the oracle. 
And there are, of course, pluses and minuses. In the crowd model, you know, if you have, like, lots of people that are doing, like, a task constantly, that takes gas. Maybe, you know, with normal hardware, they're not, like, super fast, as fast as an institutional actor. In a delegated model, there's some risk that the delegates could abuse their role. Sure, they can be voted out, but in any given question, they're delegated. And you can mitigate that somewhat by aggregating a bunch of delegates together, so that you'd have to have some collusion or something for them to corrupt an answer. And now, to sort of summarize some of the different choices people have made over time, I'm going to start with the beginning of the history of this problem, which is a blog post by Vitalik back in 2014. He called it SchellingCoin, so this is the dawn of Ethereum. This post was actually one of the big inspirations for what became Kleros, ultimately. And he was thinking about this sort of price oracle question again, where his proposal is, okay, I have people, they each submit a number value. You output the median. So far, that's kind of normal. And then his incentive function was that you reward people in the 25 to 75% range. So people who have middle values are rewarded, and people who have extreme values are not rewarded. Maybe they're penalized. He didn't explicitly say who the participants should be. He assumed that you had some kind of Sybil resistance tool. So this could be either delegated or crowd, depending on exactly what that Sybil resistance tool looks like. But you can certainly imagine a crowd version of this. And now here, note that with this 25% to 75% rule, the notion of being close to the answer, of being close enough to be rewarded, is being closer than other people. Not just close to the value, but like, I have to beat out somebody else. Everybody can be within 1%. Not everybody can be within the middle 50 percent. So, moving on in history, like, the big oracle provider, at least for price oracles now, is Chainlink. So, I'll summarize what they do and how their choices evolved from what SchellingCoin proposed. So here they have like a marketplace of nodes that provide oracle information. Those nodes are supposed to take information from reliable sources, CoinGecko, Kaiko, whatever. And then a given price feed has a bunch of nodes that are sort of delegated into it. And you output the median of the different nodes. And those nodes receive payment for their services. So, in some sense, this is crowd-ish, because there's this marketplace. Everybody can participate. I, too, can spin up a Chainlink node. But in practice, it winds up being more like a delegated system, because any given price feed has some nodes selected. And then if you want to get rid of them, that goes back to this question of voting them out through a governance process. If I have a price feed that's used by a big DeFi application like Compound or something, if one of the nodes does a bad job, then you correct that by going on the Compound forum and having a proposal that looks kind of like this, where you're like, please update the price feed and get rid of that node. 
So now, to just wrap up this question of like delegated versus crowd, here's a sort of partial list of different projects over time. And there's kind of a spectrum where you can be sort of in the middle, but like more or less clustered into the two sides. Maker had an internal price oracle for their DAI stablecoin. And it's older than anything else on this table, so they had more of a delegated model. Really, all of the price oracle things have this delegated model, which is probably the only practical choice you could make, at least historically, because if you want a price oracle that updates in real time for DeFi applications, you can't have huge crowds of people voting constantly. The gas would be crazy. People aren't performant enough to do that. The stuff on the other side that's more crowd, Kleros and UMA, you know, have more like individual one-off cases involving human effort. So it's not super important that people are up to, you know, voting in real time. Nest has a sort of interesting thing where they are specific to price oracles, because they use like an arbitrage game as part of their mechanism, so it depends on being a price oracle. Okay, now getting back to the other questions, aggregation functions and incentive functions, which ultimately comes down to: what are the attacks? If I'm trying to manipulate the system, what can I do, and which systems are more robust? So an attack that was already highlighted by Vitalik in his SchellingCoin article is what he called micro-cheating. There's this long quote, I'm not going to read the whole thing. But basically the idea is that you can just nudge the value provided just a little bit. Like, you know what the true answer is, but you as an attacker provide a value that's just slightly to one side, whatever side you want to nudge the oracle. And if you're sophisticated, maybe you stay within the 25% to 75% range, so you're not even penalized. And maybe you can move the output a bit. To give a bit more of a visual to this: there's some distribution of how honest people, people who are really trying, will provide answers. Some people are better at this task than others. So in the absence of attackers, it will kind of average out. Here the median answer is the spot-on people. But if I have an attacker that has a few votes, not even like a majority, only three votes out of, I think, 12 or nine or something here, then by sort of going to one side, you know, if they're like a sophisticated actor that can anticipate the distribution of the honest people's votes, they can kind of unwittingly rope the people that are confused and off to one side into an attack coalition against their will. They can take the, like, confused people, join them together with the actual attack votes, and collectively they have a majority that moves the answer from spot on to okay. And then as the attacker gets more and more votes, you can drag the answer that much more. And if you think algebraically, how much can an attacker with k votes move the answer like this? Well, if your aggregation function is taking the median, then an attacker with k votes can drag the result to the (0.5 - k) / (1 - k)th percentile of the distribution of honest participants. So this gives us a measure to judge, okay, this is the resistance of the median as an aggregation tool against this kind of attack. And then we can compare that to other approaches and see which ones are more or less resistant. 
And a natural question is, does it ever make sense to do anything other than take the median? The median seems like a really robust mechanism. Lots of projects have used this. And I would say probably not, if your voters only give you a single number. But if they give you multiple numbers, you can do something that's more interesting. And now, getting to research that I have done, I've thought a lot about how to have voters provide intervals of precision, where now they provide you some lower and upper range where they think the true value lies. And if everybody's intervals overlap, well, the answer should be somewhere in the overlap. If there's some point of conflict, where there's one interval whose upper bound is strictly less than the lower bound of some other interval, then they disagree. And you can essentially have the users vote on whether you're higher or lower than a point of disagreement. I will sort of quickly go through this because I don't have tons of time. But you know, you can think of it as: if my upper bound is less than the point of conflict, I vote lower. If my lower bound is higher than the point of conflict, I vote higher. And if my interval just contains the point of conflict, well, then I didn't vote at all. I gave you less precise information. Everybody can vote like this. You can kind of resolve the points of conflict and you can come up with a collective answer. So this is an aggregation mechanism that isn't just taking the median. This is based on an academic article I wrote with a co-author, Clément Lesaege, several years ago at this point. That was based on a version of this that was slightly different, because at the time we wrote it basically trying to be compatible with Kleros v1 as it existed at the time. As of a few days ago, Kleros 2.0 has been launched, which has much more flexible mechanisms for being able to encode things like this as modules. So now we have more flexibility to do things like this in the future. So how does that aggregation mechanism I just proposed compare to taking the median? Is it more or less attack resistant? Without going too much into the details, I will just say that it kind of depends on your distribution of the honest participants, like how they act. Particularly whether people who are confused know they're confused. If people who are like out on the edges of the distribution are providing imprecise information, if they give you long intervals because they realized, oh, I don't really know the answer to this question, then this sort of voting by intervals performs better than just taking the median. If people that are wrong are really convinced, super confident in their wrong answers, then it performs not as well. There's slight effects either way, but this is the sort of analysis you can do of attack resistance for these different systems. Now, that was all about the aggregation rule. Even before you think about whether the attacker is going to be penalized or rewarded for doing an attack, or how much they're going to be penalized, this is the sort of basic question of how much an attacker that's willing to sacrifice some amount of money can drag the answer with some kind of minority attack coalition. When you get back to the incentive function, there are all kinds of interesting questions. 
And getting back to this question of how close you need to be to be rewarded: ultimately, every incentive rule that you come up with encodes a notion of distance to say whether a given participant is close or not. And for the voting-by-intervals thing, I have this really complicated formula that I use as an incentive rule, at least tentatively, that tries to balance two things. You want to encourage people to submit really small intervals, which is the first term. And at the same time, you want to really reward people if they vote on the right side of the points of conflict, so that if there's a point where the system could go a different way and choose a different answer, you raise the stakes on people, and an attacker that winds up losing loses that much more money. So if anybody's interested in that formula, feel free to talk to me; we can dig into it. And then, for the different metrics that people have used, just to summarize the three that we've looked at: there's this notion of being close as a percentage of the output; there is a notion of being close if you're closer than other people, and these are different things; and then there's that crazy formula from the previous slide, which is about being on the right side of points of conflict, so being close when it matters. And if everybody kind of agreed, it doesn't matter so much, and the formula doesn't care as much about how you voted. Summarizing the choices different projects have made here: for SchellingCoin and Chainlink, the format of the vote is just a number, and as such, the only real reasonable aggregation mechanism is just to take the median. For the interval approach, I have more complicated information, which means I can use more complicated aggregation rules. Some projects I didn't talk very much about; other projects that have vote formats of numbers also take the median. Pyth, which is an interesting example, also has something like an interval, and they have a different aggregation mechanism. And I just want to say there's a lot of room for experiments here. So, concluding. Historically, on this delegated-to-crowd spectrum, most people have been interested in price oracles. If you want a price oracle that updates really fast, you probably needed a delegated model, particularly in a high-gas environment, maybe even in a low-gas environment, just because you want people to be able to provide information with very low lag time. But as you consider more bespoke questions, subjective questions that you might have in a Kleros-style platform, now you open up this design space, and you get back to this question of digging in: what kind of oracles can we design, and does it make sense to go back to a crowd model? We've come up with measures to talk about how micro-cheating varies from one approach to another. The delegated approach leaves its incentives to just the threat of being thrown out of the platform, so they don't really have explicit incentive rules. But if you want a crowd model, you really have to think: what's my incentive rule? And there's a lot of open, interesting research there. So if you're interested in that, reach out. And I'm happy to take questions.
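As a rough sketch (my own phrasing, not the talk's formula), the three closeness notions summarized above can be written as reward predicates:

```ts
// (1) Close as a percentage of the output value.
const withinPercent = (vote: number, output: number, pct = 1): boolean =>
  Math.abs(vote - output) <= (pct / 100) * Math.abs(output);

// (2) Close relative to other people: SchellingCoin's 25th-75th percentile band.
function inMiddleBand(vote: number, all: number[]): boolean {
  const s = [...all].sort((a, b) => a - b);
  return vote >= s[Math.floor(s.length * 0.25)] &&
         vote <= s[Math.floor(s.length * 0.75)];
}

// (3) Close "when it matters": on the winning side of a resolved conflict point.
const rightSideOfConflict = (vote: number, conflict: number, wentHigher: boolean): boolean =>
  wentHigher ? vote > conflict : vote < conflict;
```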
So we've got quite a few questions streaming in. As a wider variety of RWAs get tokenized, how concerned are you that oracles become a point of failure? Yeah, so the more things you have that are integrated in important ways in your system, the more attack surface you have. On the other hand, if you have real-world assets that people are really engaged with and they're providing information, you have lots of watchers. A priori, it shouldn't be so bad, but we definitely want to think about how rigorous our oracles are. How do crowd oracles typically implement Sybil resistance? Yeah, so it depends. For Kleros, in Kleros 1.0, there is a token-weighted drawing, so there's a token that participants have to have, and you can't take over the system unless you have a large percentage of the tokens. In Kleros 2.0, we've considered additional social mechanisms: that token mechanism is still there, but you can also use proof-of-personhood tools layered on top of it. And we've had interesting results that I can point you to if you're interested, such that you can design a system where you have to break both the proof of personhood and the token-weighted draw to mount a large-scale Sybil attack. Is there an actual objectively right answer or truth for numbers, or is it something that's always subjective? I would argue, on some level, if you have a discrete question, you can truly talk about having a right answer, an answer that's better than any of the other answers. If you have a number question, people can always disagree if you zoom in enough. People might say, okay, yeah, we agree up to the thousandth decimal place, but in the ten-thousandth decimal place, I'm right and you're wrong. So in some sense, there's always some amount of subjectivity in any kind of number question. How do oracle implementations typically check that they have a sufficient quorum of answers? Yeah, so for the delegated systems, they have some number of delegates that they think is appropriate, and an individual system might have a quorum in case there's some big outage: okay, I have nine delegates, at least four of them have to be online, or something. So when you have a small number of delegates, it's straightforward. In a crowd system, you could build something like that in. In Kleros, we don't have a quorum system because there's an appeal process. So if for some reason everybody was censored, there was an outage, there was bad network connectivity and nobody voted, you can just appeal and try again. But it's certainly something to think about in some systems. And the last one is a great question. If I'm a participant in the voting crowd, what's stopping me from just copying another answer as my vote and essentially free-riding? Yeah, so again, this will depend on the system. In Kleros, there's this appeal mechanism which gives people extra rewards if they were on the right side of an answer, where they lost their voting round but it was ultimately appealed and they were proven right by the appeal round. So that gives an incentive to people to be contrarian if they think that they're right. Other approaches can be taken; you can use commit-and-reveal systems.
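A minimal commit-reveal sketch of the idea just mentioned (Node.js crypto and SHA-256 for brevity; an on-chain version would use keccak256 and store the commitments in a contract):

```ts
import { createHash, randomBytes } from "crypto";

// Commit phase: publish only a salted hash of the vote, so nobody can copy it.
const commitTo = (vote: string, salt: Buffer): string =>
  createHash("sha256").update(salt).update(vote).digest("hex");

const salt = randomBytes(32);
const commitment = commitTo("3200", salt); // posted publicly

// Reveal phase: publish (vote, salt); anyone can re-check the commitment.
console.log(commitTo("3200", salt) === commitment); // true
```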
There's still questions if you do that about what happens if someone pays you to reveal your vote even if the commit and reveal system allows you to technically hide your vote, you reveal anyway. There are lots of interesting sort of properly cryptographic questions there, but that's like a subject of research, but there are approaches. So I might require a fact check on this last question, that Polymarket has a highly centralized oracle and seems to be the largest. Do you think sentiment will drive crypto products to this model instead of algorithmic? Is it a centralized oracle first? I think they use Zuma. So, like, that's, I think, you know, like, people can fact-check me. But I believe they use, like, a crowd model. So, you know, if people are interested in that product, then there could be demand for that much more decentralization. If something takes off, people will have greater scrutiny, of course. But in general, what do you think about centralized oracles versus algorithmic or decentralized ones? It sort of depends on what you mean by centralized. Do you consider like a delegated model like centralized? I mean, it kind of depends. You know, it's not a question of like absolute centralization, it's about who has the power to do what. If you have just like a pure one actor responsible for providing you like the truth, you know, you don't have a bunch of delegates and you don't take the median of their answers. You don't have a marketplace to join or be kicked out as a delegate. And then, yeah, obviously, if you just have one absolute source of truth, that's not necessarily in the spirit of the ethos that we have. Thanks. Yeah, that was really good and quick.", "eventId": "devcon-7", - "slot_start": 1731571800000, - "slot_end": 1731572400000, + "slot_start": 1731659400000, + "slot_end": 1731661200000, "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1DOTMyJmIPI5tdLiG_5PoOmjA44ieroq22BSvZjFN9no", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1gnmIdI5LzbPxcbx7iSARUelWaUg1VuvSthLIpccggM8", + "resources_slides": "https://drive.google.com/file/d/11OwPviAyfx5yMklNDlRXWGZI7SIyX5vr/view", "speakers": [ - "jsvisa" + "william-george" ] }, "vector": [ - 0, - 0, 0, 0, 6, @@ -562681,45 +561090,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -562799,6 +561169,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -563035,7 +561406,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -563049,6 +561419,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -563058,7 +561429,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -563066,7 +561436,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -563295,7 +561664,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -563436,8 +561804,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -563479,6 +561845,43 @@ 0, 0, 0, + 2, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -563567,7 +561970,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -563576,6 +561978,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -563589,51 +561995,49 @@ }, { "session": { - "id": "oracles-for-number-values", - "sourceId": "DBKAJX", - "title": "Oracles for number values", - "description": "We will overview the history and state of research on how to design a cryptoeconomic oracle that outputs a number value. 
One wants such tools for price oracles, but also for bringing other information on-chain, e.g. the damages to award from an on-chain insurance contract. We will look at approaches ranging from Vitalik's 2014 SchellingCoin proposal to ideas drawing from social choice theory, including some based on recent research. We will explore tradeoffs including resistance to several attacks.", "track": "Cryptoeconomics", "type": "Talk", "expertise": "Intermediate", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ "Mechanism design", "oracle", "Mechanism", "design" ], "keywords": [ "Oracles" ], "duration": 1538, "language": "en", "sources_swarmHash": "165e1d88355db8f98f034cb16767ea89002ed28a00155bbcebfa882308c5dca0", "sources_youtubeId": "qd8mYNBx3-k", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67370d351b0f83434d27366f", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67370d351b0f83434d27366f.vtt", "transcript_text": " Hey everybody, so I'm going to talk about oracles for number values. My name is William George, I work for the Kleros cooperative. So what's the goal here? There's what's called the basic blockchain oracle problem; you've probably all heard about it many times. Blockchains don't have access to information about the off-chain world. If you want them to know something about the off-chain world, you have to tell them that information. A classic example is you might want an oracle for prices of assets for DeFi contracts. You might want other kinds of mechanisms to bring off-chain information onto the chain, maybe the amount of rainfall in some given location for some farm insurance contract. The mechanism used to do this is called an oracle. And sometimes the thing you want the oracle to output is a number, often, in the case of price oracles. Just to give a bit of my motivation and how and why I'm thinking about this problem: for those of you who aren't familiar, Kleros is a blockchain-based dispute resolution platform. Imagine Alice, the small business owner, hires Bob the freelancer to provide her some service, build her a website, whatever. She puts Bob's payment in some smart contract escrow, and if Alice is happy with Bob's work, she just clicks a button and it's released. If Alice is unhappy, then she raises a dispute.
That means that there's some crowd of users of the Kleros platform, a few of whom are drawn, and they decide who is right. And the right answer to this dispute, the correct resolution, may be a number. It may be some kind of partial settlement: how much should Bob be paid? If you have some kind of decentralized insurance contract, you might have disputes about how much compensation someone should get. So basically you have this off-chain question, the answer to this off-chain dispute, and you want this oracle that's specialized in dispute resolution. And notice that these questions can be more subjective, maybe more individual, than the kind of price oracle question, and as a result your oracle design might be slightly different for these slightly different questions. Okay. So now, digging really deep: how should we design an oracle? What are the basic ingredients that go into an oracle? Well, on a fundamental level, you would ask: who is participating in the oracle? Obviously, you're bringing information from the off-chain world onto the chain; somebody's got to be uploading that information. Who? What is the format of the information they provide? And assuming you have more than one person providing information, how do you aggregate their information if they don't provide exactly the same thing? How do you put it together into some kind of collective value that you use as the number in your system? And people probably aren't doing this altruistically, so what's in it for them? Are they being rewarded or penalized somehow? And as one makes these choices, what are you trying to optimize for? Well, obviously, you want the oracle to produce good information, fast and cheap, and you want it to be resistant to whatever attacks are relevant for your system. I'm going to talk about the different choices people have made on these design questions over time, where some of the open ideas are, and how the attacks can vary from one idea to the other; I'll hit on a few. And so far, all of this is relevant for oracles whether they're outputting number values or not; I could have some discrete information for which everything I've said so far would be relevant. Where you really get into the specifics of the number question is this question of how precise you want your information to be. How many decimals? And this is very relevant for incentive functions. If you're rewarding people to participate in your oracle, say you want a price oracle, and you say: tell me the price of ETH in USD. And say the price is $3,200, more or less, and I say $3,201. Am I right? Should I be rewarded? Should I be penalized? So this is the issue that, for a number, you can always have a more precise, closer answer, and there's this question of how close you need to be. Over the course of the talk, we'll hit on a few different notions of close that you can build into different kinds of incentive functions. Maybe the most basic one would be to say: okay, you need to be within X percent of the output value. Within one percent, you're rewarded; beyond one percent, you're penalized. That's a simple thing you could do, and there will be other ideas that people have used. Okay. Now to hit on the next ingredient that goes into an oracle, this question of vote information and aggregation.
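Read as a checklist, the ingredients in this passage amount to a small design space. Purely as an illustrative way to organize them (this type is mine, not the talk's):

```ts
// The oracle design space sketched in this part of the talk, as a config type.
type OracleDesign = {
  participants: "crowd" | "delegates";           // who provides values
  voteFormat: "number" | "interval";             // what a vote looks like
  aggregate: (votes: number[]) => number;        // e.g. the median
  // Incentive rule: is this vote close enough to the output to be rewarded?
  isRewarded: (vote: number, output: number, allVotes: number[]) => boolean;
};
```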
So again, I spend a lot of time thinking about dispute resolution for the Kleros platform. Sometimes the disputes have binary answers, or discrete answers that are non-binary, and as a result I've spent a lot of time in research thinking about how you aggregate discrete, non-numeric answers together. This draws frequently from the field of social choice theory, the academic field of how to design good voting systems: voting systems that handle vote splitting well, that don't have too much tactical voting. There are all kinds of complicated questions when you're trying to define a voting system. And I bring this up because in the number case you have similar problems, with their own spin on these vote aggregation questions. In some sense, the question of how to aggregate people's slightly different information gets easier, because now your participants are submitting numbers. Again, maybe the question is what is the price of ETH in USD, and everybody submits a number; now you can do number operations on those numbers. You can take the average or, better yet, the median. So if you think of the numbers people provide to you lined up from highest to lowest, you take the middle value; that's the median. And that's a particularly popular choice for people who have built number oracles, because it's resistant to outlier effects: if somebody puts in some crazy extreme value, that's not going to affect the median too much. So in some sense this question of vote aggregation is easier than in the discrete case, but there are also specific challenges, like that precision question from a few slides ago. Okay, and now hitting on the last ingredient before I get into the choices that different projects have made: this question of who is participating. And I would argue that there are two basic models you could have here, and maybe you can do some combination of the two, but you can have either a crowd model or a delegate model. And I would say that there are parallels here to different proof-of-stake systems: delegated proof of stake versus something that's more like Ethereum-style proof of stake. So the question is, who is doing the thing? Who is providing the information? In the proof-of-stake systems and the consensus algorithms, who is producing blocks? In an oracle, who is producing oracle values? The delegated model takes the point of view that these tasks are hard. Maybe you want an oracle that produces fast values very frequently, with zero downtime, and to have that kind of reliability you want a beefy institutional actor that has really high-performance hardware somewhere in some data center. And thus maybe it's not appropriate for random people to be doing the task of providing the oracle information, or producing the blocks in the consensus algorithm. So instead you have people vote on delegates who then do the task for you. And the crowd model is the opposite of that: everybody's doing the task. I can get 32 ETH, I can spin up a validator, I can be part of proof of stake; or, in an oracle, I can be the person that's providing values for the oracle. And there are, of course, pluses and minuses. In the crowd model, if you have lots of people doing a task constantly, that takes gas.
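Before moving on, a two-line illustration of the outlier-resistance point made above (made-up numbers):

```ts
// One garbage vote wrecks the mean but barely moves the median.
const votes = [3190, 3195, 3200, 3205, 3210, 1e12]; // last vote is nonsense
const mean = votes.reduce((a, b) => a + b, 0) / votes.length;
const sorted = [...votes].sort((a, b) => a - b);
const med = (sorted[2] + sorted[3]) / 2; // six values: average the middle two
console.log({ mean, med }); // mean is around 1.7e11, med = 3202.5
```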
Maybe they have normal hardware; they're not as fast as an institutional actor. In a delegated model, there's some risk that the delegates could abuse their role. Sure, they can be voted out, but in any given question, they're the delegates. And you can mitigate that somewhat by aggregating a bunch of delegates together, so that there would have to be some collusion for them to corrupt an answer. And now, to summarize some of the different choices people have made over time, I'm going to start with the beginning of the history of this problem, which is a blog post by Vitalik back in 2014. He called it SchellingCoin, so this is the dawn of Ethereum. This post was actually one of the big inspirations for what became Kleros, ultimately. And he was thinking about this price oracle question again, where his proposal is: okay, I have people, they each submit a number value, and you output the median. So far, that's kind of normal. And then his incentive function was that you reward people in the 25% to 75% range. So people who have middle values are rewarded, and people who have extreme values are not rewarded; maybe they're penalized. He didn't explicitly say who the participants should be. He assumed that you had some kind of Sybil resistance tool, so this could be either delegated or crowd, depending on exactly what that Sybil resistance tool looks like, but you can certainly imagine a crowd version of this. And note that with this 25% to 75% rule, the notion of being close enough to be rewarded is being closer than other people. Not just close to the value; I have to beat out somebody else. Everybody can be within 1%; not everybody can be in the middle 50 percentiles. So, moving on in history: the big oracle provider, at least for price oracles now, is Chainlink. So I'll summarize what they do and how their choices evolved from what SchellingCoin proposed. Here they have a marketplace of nodes that provide oracle information. Those nodes are supposed to take information from reliable sources, CoinGecko, Kaiko, whatever. A given price feed has a bunch of nodes that are delegated into it, you output the median of the different nodes, and those nodes are rewarded through payment for services. So, in some sense, this is crowd-ish, because there's this marketplace and everybody can participate; I, too, can spin up a Chainlink node. But in practice, it winds up being more like a delegated system, because any given price feed has some selected nodes. And then if you want to get rid of them, that goes back to this question of voting them out through a governance process. If I have a price feed that's used by a big DeFi application like Compound or something, and one of the nodes does a bad job, then you correct that by going on the Compound forum and making a proposal that looks kind of like this, where you say: please update the price feed and get rid of that node. So now, to just wrap up this question of delegated versus crowd, here's a partial list of different projects over time.
And there's kind of a spectrum where you can be somewhere in the middle, but projects more or less cluster into the two sides. Maker had an internal price oracle for their DAI stablecoin, and it's older than anything else on this table, so they had more of a delegated model. Really, all of the price oracle projects have this delegated model, which is probably the only practical choice you could make, at least historically, because if you want a price oracle that updates in real time for DeFi applications, you can't have huge crowds of people voting constantly. The gas would be crazy, and people aren't performant enough to do that. The stuff on the other side that's more crowd, Kleros and UMA, have more individual, one-off cases involving human effort, so it's not super important that people are up to voting in real time. Nest has a sort of interesting thing where they are specific to price oracles, because they use an arbitrage game as part of their mechanism, so it depends on being a price oracle. Okay, now getting back to the other questions, aggregation functions and incentive functions, which ultimately come down to: what are the attacks? If I'm trying to manipulate the system, what can I do, and which systems are more robust? So an attack that was already highlighted by Vitalik in his SchellingCoin article is what he called micro-cheating. There's this long quote, and I'm not going to read the whole thing, but basically the idea is that you can just nudge the value provided a little bit. You know what the true answer is, but you as an attacker provide a value that's just slightly to one side, whatever side you want to nudge the oracle. And if you're sophisticated, maybe you stay within the 25% to 75% range, so you're not even penalized, and maybe you can move the output a bit. To give a bit of a visual to this: there's some distribution of how honest people, people who are really trying, will provide answers. Some people are better at this task than others, so in the absence of attackers, it will kind of average out. Here the median answer is at the spot-on people. But if I have an attacker that has a few votes, not even a majority, only three votes out of, I think, twelve or nine or something here, then by going to one side, if they're a sophisticated actor that can anticipate the distribution of the honest people's votes, they can wrap the people that are confused and off to one side into an attack coalition against their will. They can take the confused people, join them together with the actual attack votes, and collectively they have a majority that moves the answer from spot on to just okay. And then as the attacker gets more and more votes, you can drag the answer that much more. And if you think algebraically, how much can an attacker with a fraction k of the votes move the answer like this? Well, if your aggregation function is taking the median, then an attacker with a fraction k of the votes can drag the result to the (0.5 - k)/(1 - k)th percentile of the distribution of honest participants. So this gives us a measure to judge the resistance of the median as an aggregation tool against this kind of attack. And then we can compare that to other approaches and see which ones are more or less resistant. And a natural question is, does it ever make sense to do anything other than take the median? The median seems like a really robust mechanism, and lots of projects have used it.
And I would say probably not, if your voters only give you a single number. But if they give you multiple numbers, you can do something that's more interesting. And now, getting to research that I have done, I've thought a lot about how to have voters provide intervals of precision, where now they provide you some lower and upper range where they think the true value lies. And if everybody's intervals overlap, well, the answer should be somewhere in the overlap. If there's some point of conflict, where there's one interval whose upper bound is strictly less than the lower bound of some other interval, then they disagree, and you can essentially have the users vote on whether the answer is higher or lower than a point of disagreement. I will go through this quickly because I don't have tons of time, but you can think of it this way: if my upper bound is less than the point of conflict, I vote lower. If my lower bound is higher than the point of conflict, I vote higher. And if my interval just contains the point of conflict, well, then I didn't vote at all; I gave you less precise information. Everybody can vote like this, you can resolve the points of conflict, and you can come up with a collective answer. So this is an aggregation mechanism that isn't just taking the median. This is based on an academic article I wrote with a co-author, Clément Lesaege, several years ago at this point. That was based on a version of this that was slightly different, because at the time we wrote it basically trying to be compatible with Kleros v1 as it existed then. As of a few days ago, Kleros 2.0 has been launched, which has much more flexible mechanisms for being able to encode things like this as modules, so now we have more flexibility to do things like this in the future. So how does that aggregation mechanism I just proposed compare to taking the median? Is it more or less attack resistant? Without going too much into the details, I will just say that it kind of depends on your distribution of honest participants, on how they act. Particularly, if people who are confused know they're confused, if people who are out on the edges of the distribution are providing imprecise information, if they give you long intervals because they realized, oh, I don't really know the answer to this question, then this sort of voting by intervals performs better than just taking the median. If people that are wrong are really convinced that they're right, super confident while being wrong, then it performs not as well. There are slight effects either way, but this is the sort of analysis you can do of attack resistance for these different systems. Now, that was all about the aggregation rule. Even before you think about whether the attacker is going to be penalized or rewarded for doing an attack, or how much they're going to be penalized, there is this basic question of how much an attacker that's willing to sacrifice some amount of money can drag the answer with some kind of minority attack coalition. When you get back to the incentive function, there are all kinds of interesting questions. And getting back to this question of how close you need to be to be rewarded: ultimately, every incentive rule that you come up with encodes a notion of distance to say whether a given participant is close or not.
And for the voting-by-intervals thing, I have this really complicated formula that I use as an incentive rule, at least tentatively, that tries to balance two things. You want to encourage people to submit really small intervals, which is the first term. And at the same time, you want to really reward people if they vote on the right side of the points of conflict, so that if there's a point where the system could go a different way and choose a different answer, you raise the stakes on people, and an attacker that winds up losing loses that much more money. So if anybody's interested in that formula, feel free to talk to me; we can dig into it. And then, for the different metrics that people have used, just to summarize the three that we've looked at: there's this notion of being close as a percentage of the output; there is a notion of being close if you're closer than other people, and these are different things; and then there's that crazy formula from the previous slide, which is about being on the right side of points of conflict, so being close when it matters. And if everybody kind of agreed, it doesn't matter so much, and the formula doesn't care as much about how you voted. Summarizing the choices different projects have made here: for SchellingCoin and Chainlink, the format of the vote is just a number, and as such, the only real reasonable aggregation mechanism is just to take the median. For the interval approach, I have more complicated information, which means I can use more complicated aggregation rules. Some projects I didn't talk very much about; other projects that have vote formats of numbers also take the median. Pyth, which is an interesting example, also has something like an interval, and they have a different aggregation mechanism. And I just want to say there's a lot of room for experiments here. So, concluding. Historically, on this delegated-to-crowd spectrum, most people have been interested in price oracles. If you want a price oracle that updates really fast, you probably needed a delegated model, particularly in a high-gas environment, maybe even in a low-gas environment, just because you want people to be able to provide information with very low lag time. But as you consider more bespoke questions, subjective questions that you might have in a Kleros-style platform, now you open up this design space, and you get back to this question of digging in: what kind of oracles can we design, and does it make sense to go back to a crowd model? We've come up with measures to talk about how micro-cheating varies from one approach to another. The delegated approach leaves its incentives to just the threat of being thrown out of the platform, so they don't really have explicit incentive rules. But if you want a crowd model, you really have to think: what's my incentive rule? And there's a lot of open, interesting research there. So if you're interested in that, reach out. And I'm happy to take questions. So we've got quite a few questions streaming in. As a wider variety of RWAs get tokenized, how concerned are you that oracles become a point of failure?
Yeah, so the more things you have that are integrated in important ways in your system, the more attack surface you have. On the other hand, if you have real-world assets that people are really engaged with and they're providing information, you have lots of watchers. A priori, it shouldn't be so bad, but we definitely want to think about how rigorous our oracles are. How do crowd oracles typically implement Sybil resistance? Yeah, so it depends. For Kleros, in Kleros 1.0, there is a token-weighted drawing, so there's a token that participants have to have, and you can't take over the system unless you have a large percentage of the tokens. In Kleros 2.0, we've considered additional social mechanisms: that token mechanism is still there, but you can also use proof-of-personhood tools layered on top of it. And we've had interesting results that I can point you to if you're interested, such that you can design a system where you have to break both the proof of personhood and the token-weighted draw to mount a large-scale Sybil attack. Is there an actual objectively right answer or truth for numbers, or is it something that's always subjective? I would argue, on some level, if you have a discrete question, you can truly talk about having a right answer, an answer that's better than any of the other answers. If you have a number question, people can always disagree if you zoom in enough. People might say, okay, yeah, we agree up to the thousandth decimal place, but in the ten-thousandth decimal place, I'm right and you're wrong. So in some sense, there's always some amount of subjectivity in any kind of number question. How do oracle implementations typically check that they have a sufficient quorum of answers? Yeah, so for the delegated systems, they have some number of delegates that they think is appropriate, and an individual system might have a quorum in case there's some big outage: okay, I have nine delegates, at least four of them have to be online, or something. So when you have a small number of delegates, it's straightforward. In a crowd system, you could build something like that in. In Kleros, we don't have a quorum system because there's an appeal process. So if for some reason everybody was censored, there was an outage, there was bad network connectivity and nobody voted, you can just appeal and try again. But it's certainly something to think about in some systems. And the last one is a great question. If I'm a participant in the voting crowd, what's stopping me from just copying another answer as my vote and essentially free-riding? Yeah, so again, this will depend on the system. In Kleros, there's this appeal mechanism which gives people extra rewards if they were on the right side of an answer, where they lost their voting round but it was ultimately appealed and they were proven right by the appeal round. So that gives an incentive to people to be contrarian if they think that they're right. Other approaches can be taken; you can use commit-and-reveal systems. There are still questions, if you do that, about what happens if someone pays you to reveal your vote: even if the commit-and-reveal system allows you to technically hide your vote, you reveal anyway.
There are lots of interesting sort of properly cryptographic questions there, but that's like a subject of research, but there are approaches. So I might require a fact check on this last question, that Polymarket has a highly centralized oracle and seems to be the largest. Do you think sentiment will drive crypto products to this model instead of algorithmic? Is it a centralized oracle first? I think they use Zuma. So, like, that's, I think, you know, like, people can fact-check me. But I believe they use, like, a crowd model. So, you know, if people are interested in that product, then there could be demand for that much more decentralization. If something takes off, people will have greater scrutiny, of course. But in general, what do you think about centralized oracles versus algorithmic or decentralized ones? It sort of depends on what you mean by centralized. Do you consider like a delegated model like centralized? I mean, it kind of depends. You know, it's not a question of like absolute centralization, it's about who has the power to do what. If you have just like a pure one actor responsible for providing you like the truth, you know, you don't have a bunch of delegates and you don't take the median of their answers. You don't have a marketplace to join or be kicked out as a delegate. And then, yeah, obviously, if you just have one absolute source of truth, that's not necessarily in the spirit of the ethos that we have. Thanks. Yeah, that was really good and quick.", + "sources_streamethId": "67349f549dbb7a90e13a18b4", "eventId": "devcon-7", - "slot_start": 1731659400000, - "slot_end": 1731661200000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1gnmIdI5LzbPxcbx7iSARUelWaUg1VuvSthLIpccggM8", - "resources_slides": null, + "slot_start": 1731494400000, + "slot_end": 1731495000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1tieWVdz2ClCZUAnL4cwbHgtEkk_tNIfgbdodCv6BfoY", + "resources_slides": "https://drive.google.com/file/d/1bCRsuuduwwQ4gaPIm-pDY3ZyV6QWIHqB/view", "speakers": [ - "william-george" + "miros" ] }, "vector": [ 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -564386,10 +562790,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -564429,6 +562829,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -564494,6 +562896,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -564814,8 +563217,6 @@ 0, 0, 2, - 2, - 2, 0, 0, 0, @@ -564947,11 +563348,9 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, + 2, 0, 0, 0, @@ -564964,45 +563363,51 @@ }, { "session": { - "id": "our-cypherpunk-approach-to-self-sovereign-digital-identity-does-not-work-in-real-world", - "sourceId": "USJSPF", - "title": "Our (Cypherpunk) approach to Self-Sovereign Digital Identity does not work in real world", - "description": "For years our community is using cryptography and privacy-enhancing technologies trying to build solutions that will bring people control over their digital identities. 
How far have we got?\r\n\r\nThis talk would like to expose a gap that exists between our Cypherpunk approach to SSI and what a real world project needs / wants / can do.\r\n\r\nIf we want our SSI solutions to bring control over their digital identities back to people, it seems we need to take a different approach.", - "track": "Cypherpunk & Privacy", - "type": "Lightning Talk", + "id": "panel-source-code-verification", + "sourceId": "UJJPSH", + "title": "Panel: Source Code Verification", + "description": "Source code verification is the basis of trustlessness and transparency in blockchains.\r\nMany projects do source code verification but there hasn't been much collaboration and public interaction. The panel will bring members from the new collective \"Verifier Alliance\" together to create an open discussion.\r\n\r\nTopics include open-data and open-source, standardization, future challenges like state and data growth, multichain, monetization, and financial sustainability", + "track": "Developer Experience", + "type": "Panel", "expertise": "Beginner", - "audience": "Community", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "ssi", - "Digital Sovereignty", - "Identity", - "Privacy" + "Developer Infrastructure", + "User Experience", + "blocks", + "explorer", + "Developer Infrastructure", + "User Experience" ], "keywords": [ - "ssi" + "Source Code Verification", + "Block Explorers" ], - "duration": 442, + "duration": 241, "language": "en", - "sources_swarmHash": "9310afeb1f82b238e351a86f537a6eb43ee59f033defe00ad24d7a0cfec43778", - "sources_youtubeId": "7q2YD5QUHmo", + "sources_swarmHash": "d6d57f8aec03fc28074f6e2d132ac76edc081fbb1a655a7ed4b49af8c342d5d4", + "sources_youtubeId": "XBYmL8ICFyQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67349f549dbb7a90e13a18b4", + "sources_streamethId": "67348f1d9dbb7a90e1860f67", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731494400000, - "slot_end": 1731495000000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1tieWVdz2ClCZUAnL4cwbHgtEkk_tNIfgbdodCv6BfoY", - "resources_slides": null, + "slot_start": 1731493800000, + "slot_end": 1731497400000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1q-4HjJon6v4PjMBDOXwQwQS2B6fgTj_TjlTh6teEZd0", + "resources_slides": "https://drive.google.com/file/d/1F5Krv5veS-IABDwYpfWBM73Z1Su47EmK/view", "speakers": [ - "miros" + "kirill-fedoseev", + "kaan-uzdogan", + "gary-thung", + "giacomo-barbieri" ] }, "vector": [ - 0, - 0, 0, 0, 0, @@ -565055,6 +563460,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -565345,6 +563751,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -565509,6 +563916,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -565764,6 +564172,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -565802,7 +564211,6 @@ 0, 0, 2, - 2, 0, 0, 0, @@ -565868,11 +564276,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -565900,6 +564303,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -566315,16 +564719,14 @@ 0, 0, 0, - 0, - 0, 2, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -566337,55 +564739,53 @@ }, { "session": { - "id": "panel-source-code-verification", - "sourceId": "UJJPSH", - "title": "Panel: Source Code Verification", - "description": "Source code verification is the basis of trustlessness and transparency in blockchains.\r\nMany projects do source code verification but there hasn't been much collaboration and public interaction. 
The panel will bring members from the new collective \"Verifier Alliance\" together to create an open discussion.\r\n\r\nTopics include open-data and open-source, standardization, future challenges like state and data growth, multichain, monetization, and financial sustainability", - "track": "Developer Experience", - "type": "Panel", - "expertise": "Beginner", - "audience": "Engineering", + "id": "passkeys-the-good-the-bad-the-ugly", + "sourceId": "XFLPAR", + "title": "Passkeys : the good, the bad, the ugly", + "description": "Passkeys are the new hype for easy onboarding, but it's a quite old protocol that has been hijacked for crypto purposes. We'll dig through the standard history, the potentially misleading security expectations, and see how to reverse engineer an implementation to validate its soundness", + "track": "Security", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "Developer Infrastructure", - "User Experience", - "blocks", - "explorer", - "Developer Infrastructure", - "User Experience" + "Security", + "Account Abstraction", + "TEE", + "Account Abstraction", + "Security" ], "keywords": [ - "Source Code Verification", - "Block Explorers" + "TEE" ], - "duration": 241, + "duration": 1528, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "ba22dd73d7a6bf4d9643ab45b06a1aeb4b9628b46d99373ca721a38b46437d5e", + "sources_youtubeId": "TEjNSr8jjUI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67348f1d9dbb7a90e1860f67", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "673982fe1b0f83434d5ab3b3", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673982fe1b0f83434d5ab3b3.vtt", + "transcript_text": " Okay, so Makupai everybody. Today we'll be talking about passkeys, we'll be talking about security and memes as well. So you'll see it will be entertaining, I hope. i'll be going fast because i have a lot of slides so don't sleep so first you might be wondering what you are doing in this room why are we talking about identity and access management protocol phylo means fast identity online so why are we here it looks very complex server blocks client blocks a lot of stuff we don't do with Ethereum. But if we are DGENs, like we all are, we don't really care about all the parts on the server. We care about the fact that with passkeys, we can generate keys on the web browser. And this is very interesting for us because we like keys, of course. So next slide. Yes. So if I want to go through a Presentation as a FIDO protocol in one slide, I would say that FIDO is an authentication protocol You have a registration phase where you will create a key You will bind it to a web origin and then you have an authentication phase where you will get a challenge You will sign it and so you can verify that you are in the right place We have recent gas optimizations that allow us to run the FIDO protocol on-chain, which is why it's interesting, and there are a lot of abstractions going in FIDO, so a lot of different implementations of this protocol going on. Why is this interesting? Again, we got a God tier UX on mobile with account abstraction with passkeys. If you have not tried it already, I will suggest trying the Coinbase Smart Wallet, which is very impressive. But you get something that is very close to Web2 experience. 
You just use your biometrics, and you have a wallet. This wallet is self-custodial, so you can pretty much do whatever you want. And, well, it's super easy to create an account on mobile with passkeys. On desktop, it's a bit different, but it's standard as well. It's good because you have a common interface: you have a QR code, you can redirect to your mobile. If you have an Android phone, you can avoid scanning the QR every time; you can scan it once and then it will be recognized automatically, but that part is proprietary. So it's not a great interface, but at least it's standard across all the implementations, so you have a way to connect your desktop to your mobile, and it's not too confusing, let's say. For a web developer, the experience is pretty standardized and pretty simple as well, because this was adopted by the W3C under the name WebAuthn. You pass common parameters when you register, like the user information, the key types, the challenge. So, two APIs, super simple. I have been going through this very, very quickly because we are not yet at the interesting part. The interesting part starts now, because you are all wondering where the key is stored. We say we have keys, but where are they stored? This is the real question. And unfortunately, to answer this, we have to go through a very, very large bit of history, so I will be just doing this. The FIDO story starts in 2013 with two protocols, Universal 2nd Factor (U2F) and the Universal Authentication Framework (UAF). Universal 2nd Factor is the best known, and both protocols are mostly stateless. Stateless meaning that credentials are not saved in the device: credentials are generated in the device and saved on the server, so the device can be used to generate an infinite number of credentials. It was initially launched by Google and Yubico. 2014 is the real commercial launch, with two devices: a YubiKey, and one device on which I worked. If you look at it, you might recognize the early Ledger design, very similar. In 2018, FIDO2 is launched. FIDO2 is basically a fusion between U2F and UAF, and it introduces something new: the concept of resident, or discoverable, credentials. Those credentials are stored in the device, so this completely changes the protocol, because now it goes stateful. In 2019, WebAuthn is launched by the W3C, so it's basically FIDO standardized by the W3C. At the same time, on Android, the StrongBox API appears, which is the equivalent of the Secure Enclave on iOS devices, so a very secure place to store keys, and Android got FIDO2 certified. I think you are getting the trend here. We get iOS support in 2020, and we see that FIDO starts to be supported on devices which are very secure. So it all looks very good at the moment, because we have dedicated devices with secure hardware to support FIDO, and we have it in phones with strong security. And in 2022, things start changing, because we get a shared announcement from Google, Apple, and Microsoft saying: this protocol looks nice, we are going to support it a lot more. And usually when you get this kind of announcement, it's the beginning of assimilation. And this assimilation has a name: passkeys. So, well, first thing we can say: in 2023, it starts with the introduction of syncable credentials, credentials that can be synced to different devices, either with a proprietary scheme or a password manager. So here's the illustration. It's a big temple because we are in Bangkok. Temples are called Wat. So this is Wat Pho.
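Backing up to the two WebAuthn calls mentioned a moment ago, they look roughly like this (a minimal sketch inside an async context; rp, user, and challenge values are placeholders, and in a real flow the challenge comes from the server):

```ts
// Registration: create a credential bound to this web origin.
const creationChallenge = crypto.getRandomValues(new Uint8Array(32));
const credential = await navigator.credentials.create({
  publicKey: {
    challenge: creationChallenge,
    rp: { id: "example.com", name: "Example" },
    user: { id: new Uint8Array(16), name: "alice", displayName: "Alice" },
    pubKeyCredParams: [{ type: "public-key", alg: -7 }], // -7 = ES256 (P-256)
  },
});

// Authentication: sign a fresh challenge so the server can verify you.
const assertion = await navigator.credentials.get({
  publicKey: { challenge: crypto.getRandomValues(new Uint8Array(32)) },
});
```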
I didn't visit it yet, but yeah, Wat. And in 2024, the FIDO Alliance finally performs a general rebranding around passkeys. So now a passkey is just the name for a FIDO credential, and work starts on a specification to make the synchronization a bit less proprietary. So we might ask ourselves: what is a syncable credential? Because if we search for this in the documentation, we won't find any reference to it. That's because the proper name for it is multi-device credential. So that's one thing. And if we want to define it, we can say it's a discoverable or resident credential in the context of a smartphone. But the only way to know if a credential is really syncable is to look at the answer that you get when you register it. You will get a flag which is called backup eligibility; if it's set to one, then the credential can be synced. So it's something on top of the spec, not super easy to get. The real problem starts now, because we have several security misconceptions around FIDO. The first one is that FIDO protects against phishing, not malware. So we can't really expect FIDO to be good at protecting against malware, but at the same time, it is implemented on secure devices. So we think that the key is going to be strongly protected, because this is the way that FIDO was defined. Then we can ask what the consequence of introducing syncable credentials is for this. And to make it even worse, when you look at the implementations of FIDO by crypto people, we abuse it routinely, because FIDO is only designed for authentication. When you use it to sign transactions, we can't really verify what you are signing. And if we lose a key, the impact is much more important for crypto than it is for authentication, because you can always revoke an account, but you can't revoke a transaction on the blockchain. Then, FIDO is designed to be bound to the web origin. If we want to build a common web wallet, we are going to break this property by design, so we have to hack around it, but it's a hack around the specification. And just to say that we have been abusing this protocol for a long time, I'm not especially proud of it, but just mentioning it: I used U2F to communicate between Ledger and MetaMask, sorry, and MyEtherWallet in 2016, because there was no way to communicate between a browser and a USB device. So it was used as a tunnel at that time. Short break: maybe why secure hardware is important. Secure hardware is important because it will protect the key against malware. Any hardware does that, but secure hardware is supposed to do it better. Then it will protect the key against physical attacks. Physical attacks are the last line of defense: when an attacker has access to a device, you usually assume that your key is lost; secure hardware is supposed to protect you against this. But even more importantly, secure hardware will protect you against passive attacks, meaning trying to obtain the key by listening to what the chip is doing, by listening to electromagnetic radiation, power differences, that kind of thing. And the only way to really protect against this is by using dedicated hardware, because even if you use the best open source library, like libsecp256k1 here, it needs to be customized to your chip to avoid leaking information. So if you are not working with secure hardware, you're going to have problems.
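As an aside, the backup-eligibility flag mentioned above can be read directly off the authenticator data returned at registration; a minimal sketch (in WebAuthn, the flags byte sits at offset 32, right after the 32-byte rpIdHash):

```ts
// Is this credential syncable? Check the BE (backup eligibility) flag.
function isSyncable(authenticatorData: Uint8Array): boolean {
  const flags = authenticatorData[32]; // flags byte follows the rpIdHash
  return (flags & 0x08) !== 0;         // bit 3 = BE: backup eligible
  // bit 4 (0x10) is BS: whether the credential is currently backed up
}
```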
That's basically it: you are just one speculative leak away from losing the key, for example. So what is the FIDO security model for non-syncable credentials? They cannot be extracted by malware. This is very important. The authentication is always done at the enclave level. The enclave is basically in charge: it's holding the key, it's doing the authentication. If you want to do something with the enclave, you have to authenticate; this cannot be bypassed. So a very strong malware that managed to modify the kernel of the device can fool you into signing something, but it would have to do it every time, and it can't do anything else. Now let's think about some hypotheses for the synchronization. The first one will be the good hypothesis. In that case, we imagine that there is a hardware security module sitting at Google and Apple, and this hardware security module runs a synchronization protocol between two enclaves. In that case, the security model doesn't change: the credential is never exposed, everything is good. At least it doesn't change. Now the bad synchronization hypothesis. In that case, the key is still owned by the enclave, but you have a way to put it into the application processor to start the synchronization mechanism. In that case, a malware could be able to extract the key, but only after prompting the enclave to start the synchronization protocol. And finally, the ugly synchronization hypothesis, where the enclave might not even be used anymore. The key is in the application processor, everything is in the clear, and a malware could extract the key without needing to be as sophisticated as in the previous cases. And unfortunately, the only way to know what the implementation is, is to reverse it. So we are going to do this. First, on iOS. To do this, we need to act as a malware. iOS is a bit difficult to jailbreak, as you know, so I made a reference to a recent jailbreak to show you how complex this is, but since we can synchronize, we can jailbreak an older iOS device and see what happens. For that, we are going to use the checkm8 bug, which is quite powerful and allows us to jailbreak a lot of older iPhones. I use palera1n for that, but you might want to use another exploit. It's a bit hard to run, but you will manage to run it if you want. Then we can dump the keychain. We have some information about the keychain from Apple, but here the first thing that we notice is that there is no security property that says the credential needs to be authenticated every time. It's basically authenticated when the device is unlocked, and then it's not going to be locked again. So we see a first problem here. Then, digging into it, dumping the keychain itself, we get more information. There was a first attempt that was done on an older version of iOS, a description of what the keychain looks like. Basically, we have items in the keychain which are defined by the metadata and the secret itself. The metadata and secret are wrapped by a key which is handled by the platform, so you need to have the device in order to decrypt this. And you have an additional indirection level for metadata.
So you have an extra key which will be decrypted by the platform as well, but the scheme is always metadata and secret protected by a key, basically. So there is a small difference between the two item versions, and basically, knowing this, you can fix what is on the internet today and you can make it dump the recent items, because Protocol Buffers is very easy to describe, I mean it's self-describing, so you can modify this. When you look at the decrypted item, you'll see that the length of the value is 65 bytes plus 32, starting with 04. If you have played with keys, you think that, okay, this might just be the public key and the private key concatenated. You can verify it by dumping the key, verifying that the public key associated to the private key is the right one, and it is the right one. So this means we have a way to decrypt. A malware has a way to decrypt the key. It can steal it, and it's fully handled by the application processor on iOS. On Android, we can do the same thing. It's much easier to jailbreak an Android phone, because you can just unlock the bootloader, flash Magisk, and you're done. And to look at the application, we'll use a framework called Frida, which will let us introspect the application and inject code to understand what the program is doing. First question: it's very clear on iOS where the keychain is, on Android not that much. So we want to know where we are, and for that we are going to look at the logs. We see a lot of logs referring to GMS, for Google Mobile Services. Looks like a good place to start, so we will instrument it. To instrument it, we'll just use the signature API in Java, and we will ask for the class that is being used, because that way we can dig further, and we can try to know exactly what is happening. So we do that with GMS. We see finally the name of the class. And we can find some information about that class on the internet. We see that it's a wrapper to another class. So we still don't know if the credential is handled by the secure enclave on the device. But at least we can dig further. So we start instrumenting again. And this time, we will dump the key. So we will assume that if we have the right class, we can cast it, and we can ask it to dump the key. And it works. So we can dump the key, we can verify it's the right private key, it matches the public key, which means that on Android and iOS, we have verified that the key is handled by the application processor. As a bonus on Android, so it's yet another big Wat here, we get the key before the user authentication, which means that there is a caching mechanism that is loading the key, and user authentication is just there basically to make you think that it's secure, but it's not really secure. Finally, looking at an external password manager, here we have absolutely no expectation, so it's good. I did the example with Bitwarden. You can see when you dump it that the credential is listed as a public key. Bad news, it's not a public key. Of course, it's a private key, so we can just dump it, look at it, look at the private key, verify that it matches the public key. Exactly the same thing.
So if you expected some security by saving your passkey to a password manager, you have none, which is completely the expected result. So to summarize, this is where we are today on smartphones. We have to choose between secure credentials or back-upable credentials, which might not be a good thing, because you want both. So the passkeys are handled by the application processor. They are easy to extract by a malware when the device is unlocked, and there are physical attacks applicable, which is probably the worst part, because something might be able to dump those keys at a later stage. So user-presence enforcement is not really linked to usage of the key. And non-syncable passkeys, things don't change there. They are still very secure. So we can wonder, how did we get there? One way to say that is that, well, we have conflicting rules between the vendors. So Google says that you can inject a key into the enclave, and Apple says that this is absolutely forbidden: you cannot inject a key into the enclave. So this might be the reason why the enclaves were not modified and not used in that case. And all agree that the key cannot be exported from the enclave, so that's another good reason why it's not implemented that way. So FIDO introduced some difference between enterprise and consumer passkeys. So this can be another sign that things are not going very, very well. If we wonder what will happen in the future, it's not going to change much, because here we have a draft regarding the synchronization protocol, which does not describe how the key is used. If we want to add a trust anchor to the key, so if we want to link a device-bound passkey to the key, there were a few extensions to do that, and they were finally dropped. And in the end, well, if we want a better UX for passkeys, this is also going to be dropped. So we had three possible improvement protocols that were dropped. So we can think that this is not going to change much. If you are disappointed by platform authenticators, you can always rely on external passkey implementations. So I listed a few open source ones. The good news is that you can use your favorite hardware wallet if you want. The application is open source and you get a backupable passkey. So it can be another option. What happens on-chain? Well, I have one minute, so maybe I will go over there very fast. Cartridge initiated the move on StarkNet. So passkeys are now very popular, because we have been running several optimizations in order to be able to verify passkeys on-chain very efficiently. I have listed a few kernels that you can use with passkeys: ZeroDev, Safe Core, or Coinbase Smart Wallet. Here's the main difference. I will just look at how they are using passkeys. You make no difference with a common credential on ZeroDev. On Safe Core, passkeys are supported but discouraged. If you look at the specifications, Safe Core will tell you it's better to associate the passkey with a regular credential. And if you want to run it on Coinbase Smart Wallet, they support passkeys, they also support regular signatures, and they will prompt you and tell you, yeah, you might want to use a recovery key. So the answer to the maybe burning question, should we drop passkeys? In my opinion, no, because they still offer the best way to onboard people.
But we have to think about the threat model, and we have to code accordingly. And one thing we can do, since we have smart contracts: we can associate less privilege to passkeys that are syncable. Because since we know that syncable passkeys are way less secure than passkeys that are handled by the device, we can use them with quotas, we can use them, for example, for smaller amounts of assets. And then we have something that is acceptable. But the most important thing is to know the threat and act accordingly. We are finished. So I will just leave you with one last meme for the road. This is the difference between 2014 and 2024. We got rid of passwords by storing keys in password managers. Might not be the best idea. I will let you decide. You can reach me on Twitter, and there will be code on GitHub to describe all this, so that you can run it on your own device and you can know if at some point implementation gets better and, well, the less ugly solution for the synchronization is being picked. Thank you. All right, thank you so much, Nicolas, that was very entertaining as well, to see some memes on the slides, I really enjoyed that. We do have a couple of questions for you, I hope you don't mind answering. So the first question is: is Bitwarden not safe generally, or just for managing passkeys? So Bitwarden is safe for a password manager, but you don't expect the same security to handle passwords and to handle keys. So that's why I think it's important to store keys in hardware, because if you consider that a key is a password, you lose a lot of security. So Bitwarden is reasonably safe for a password manager, but of course you can extract anything you want from it, because this is the way they work. All right. There's one that's not a question, but: thank you for making that big Wat pun. It was brilliant. Thank you. Thank you. Next is: do you think intentional security lowering of the standard could be a dual... I don't like... I see the reference, so thanks for making it. No, I think it is... Sorry. I got the direction wrong, sorry. So, no, no, I don't think it is. I don't think it was... I don't think it was pushed by the government. I think it was really a choice to make the UX easier and to make it, make it easier, again, to not lose private keys. So definitely ease of use was the driver for that, in my opinion. All right. And next question is: what do you think of spending limits for permission management? What should I use as a wallet? I think spending limits and permission management are definitely a good way to deal with that. And what should I use as a wallet? So I will just speak about wallet frameworks. So not necessarily wallets, but for wallet frameworks, any framework that will support this is good. So there are a lot of them. I named three of them, and I think the three of them are good to manage passkeys and to manage additional permissions on top of them. All right, so there's four people who voted this. Well, that was depressing. What to do? What to do? Not panic. So that's the most important part. Keep using passkeys. Just think about using them well, using them knowing this, and use them knowing that they are pretty easy to extract by malware if they are synchronized. So just, yeah, it's not the end of the world. We just have to be more careful. All right. The next one is: hardware YubiKeys would be relatively secure. Yeah, they are absolutely secure, but using your smartphone to store a key that will not be syncable is also very secure.
But then you can't back it up, so it's always a choice. All right. And next question is: what are the options for a non-syncable passkey? Exactly the same. You have to pass it as being non-discoverable when you create the key, but you can use a smartphone, and in that case, you are on an old-style, I would say, FIDO credential. All right. And let me just check. Okay. So next top-voted question is: do you think passkeys are easy enough? Oh, it keeps moving. Do you think passkeys are easy enough to use for normal users? Even as an advanced user, I have sometimes lost access to a passkey and had to jump through hoops to recover. Yes, I think they will be. I think they are in regular cases. It might be a bit rough on the edges at the moment, because the protocol is still very new. Passkey synchronization is only one year old, so I think it will get better, and ultimately all the big firms want to push it. So it will definitely get better. All right. We have a few more seconds to answer this last question. If you turn off syncing passkeys on iOS, are they safe again? They are still on your device. So the problem here is that if you created the passkey as syncable and you turn off passkey synchronization, you get a passkey that is stored in the application processor and which is not synchronizable. So basically, you get the worst of both worlds. Sorry. Actually, we have a few more seconds. Maybe you want to answer also: what's the future of secure chips? How do you feel about JavaCard in general? I hope there will be secure chips that are more secure, more, sorry, more open in the future, and Flashbots is doing a lot of research in that, so it's good. JavaCard is outdated. That's one of the reasons why I decided to start Ledger, because I wanted to have native code running on a smart card, so that's my general take.
JavaCard is good,", "eventId": "devcon-7", - "slot_start": 1731493800000, - "slot_end": 1731497400000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1q-4HjJon6v4PjMBDOXwQwQS2B6fgTj_TjlTh6teEZd0", - "resources_slides": null, + "slot_start": 1731482400000, + "slot_end": 1731484200000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1qSDCPwnZ7bDT8RyjyUEMjDpMOU2yF_Nq0xmCkw7SprQ", + "resources_slides": "https://drive.google.com/file/d/1YvALPBQ5IfyYEdjUFFfjAfRZ6mj1ELdH/view", "speakers": [ - "gary-thung", - "giacomo-barbieri", - "kaan-uzdogan", - "kirill-fedoseev" + "nicolas-bacca" ] }, "vector": [ + 6, + 0, + 0, + 0, 0, 0, 0, - 6, 0, 0, 0, @@ -566434,7 +564834,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -566726,7 +565125,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -566890,7 +565288,6 @@ 0, 0, 0, - 6, 6, 0, 0, @@ -567130,6 +565527,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -567149,7 +565547,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -567181,13 +565578,26 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -567280,7 +565690,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -567451,6 +565860,22 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -567571,37 +565996,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -567694,11 +566088,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, 0, 2, 0, @@ -567711,54 +566105,46 @@ 0, 0, 0, - 0, - 0, 0 ] }, { "session": { - "id": "passkeys-the-good-the-bad-the-ugly", - "sourceId": "XFLPAR", - "title": "Passkeys : the good, the bad, the ugly", - "description": "Passkeys are the new hype for easy onboarding, but it's a quite old protocol that has been hijacked for crypto purposes. We'll dig through the standard history, the potentially misleading security expectations, and see how to reverse engineer an implementation to validate its soundness", - "track": "Security", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Developer", + "id": "peerdas-in-grandine", + "sourceId": "YLLNEW", + "title": "PeerDAS in Grandine", + "description": "EPF project presentation on improving PeerDAS implementation in Grandine", + "track": "[CLS] EPF Day", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Security", - "Account Abstraction", - "TEE", - "Account Abstraction", - "Security" - ], - "keywords": [ - "TEE" + "Core Protocol", + "DAS", + "Data Availability", + "EIP4844" ], - "duration": 1528, + "keywords": [], + "duration": 714, "language": "en", - "sources_swarmHash": "ba22dd73d7a6bf4d9643ab45b06a1aeb4b9628b46d99373ca721a38b46437d5e", - "sources_youtubeId": "TEjNSr8jjUI", + "sources_swarmHash": "b9c24999fe0efb631c31d2e6218f40dd82f654c4e6d6511549ece5006b3b141d", + "sources_youtubeId": "Z0mXYd2UAkM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673982fe1b0f83434d5ab3b3", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673982fe1b0f83434d5ab3b3.vtt", - "transcript_text": " Okay, so Makupai everybody. Today we'll be talking about passkeys, we'll be talking about security and memes as well. So you'll see it will be entertaining, I hope. 
I'll be going fast because I have a lot of slides, so don't sleep. So first, you might be wondering what you are doing in this room: why are we talking about an identity and access management protocol? FIDO means Fast IDentity Online. So why are we here? It looks very complex: server blocks, client blocks, a lot of stuff we don't do with Ethereum. But if we are degens, like we all are, we don't really care about all the parts on the server. We care about the fact that with passkeys, we can generate keys on the web browser. And this is very interesting for us because we like keys, of course. So next slide. Yes. So if I want to go through a presentation of the FIDO protocol in one slide, I would say that FIDO is an authentication protocol. You have a registration phase where you will create a key. You will bind it to a web origin. And then you have an authentication phase where you will get a challenge. You will sign it, and so you can verify that you are in the right place. We have recent gas optimizations that allow us to run the FIDO protocol on-chain, which is why it's interesting, and there are a lot of abstractions going on in FIDO, so a lot of different implementations of this protocol going on. Why is this interesting? Again, we got a God-tier UX on mobile with account abstraction with passkeys. If you have not tried it already, I will suggest trying the Coinbase Smart Wallet, which is very impressive. But you get something that is very close to the Web2 experience. You just use your biometrics, and you have a wallet. This wallet is self-custodial, so you can pretty much do whatever you want. And, well, it's super easy to create an account on mobile with passkeys. On desktop, it's a bit different, but it's standard as well. So it's good because you have a common interface. You have a QR, you can redirect to your mobile. If you have an Android phone, you can just avoid scanning the QR every time. You can scan it once, and then it will be recognized automatically, but it's proprietary. So it's not a great interface, but at least it's standard on all the implementations, so you have a way to connect your desktop to your mobile, and it's not too confusing, let's say. Regarding a web developer, the experience is pretty standardized, pretty simple as well, because this was acquired by the W3C under the name WebAuthn. So you pass common parameters when you register, like the user information, the key types, the challenge. So two APIs, super simple. I have been going through this very, very quickly because we are not yet at the interesting part. The interesting part is going to start now, because you are all wondering where the key is stored. We say we have keys, but well, where is it stored? This is the real question. And unfortunately, to answer this, we have to go through a very, very large bit of history, so I will be just doing this. The FIDO story starts in 2013 with two protocols: the Universal Second Factor and the Universal Authentication Framework. Universal Second Factor is the best known. And both protocols are mostly stateless. Stateless meaning that credentials are not saved in the device. Credentials are generated in the device, saved on the server. So the device can be used to generate an infinite number of credentials.
It was launched initially by Google and Yubico. 2014: this is the real commercial launch, with two devices, the YubiKey and one device which I worked on. If you look at it, you might recognize the early Ledger design, very similar. 2018: FIDO2 is launched. FIDO2 introduces something new. It's basically a fusion between U2F and UAF, and introduces the concept of a resident or discoverable credential. Those credentials are stored in the device, so this completely changes the state of the protocol, because now it goes stateful. 2019: WebAuthn is launched by the W3C, so it's basically FIDO but adopted by the W3C, and at the same time, on Android, you have the StrongBox API which appears, and it is the equivalent of the secure enclave on iOS devices, so a very secure place to store keys, and Android got FIDO2 certified. I think you are getting a trend there. We get iOS support in 2020, and we see that FIDO starts to be supported in devices which are very secure. So it all looks very good at the moment, because we have dedicated devices with secure hardware to support FIDO. We have it in phones with strong security. And in 2022, things start changing, because we get a shared announcement from Google, Apple, and Microsoft saying: this protocol looks nice, we are going to support it a lot more. And usually when you get this kind of announcement, it's the beginning of assimilation. And this assimilation has a name: passkeys. So, well, first thing we can say: in 2023, it starts with the introduction of syncable credentials. So credentials that can be synced to different devices, either with a proprietary scheme or a password manager. So here's the illustration. It's a big temple, because we are in Bangkok. Temples are called Wat. So this is Wat Pho. I didn't visit it yet, but yeah, Wat. And in 2024, the FIDO Alliance finally performs a general rebranding around passkeys. So now a passkey is just describing a FIDO credential, and will start a specification to make the synchronization a bit less proprietary. So we might ask ourselves, what is a syncable credential? Because if we search for this in the documentation, we won't find any reference to it. That's because the proper name for it is multi-device credential. So that's one thing. And if we want to define it, we can say it's a discoverable or resident credential in the context of a smartphone. But the only way to know if a credential is really syncable is to look at the answers that you get when you register it. You will get a flag which is called backup eligibility. If it's set to one, then the credential can be synced. So it's something on top of the spec, not super easy to get. The real problem starts now, because we have several security misconceptions in FIDO. The first one is that, well, FIDO protects against phishing, not malware. So we can't really expect that FIDO is going to be good to protect against malware, but at the same time, it is implemented on secure devices. So we have strong... we think that the key is going to be strongly protected, because this is the way that FIDO was defined. Then we can ask what is the consequence of introducing syncable credentials to this. And to get even worse, when you look at the implementation of FIDO by crypto people, we abuse it routinely, because FIDO is only designed for authentication. When you use it to sign transactions, we can't really verify what you are signing.
And if we lose a key, the impact is much more important for crypto than it is for authentication, because you can always revoke an account and you can't revoke a transaction on the blockchain. Then FIDO is designed to be bound by the web origin. If we want to build a common web wallet, we are going to break this property by design. So we have to hack around it, but it's a hack around the specification. And just to say that we have been abusing this protocol for a long time, I'm not especially proud of it, but just mentioning it, I used U2F to communicate between Ledger and MetaMask, sorry, MyEtherWallet, in 2016, because there was no way to communicate between a browser and a USB device. So it was used as a tunnel at that time. Short break, maybe: why is secure hardware important? Secure hardware is important because it will protect the key against malware. Any hardware does that, but secure hardware is supposed to do it better. Then it will protect the key against physical attacks. Physical attacks are the last line of defense. When an attacker has access to a device, you always think that your key is lost. Secure hardware is supposed to protect you against this. But even more importantly, secure hardware will protect you against passive attacks. Passive attacks meaning trying to obtain the key by listening to what the chip is doing, by listening to electromagnetic radiation, power differences, that kind of thing. And the only way to really protect against this is by using dedicated hardware, because even if you use the best open source library, like libsecp256k1 here, it needs to be customized to your chip to avoid leaking information. So if you are not working with secure hardware, you're going to have problems. You are basically just one speculative leak away from losing the key, for example. So what is FIDO's security model for non-syncable credentials? They cannot be extracted by malware. This is very important. The authentication is always done at the enclave level. The enclave is basically in charge. It's holding the key, it's doing the authentication. If you want to do something with the enclave, you have to authenticate. This cannot be bypassed. So the malware cannot bypass it: a very strong malware that managed to modify the kernel of the device can fool you into signing something, but it would have to do it every time, and it can't do anything else. Now let's think about some hypotheses for the synchronization. The first one will be the good hypothesis. In that case, we imagine that there is a hardware security module sitting at Google and Apple, and this hardware security module is doing a synchronization protocol between two enclaves. In that case, the security model doesn't change, the credential is never exposed, everything is good. At least it doesn't change. Now the bad synchronization hypothesis. In that case, the key is still owned by the enclave, but you have a way to put it into the application processor to start the synchronization mechanism. In that case, a malware could be able to extract the key, but only after prompting the enclave to start the synchronization protocol. And finally, the ugly synchronization hypothesis, where the enclave might not even be used anymore. The key is in the application processor. Everything is in the clear.
And then a malware could be able to extract the key, and the malware doesn't need to be as sophisticated as in the previous cases. And unfortunately, the only way to know what the implementation is, is to reverse it. So we are going to do this. First, on iOS. So iOS, to do this, we need to act as a malware. iOS is a bit difficult to jailbreak, as you know, so I made a reference to a recent jailbreak to show you how complex this is, but since we can synchronize, we can jailbreak an older iOS device and see what happens. So for that we are going to use the checkm8 bug, which is quite powerful and allows us to jailbreak a lot of older iPhones. I use palera1n for that, but you might want to use another exploit. It's a bit hard to run, but you will manage to run it if you want. Then we can dump the keychain. We have some information about the keychain at Apple, but here the first thing that we notice is that there is no security property that says that the credential needs to be authenticated every time. It's basically authenticated when the device is unlocked, and then it's not going to be locked again. So we see a first problem here. Then, digging into it, dumping the keychain itself, we get more information. We got a first attempt that was done on an older version of iOS, a description of what the keychain looks like. Basically, we have items in the keychain which are defined by metadata and the secret itself. The metadata and secret are wrapped by a key which is handled by the platform. So you need to have the device in order to decrypt this. And you have an additional indirection level for metadata. So you have an extra key which will be decrypted by the platform as well, but the scheme is always metadata and secret protected by a key, basically. So there is a small difference between the two item versions, and basically, knowing this, you can fix what is on the internet today and you can make it dump the recent items, because Protocol Buffers is very easy to describe, I mean it's self-describing, so you can modify this. When you look at the decrypted item, you'll see that the length of the value is 65 bytes plus 32, starting with 04. If you have played with keys, you think that, okay, this might just be the public key and the private key concatenated. You can verify it by dumping the key, verifying that the public key associated to the private key is the right one, and it is the right one. So this means we have a way to decrypt. A malware has a way to decrypt the key. It can steal it, and it's fully handled by the application processor on iOS. On Android, we can do the same thing. It's much easier to jailbreak an Android phone, because you can just unlock the bootloader, flash Magisk, and you're done. And to look at the application, we'll use a framework called Frida, which will let us introspect the application and inject code to understand what the program is doing. First question: it's very clear on iOS where the keychain is, on Android not that much. So we want to know where we are, and for that we are going to look at the logs. We see a lot of logs referring to GMS, for Google Mobile Services. Looks like a good place to start, so we will instrument it.
To instrument it, we'll just use the signature API in Java, and we will ask for the class that is being used, because that way we can dig further, and we can try to know exactly what is happening. So we do that with GMS. We see finally the name of the class. And we can find some information about that class on the internet. We see that it's a wrapper to another class. So we still don't know if the credential is handled by the secure enclave on the device. But at least we can dig further. So we start instrumenting again. And this time, we will dump the key. So we will assume that if we have the right class, we can cast it, and we can ask it to dump the key. And it works. So we can dump the key, we can verify it's the right private key, it matches the public key, which means that on Android and iOS, we have verified that the key is handled by the application processor. As a bonus on Android, so it's yet another big Wat here, we get the key before the user authentication, which means that there is a caching mechanism that is loading the key, and user authentication is just there basically to make you think that it's secure, but it's not really secure. Finally, looking at an external password manager, here we have absolutely no expectation, so it's good. I did the example with Bitwarden. You can see when you dump it that the credential is listed as a public key. Bad news, it's not a public key. Of course, it's a private key, so we can just dump it, look at it, look at the private key, verify that it matches the public key. Exactly the same thing. So if you expected some security by saving your passkey to a password manager, you have none, which is completely the expected result. So to summarize, this is where we are today on smartphones. We have to choose between secure credentials or back-upable credentials, which might not be a good thing, because you want both. So the passkeys are handled by the application processor. They are easy to extract by a malware when the device is unlocked, and there are physical attacks applicable, which is probably the worst part, because something might be able to dump those keys at a later stage. So user-presence enforcement is not really linked to usage of the key. And non-syncable passkeys, things don't change there. They are still very secure. So we can wonder, how did we get there? One way to say that is that, well, we have conflicting rules between the vendors. So Google says that you can inject a key into the enclave, and Apple says that this is absolutely forbidden: you cannot inject a key into the enclave. So this might be the reason why the enclaves were not modified and not used in that case. And all agree that the key cannot be exported from the enclave, so that's another good reason why it's not implemented that way. So FIDO introduced some difference between enterprise and consumer passkeys. So this can be another sign that things are not going very, very well. If we wonder what will happen in the future, it's not going to change much, because here we have a draft regarding the synchronization protocol, which does not describe how the key is used. If we want to add a trust anchor to the key, so if we want to link a device-bound passkey to the key, there were a few extensions to do that, and they were finally dropped. And in the end, well, if we want a better UX for passkeys, this is also going to be dropped. So we had three possible improvement protocols that were dropped.
So we can think that this is not going to change much. If you are disappointed by platform authenticators, you can always rely on external passkey implementations. So I listed a few open source ones. The good news is that you can use your favorite hardware wallet if you want. The application is open source and you get a backupable passkey. So it can be another option. What happens on-chain? Well, I have one minute, so maybe I will go over there very fast. Cartridge initiated the move on StarkNet. So passkeys are now very popular, because we have been running several optimizations in order to be able to verify passkeys on-chain very efficiently. I have listed a few kernels that you can use with passkeys: ZeroDev, Safe Core, or Coinbase Smart Wallet. Here's the main difference. I will just look at how they are using passkeys. You make no difference with a common credential on ZeroDev. On Safe Core, passkeys are supported but discouraged. If you look at the specifications, Safe Core will tell you it's better to associate the passkey with a regular credential. And if you want to run it on Coinbase Smart Wallet, they support passkeys, they also support regular signatures, and they will prompt you and tell you, yeah, you might want to use a recovery key. So the answer to the maybe burning question, should we drop passkeys? In my opinion, no, because they still offer the best way to onboard people. But we have to think about the threat model, and we have to code accordingly. And one thing we can do, since we have smart contracts: we can associate less privilege to passkeys that are syncable. Because since we know that syncable passkeys are way less secure than passkeys that are handled by the device, we can use them with quotas, we can use them, for example, for smaller amounts of assets. And then we have something that is acceptable. But the most important thing is to know the threat and act accordingly. We are finished. So I will just leave you with one last meme for the road. This is the difference between 2014 and 2024. We got rid of passwords by storing keys in password managers. Might not be the best idea. I will let you decide. You can reach me on Twitter, and there will be code on GitHub to describe all this, so that you can run it on your own device and you can know if at some point implementation gets better and, well, the less ugly solution for the synchronization is being picked. Thank you. All right, thank you so much, Nicolas, that was very entertaining as well, to see some memes on the slides, I really enjoyed that. We do have a couple of questions for you, I hope you don't mind answering. So the first question is: is Bitwarden not safe generally, or just for managing passkeys? So Bitwarden is safe for a password manager, but you don't expect the same security to handle passwords and to handle keys. So that's why I think it's important to store keys in hardware, because if you consider that a key is a password, you lose a lot of security. So Bitwarden is reasonably safe for a password manager, but of course you can extract anything you want from it, because this is the way they work. All right. There's one that's not a question, but: thank you for making that big Wat pun. It was brilliant. Thank you. Thank you. Next is: do you think intentional security lowering of the standard could be a dual...
I don't like... I see the reference, so thanks for making it. No, I think it is... Sorry. I got the direction wrong, sorry. So, no, no, I don't think it is. I don't think it was... I don't think it was pushed by the government. I think it was really a choice to make the UX easier and to make it, make it easier, again, to not lose private keys. So definitely ease of use was the driver for that, in my opinion. All right. And next question is: what do you think of spending limits for permission management? What should I use as a wallet? I think spending limits and permission management are definitely a good way to deal with that. And what should I use as a wallet? So I will just speak about wallet frameworks. So not necessarily wallets, but for wallet frameworks, any framework that will support this is good. So there are a lot of them. I named three of them, and I think the three of them are good to manage passkeys and to manage additional permissions on top of them. All right, so there's four people who voted this. Well, that was depressing. What to do? What to do? Not panic. So that's the most important part. Keep using passkeys. Just think about using them well, using them knowing this, and use them knowing that they are pretty easy to extract by malware if they are synchronized. So just, yeah, it's not the end of the world. We just have to be more careful. All right. The next one is: hardware YubiKeys would be relatively secure. Yeah, they are absolutely secure, but using your smartphone to store a key that will not be syncable is also very secure. But then you can't back it up, so it's always a choice. All right. And next question is: what are the options for a non-syncable passkey? Exactly the same. You have to pass it as being non-discoverable when you create the key, but you can use a smartphone, and in that case, you are on an old-style, I would say, FIDO credential. All right. And let me just check. Okay. So next top-voted question is: do you think passkeys are easy enough? Oh, it keeps moving. Do you think passkeys are easy enough to use for normal users? Even as an advanced user, I have sometimes lost access to a passkey and had to jump through hoops to recover. Yes, I think they will be. I think they are in regular cases. It might be a bit rough on the edges at the moment, because the protocol is still very new. Passkey synchronization is only one year old, so I think it will get better, and ultimately all the big firms want to push it. So it will definitely get better. All right. We have a few more seconds to answer this last question. If you turn off syncing passkeys on iOS, are they safe again? They are still on your device. So the problem here is that if you created the passkey as syncable and you turn off passkey synchronization, you get a passkey that is stored in the application processor and which is not synchronizable. So basically, you get the worst of both worlds. Sorry. Actually, we have a few more seconds. Maybe you want to answer also: what's the future of secure chips? How do you feel about JavaCard in general? I hope there will be secure chips that are more secure, more, sorry, more open in the future, and Flashbots is doing a lot of research in that, so it's good. JavaCard is outdated. That's one of the reasons why I decided to start Ledger, because I wanted to have native code running on a smart card, so that's my general take.
JavaCard is good,", + "sources_streamethId": "67345a769dbb7a90e13568c9", "eventId": "devcon-7", - "slot_start": 1731482400000, - "slot_end": 1731484200000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1qSDCPwnZ7bDT8RyjyUEMjDpMOU2yF_Nq0xmCkw7SprQ", - "resources_slides": null, + "slot_start": 1731483000000, + "slot_end": 1731483900000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1Iiq2VFXcakCQ4LfaHpWejg013im1G0mu9_E24tzaarE", + "resources_slides": "https://drive.google.com/file/d/18WsNmmLQ0SQjS394K8Y-1Ux2RiQVR4Qi/view", "speakers": [ - "nicolas-bacca" + "hangleang" ] }, "vector": [ - 6, 0, 0, 0, @@ -567774,6 +566160,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -568509,11 +566896,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -568530,6 +566912,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -568560,7 +566943,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -568574,9 +566956,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -568843,7 +567227,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -568865,6 +567248,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -569074,13 +567458,11 @@ 0, 2, 0, + 2, 0, 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -569094,38 +567476,39 @@ }, { "session": { - "id": "peerdas-in-grandine", - "sourceId": "YLLNEW", - "title": "PeerDAS in Grandine", - "description": "EPF project presentation on improving PeerDAS implementation in Grandine", + "id": "peerdas-metrics-specifications", + "sourceId": "UYPWVK", + "title": "PeerDAS metrics specifications", + "description": "The PeerDAS Metrics Specifications help make testing more efficient and straightforward by creating standard metrics for Consensus clients. With a unified Grafana dashboard, teams can monitor performance in real-time, compare client data side by side, and quickly spot issues. 
This approach makes troubleshooting faster, supports research, and encourages teamwork, helping strengthen the Ethereum ecosystem and improve scalability.", "track": "[CLS] EPF Day", "type": "Lightning Talk", - "expertise": "Beginner", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "Core Protocol", - "DAS", - "Data Availability", - "EIP4844" + "Testing", + "Tooling" ], - "keywords": [], - "duration": 714, + "keywords": [ + "DevOps" + ], + "duration": 706, "language": "en", - "sources_swarmHash": "b9c24999fe0efb631c31d2e6218f40dd82f654c4e6d6511549ece5006b3b141d", - "sources_youtubeId": "Z0mXYd2UAkM", + "sources_swarmHash": "710e662c1b81b646ab2603eb50ff3b4385f2f967194e6fd32bf5901d839045b0", + "sources_youtubeId": "BRzI_IyU5SU", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67345a769dbb7a90e13568c9", + "sources_streamethId": "67347b4e9dbb7a90e15bbdc6", "eventId": "devcon-7", - "slot_start": 1731483000000, - "slot_end": 1731483900000, + "slot_start": 1731483900000, + "slot_end": 1731484800000, "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1Iiq2VFXcakCQ4LfaHpWejg013im1G0mu9_E24tzaarE", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1K_w0rS7tGijHA1ThVt6Mzpg7shFMcaOpglVD01dIMPQ", + "resources_slides": "https://drive.google.com/file/d/1XflmzTS8Jr4PnTuD2OfUcZc9FmJgpUuV/view", "speakers": [ - "hangleang" + "ekaterina-riazantseva" ] }, "vector": [ @@ -569896,11 +568279,9 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, + 2, 0, 0, 0, @@ -569943,11 +568324,8 @@ 0, 0, 0, - 2, - 0, 0, 0, - 2, 0, 0, 0, @@ -570126,6 +568504,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -570236,7 +568615,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -570443,9 +568821,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 2, 0, @@ -570465,39 +568843,47 @@ }, { "session": { - "id": "peerdas-metrics-specifications", - "sourceId": "UYPWVK", - "title": "PeerDAS metrics specifications", - "description": "The PeerDAS Metrics Specifications help make testing more efficient and straightforward by creating standard metrics for Consensus clients. With a unified Grafana dashboard, teams can monitor performance in real-time, compare client data side by side, and quickly spot issues. This approach makes troubleshooting faster, supports research, and encourages teamwork, helping strengthen the Ethereum ecosystem and improve scalability.", - "track": "[CLS] EPF Day", - "type": "Lightning Talk", + "id": "permissionless-p2p-with-the-waku-network", + "sourceId": "N9WRM3", + "title": "Permissionless P2P with The Waku Network", + "description": "This workshop will be oriented around showcasing how p2p networks are pivotal for dapps and just Privacy oriented applications. We will show how Waku can be used to strengthen many concerns about censorship resistance and decentralization. Another section of workshop will be about conscious choice of tradeoffs and those that are present in Waku or any other p2p network. 
We will try to leave you with some patterns that can be implemented into your daily development and reasoning.", + "track": "Cypherpunk & Privacy", + "type": "Workshop", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Core Protocol", - "Testing", - "Tooling" + "Developer Infrastructure", + "Privacy", + "DePIN", + "infra", + "p2p", + "DePIN", + "Developer Infrastructure", + "Privacy" ], "keywords": [ - "DevOps" + "p2p", + "infra" ], - "duration": 706, + "duration": 3588, "language": "en", - "sources_swarmHash": "710e662c1b81b646ab2603eb50ff3b4385f2f967194e6fd32bf5901d839045b0", - "sources_youtubeId": "BRzI_IyU5SU", + "sources_swarmHash": "cd279ae97ff4fd6476822817592ef641d9cb81d5c24f41c0b18dc70c195e9d17", + "sources_youtubeId": "QbpNrcD0MvI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67347b4e9dbb7a90e15bbdc6", + "sources_streamethId": "6735bec29dbb7a90e1c39d62", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731483900000, - "slot_end": 1731484800000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1K_w0rS7tGijHA1ThVt6Mzpg7shFMcaOpglVD01dIMPQ", - "resources_slides": null, + "slot_start": 1731571200000, + "slot_end": 1731576600000, + "slot_roomId": "classroom-d", + "resources_presentation": "https://docs.google.com/presentation/d/1-0QAKQAwAZ11MiH9PyyPFFxZJJ76rz1xsmKj_FWlbEM", + "resources_slides": "", "speakers": [ - "ekaterina-riazantseva" + "sasha" ] }, "vector": [ @@ -570506,6 +568892,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -570516,7 +568903,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -571273,14 +569659,6 @@ 0, 2, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -571311,6 +569689,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -571372,6 +569751,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -571496,7 +569876,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -571557,6 +569936,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -571693,6 +570073,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -571837,47 +570218,41 @@ }, { "session": { - "id": "permissionless-p2p-with-the-waku-network", - "sourceId": "N9WRM3", - "title": "Permissionless P2P with The Waku Network", - "description": "This workshop will be oriented around showcasing how p2p networks are pivotal for dapps and just Privacy oriented applications. We will show how Waku can be used to strengthen many concerns about censorship resistance and decentralization. Another section of workshop will be about conscious choice of tradeoffs and those that are present in Waku or any other p2p network. We will try to leave you with some patterns that can be implemented into your daily development and reasoning.", - "track": "Cypherpunk & Privacy", - "type": "Workshop", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "pessimists-archive-presents-the-franklin-fallacy-why-we-misjudge-new-technologies", + "sourceId": "W7MVPA", + "title": "Pessimists Archive Presents: The Franklin Fallacy, Why We Misjudge New Technologies", + "description": "People often dismiss emerging technologies by focusing only on their current limitations, overlooking their potential evolution. This tendency, seen throughout history—from the telegraph to Ethereum—stems from what can be called “The Franklin Fallacy.” When asked about the purpose of a hot air balloon, Benjamin Franklin famously responded, \"What good is a newborn baby?\" highlighting how judging a technology in its infancy is shortsighted. 
This talk explores the psychology of this fallacy.", + "track": "Real World Ethereum", + "type": "Talk", + "expertise": "Beginner", + "audience": "Academic", "featured": false, "doNotRecord": false, "tags": [ - "Developer Infrastructure", - "Privacy", - "DePIN", - "infra", - "p2p", - "DePIN", - "Developer Infrastructure", - "Privacy" + "e/acc", + "Marketing" ], "keywords": [ - "p2p", - "infra" + "Technological", + "Acceptance" ], - "duration": 3588, + "duration": 945, "language": "en", - "sources_swarmHash": "cd279ae97ff4fd6476822817592ef641d9cb81d5c24f41c0b18dc70c195e9d17", - "sources_youtubeId": "QbpNrcD0MvI", + "sources_swarmHash": "48acde54ba63807192aacaefd33d64c8c49f1880cf8670d34a0f7f3be2d030bd", + "sources_youtubeId": "CSLqxWBcM-0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735bec29dbb7a90e1c39d62", + "sources_streamethId": "673573b19dbb7a90e1a51919", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731571200000, - "slot_end": 1731576600000, - "slot_roomId": "classroom-d", - "resources_presentation": "https://docs.google.com/presentation/d/1-0QAKQAwAZ11MiH9PyyPFFxZJJ76rz1xsmKj_FWlbEM", - "resources_slides": null, + "slot_start": 1731555000000, + "slot_end": 1731556800000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1BYWK_IatacBdd2r84kKv_IWDoGpsDqXH7RNIaxf7qqQ", + "resources_slides": "https://drive.google.com/file/d/1UExUiTCTXVQvasENYO3EP-CJp6BnWhgF/view", "speakers": [ - "sasha" + "louis-anslow" ] }, "vector": [ @@ -571886,8 +570261,8 @@ 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -572654,7 +571029,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -572686,7 +571060,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -572748,7 +571121,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -572788,6 +571160,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -572923,6 +571296,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -572934,7 +571308,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -573071,7 +571444,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -573199,9 +571571,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -573209,6 +571578,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -573217,42 +571587,42 @@ }, { "session": { - "id": "pessimists-archive-presents-the-franklin-fallacy-why-we-misjudge-new-technologies", - "sourceId": "W7MVPA", - "title": "Pessimists Archive Presents: The Franklin Fallacy, Why We Misjudge New Technologies", - "description": "People often dismiss emerging technologies by focusing only on their current limitations, overlooking their potential evolution. This tendency, seen throughout history—from the telegraph to Ethereum—stems from what can be called “The Franklin Fallacy.” When asked about the purpose of a hot air balloon, Benjamin Franklin famously responded, \"What good is a newborn baby?\" highlighting how judging a technology in its infancy is shortsighted. This talk explores the psychology of this fallacy.", - "track": "Real World Ethereum", - "type": "Talk", + "id": "play-a-massive-onchain-war-game-mud-day-demo", + "sourceId": "PG3VAG", + "title": "Play a massive onchain war game! - MUD Day Demo", + "description": "Play Battle for Blockchain, an onchain war game with us. Become the commander of armies and storm your enemies. 
Collaborate with friends to obliterate opponents and win fortune.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", + "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Academic", + "audience": "Hobby", "featured": false, "doNotRecord": false, - "tags": [ - "e/acc", - "Marketing" - ], "keywords": [ - "Technological", - "Acceptance" + "", + "" + ], + "tags": [ + "Autonomous World", + "Coordination", + "Gaming" ], - "duration": 945, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "b3fc8cfcda25f5b407661fc4c11e4aeea06f84745c1626e046e3a58c220d1b10", + "sources_youtubeId": "KXWmTDAetZ4", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673573b19dbb7a90e1a51919", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731555000000, - "slot_end": 1731556800000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1BYWK_IatacBdd2r84kKv_IWDoGpsDqXH7RNIaxf7qqQ", - "resources_slides": null, "speakers": [ - "louis-anslow" - ] + "stokarz" + ], + "eventId": "devcon-7", + "slot_start": 1731554700000, + "slot_end": 1731555000000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1UNKZFzRMqNLX4iLJO6NRMaXRhwd2RgXojdoLtHJGj3w", + "resources_slides": "https://drive.google.com/file/d/1zmnBTcdVjBa5XA1J2HcvN4gzaYVViryh/view" }, "vector": [ 0, @@ -573261,13 +571631,13 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -574113,12 +572483,8 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, + 2, 0, 0, 0, @@ -574299,7 +572665,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -574580,9 +572945,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -574591,34 +572956,39 @@ }, { "session": { - "id": "play-a-massive-onchain-war-game-mud-day-demo", - "sourceId": "PG3VAG", - "title": "Play a massive onchain war game! - MUD Day Demo", - "description": "Play Battle for Blockchain, an onchain war game with us. Become the commander of armies and storm your enemies. Collaborate with friends to obliterate opponents and win fortune.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Hobby", + "id": "polynomial-commitment-schemes-for-zero-knowledge-proof-systems-a-hands-on-workshop", + "sourceId": "QAQAUX", + "title": "Polynomial Commitment Schemes for Zero-Knowledge Proof Systems: A Hands-on Workshop", + "description": "In this workshop, we will compare three distinct classes of Polynomial Commitment Schemes employed in various zero-knowledge proof systems: pairings-based (e.g., KZG), discrete logarithm-based (e.g., IPA), and hash function-based (e.g., FRI). We will explore their mathematical constructions, properties, and trade-offs. 
Participants will engage in hands-on proof-of-concept implementations, gaining practical experience of these advanced cryptographic protocols.", + "track": "Applied Cryptography", + "type": "Workshop", + "expertise": "Intermediate", + "audience": "Developer", "featured": false, - "doNotRecord": false, + "doNotRecord": true, "keywords": [ - "", - "" + "cryptographic primitives", + "implementation" ], "tags": [ - "Autonomous World", - "Coordination", - "Gaming" + "Zk Rollups", + "Zero-Knowledge", + "Cryptography", + "implementation", + "Cryptography", + "Zero-Knowledge", + "Zk Rollups" ], "language": "en", "speakers": [ - "stokarz" + "giuseppe" ], "eventId": "devcon-7", - "slot_start": 1731554700000, - "slot_end": 1731555000000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1UNKZFzRMqNLX4iLJO6NRMaXRhwd2RgXojdoLtHJGj3w" + "slot_start": 1731645000000, + "slot_end": 1731650400000, + "slot_roomId": "classroom-d", + "resources_presentation": "https://docs.google.com/presentation/d/1L15TG4XE9h8o3WvPj5ksj6cdCnNYdYuY1dI9gWq3GEg", + "resources_slides": "https://drive.google.com/file/d/1yM92AclsMJ7nFpBrBtWJ7ZWnI9YvTVbx/view" }, "vector": [ 0, @@ -574631,8 +573001,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -575136,10 +573504,9 @@ 0, 0, 0, - 6, - 0, 0, 0, + 6, 0, 0, 0, @@ -575384,6 +573751,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -575439,6 +573808,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -575482,8 +573852,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -575527,7 +573895,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -575811,6 +574178,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -575932,6 +574300,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -575946,10 +574315,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0 @@ -575957,38 +574322,44 @@ }, { "session": { - "id": "polynomial-commitment-schemes-for-zero-knowledge-proof-systems-a-hands-on-workshop", - "sourceId": "QAQAUX", - "title": "Polynomial Commitment Schemes for Zero-Knowledge Proof Systems: A Hands-on Workshop", - "description": "In this workshop, we will compare three distinct classes of Polynomial Commitment Schemes employed in various zero-knowledge proof systems: pairings-based (e.g., KZG), discrete logarithm-based (e.g., IPA), and hash function-based (e.g., FRI). We will explore their mathematical constructions, properties, and trade-offs. Participants will engage in hands-on proof-of-concept implementations, gaining practical experience of these advanced cryptographic protocols.", - "track": "Applied Cryptography", - "type": "Workshop", + "id": "popcraft-mud-day-demo", + "sourceId": "UDJFDV", + "title": "PopCraft - MUD Day Demo", + "description": "This is a project demo for MUD Day CLS: onchain games and non-financial applications. PopCraft is a fully on-chain casual click-based game integrating gameplay with financial elements. Currently in single-player mode, it plans to expand to multiplayer. Built on composability, PopCraft uses PixeLAW, TCM, and Redswap. 
In-game item issuance and trading are decentralized, transparent, and open, allowing seamless integration of any ERC-20 token projects and DEX.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Community", "featured": false, - "doNotRecord": true, + "doNotRecord": false, "tags": [ - "Zk Rollups", - "Zero-Knowledge", - "Cryptography", - "implementation", - "Cryptography", - "Zero-Knowledge", - "Zk Rollups" + "Autonomous World", + "Gaming", + "Not financial" ], - "language": "en", - "speakers": [ - "giuseppe" + "keywords": [ + "Fully", + "on-chain", + "game" ], + "duration": 296, + "language": "en", + "sources_swarmHash": "aff1af8d2c9754f3f42ee7832466c60ad90eb9ccfc155e90e1ece97bd6932049", + "sources_youtubeId": "fV_xf0pac6k", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735888f9dbb7a90e149563b", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735888f9dbb7a90e149563b.vtt", + "transcript_text": " Hello everyone, I'm CK from the MetaCat team. Today I will show our latest fully on-chain game, PopCraft. Okay. I always ask myself some questions about the necessity, enjoyment, benefits, timing, and the contribution of using blockchain in this field. So what's the outcome? In this field, a lot of people have to do a lot of exploring. And we want to do something different. So we chose cross-game composability as our core starting point, which led us to create PopCraft. It's a fully composable game, which makes cross-game composability no longer just a meme. So the gameplay is just like PopStar. You can even win by eliminating all items or blocks in 90 seconds. Multiple adjacent items can be clicked to eliminate directly. Single items need to buy a prop to eliminate. Okay. Okay. This is a video, but I cannot play it. Okay. What is PopCraft? Simply put, PopCraft equals PixeLAW plus This Cursed Machine plus Redswap. OK. So how do we build it? Actually, we built two things based on MUD: an EVM-compatible PixeLAW and PopCraft. OK, we just use PixeLAW as a game board for PopCraft, which gives the game very excellent composability at a core level. Okay, so we use This Cursed Machine as a game item factory. When we played This Cursed Machine, we thought about how we can make this material more useful. We ended up coming up with an idea of just using this material in another game, yeah. Okay, we use Redswap as a game marketplace. Okay, Redswap as a game marketplace. Redswap is a DEX on Redstone, and it makes the game prop price very transparent. So, PopCraft is now live on Redstone mainnet. You can play it anytime. Okay. Also, you can play beyond PopCraft. You can play PopCraft, purchase game props in PopCraft. Raise the price of the game prop. You can also play This Cursed Machine, produce material and sell it on Redswap. You can also just be a trader on Redswap, buying and selling TCM material; they are all ERC-20 tokens. Okay, you can do, choose any of the above, all of the above. Okay, composability in PopCraft. Okay, we can replace This Cursed Machine with any other project which has ERC-20 tokens as a game prop and a game item. Also, we can replace Redswap with any other DEX. One more thing. I think we need to make crypto vegetable in 4.2 game. We might have to do another thing, but we also need to do one thing. 
Thank you.", "eventId": "devcon-7", - "slot_start": 1731645000000, - "slot_end": 1731650400000, - "slot_roomId": "classroom-d", - "resources_presentation": "https://docs.google.com/presentation/d/1L15TG4XE9h8o3WvPj5ksj6cdCnNYdYuY1dI9gWq3GEg" + "slot_start": 1731557100000, + "slot_end": 1731557400000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/12K7Vn_cc7jQu6WzJS3EQxpVW_8a_ylzYwi82LxCmSBw", + "resources_slides": "", + "speakers": [ + "ck" + ] }, "vector": [ 0, @@ -576001,9 +574372,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -576754,11 +575125,6 @@ 0, 0, 0, - 6, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -576811,8 +575177,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -576856,6 +575220,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -576981,6 +575347,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -577182,7 +575549,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -577311,10 +575677,10 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, 0, 0, 0, @@ -577327,44 +575693,35 @@ }, { "session": { - "id": "popcraft-mud-day-demo", - "sourceId": "UDJFDV", - "title": "PopCraft - MUD Day Demo", - "description": "This is a project demo for MUD Day CLS: onchain games and non-financial applications. PopCraft is a fully on-chain casual click-based game integrating gameplay with financial elements. Currently in single-player mode, it plans to expand to multiplayer. Built on composability, PopCraft uses PixeLAW, TCM, and Redswap. In-game item issuance and trading are decentralized, transparent, and open, allowing seamless integration of any ERC-20 token projects and DEX.", + "id": "porting-dark-forest-to-mud-mud-day-demo", + "sourceId": "VBS9CJ", + "title": "Porting Dark Forest to MUD - MUD Day Demo", + "description": "We recently ported Dark Forest to the MUD engine and would like to share some of the insights we gained during this process with everyone.", "track": "[CLS] MUD Community-Led Session, by 0xPARC", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Community", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Autonomous World", - "Gaming", - "Not financial" - ], - "keywords": [ - "Fully", - "on-chain", - "game" - ], - "duration": 296, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "aff1af8d2c9754f3f42ee7832466c60ad90eb9ccfc155e90e1ece97bd6932049", - "sources_youtubeId": "fV_xf0pac6k", + "sources_swarmHash": "e3dd16f3151091dff7332eccf386599232b3603bebdd91e86bdccd3321a106e9", + "sources_youtubeId": "pGz0lv8y74s", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735888f9dbb7a90e149563b", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735888f9dbb7a90e149563b.vtt", - "transcript_text": " Hello everyone, I'm Seki from the Matican team. Today I will show our latest 4-length game, Popcraft. Okay. I always ask myself some questions about the necessity, enjoyment, benefits, timing, and the contribution, but using blockchain in this field. So what's the outcome? In this field, a lot of people have to do a lot of exploring. And we want to do something different. So we chose the cross and composability as our core start point, which lead us to create PopCraft. It's a fully composable game, which makes cross-embedded compatibility no longer just a meme. So the gameplay is just like PopStar. You can even rewind by eliminating all items or bots in 90 seconds. Multiple adjacent items can be clicked to eliminate directly. Sequel items need to buy a program to eliminate. Okay. 
Okay. This is a video, but I cannot to play it. Okay. What is PopCraft? Simply put, PopCraft equals to pixel long plus this custom scene plus RedSwap. OK. So how we build it? Actually, we build two things based on mount. IEVM compatible Pixelon and PopCraft. OK, we just use Pixelon as a game board for Puffcraft, which gives game very excellent composability at a core level. Okay, so we use this custom machine as a game item factory. When we play this custom machine, we think how we can use this material more useful. We end up, came up with an idea of, we just use this material in another game, yeah. Okay, we use Reswarp as a game marketplace. Okay, Reswarp as a game marketplace. Restwarp is DX or Resto, and it makes the game problem price very transparent. So, the problem now is live on Resto mainnet. You can play it anytime. Okay. Also, you can play beyond Popcraft. You can play Popcraft, purchase game proper in Popcraft. Raise the price of the game prop. You can also play this custom machine, produce machine and produce material and sell them on RedSwap. You can also just be a trader on RedSwap, buying and selling T7 material, they are all ERC20 tokens. Okay, you can do, choose any of the above, all of the above. Okay, composability in PopGraft. Okay, we can replace this custom machine as any other project which has ESC20 tokens as a game prop and a game item. Also, we can replace RedShop with any other DEX. One more thing. I think we need to make crypto vegetable in 4.2 game. We might have to do another thing, but we also need to do one thing. Thank you.", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "ddy" + ], "eventId": "devcon-7", - "slot_start": 1731557100000, - "slot_end": 1731557400000, + "slot_start": 1731556200000, + "slot_end": 1731556500000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/12K7Vn_cc7jQu6WzJS3EQxpVW_8a_ylzYwi82LxCmSBw", - "resources_slides": null, - "speakers": [ - "ck" - ] + "resources_presentation": "https://docs.google.com/presentation/d/14aQQNVk55JWYMHYKeZITv12OkJVvgS-kWDNWXp6cpX4", + "resources_slides": "https://drive.google.com/file/d/1XUdsAECewuDi5VDfG59fzYoPRWkPzIzz/view" }, "vector": [ 0, @@ -578228,8 +576585,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -578355,30 +576710,28 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -578684,14 +577037,13 @@ 2, 0, 0, + 2, 0, 0, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -578703,27 +577055,43 @@ }, { "session": { - "id": "porting-dark-forest-to-mud-mud-day-demo", - "sourceId": "VBS9CJ", - "title": "Porting Dark Forest to MUD - MUD Day Demo", - "description": "We recently ported Dark Forest to the MUD engine and would like to share some of the insights we gained during this process with everyone.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", + "id": "postcards-from-the-cutting-edge-of-gas-research-what-you-dont-know-can-hurt-you-and-your-users", + "sourceId": "X8VZDJ", + "title": "Postcards from the cutting edge of Gas research: what you don’t know can hurt you & your users", + "description": "In July of 2024, we shared original research describing how the interaction between privately transmitted transactions and altruistic self-built blocks unexpectedly increase Base Fee 
volatility (see references below). We also warned that this effect would likely get more pronounced as private transaction share continues to grow. In this session we will revisit our original findings but with 4 months of additional data and deeper investigative research. Has gas price volatility increased as predi", + "track": "Usability", "type": "Lightning Talk", - "expertise": "", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], - "language": "en", - "speakers": [ - "ddy" + "tags": [ + "eip-4844", + "Gas", + "Layer 1", + "UI/UX" ], + "keywords": [ + "1559", + "Blobs", + "4844" + ], + "duration": 434, + "language": "en", + "sources_swarmHash": "a727fa169242eec4b80126341a1150efb4a45bc5a1b4a6a288a8c0e8bf19c107", + "sources_youtubeId": "NKGOZ154rPM", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731556200000, - "slot_end": 1731556500000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/14aQQNVk55JWYMHYKeZITv12OkJVvgS-kWDNWXp6cpX4" + "slot_start": 1731407400000, + "slot_end": 1731408000000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1AzgmOOm16-VrlFGtmsr5MOvsAabE-h1nClU9xydV9I4", + "resources_slides": "https://drive.google.com/file/d/1uyzMjnZLyMuDfMc6HN5tWiAbrNz3s9eQ/view", + "speakers": [ + "matt-cutler" + ] }, "vector": [ 0, @@ -578734,10 +577102,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -579244,17 +577608,11 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -579499,6 +577857,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -579542,6 +577901,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -579709,6 +578069,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -579921,6 +578282,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -580044,6 +578406,7 @@ 2, 0, 0, + 0, 2, 0, 0, @@ -580062,54 +578425,52 @@ }, { "session": { - "id": "postcards-from-the-cutting-edge-of-gas-research-what-you-dont-know-can-hurt-you-and-your-users", - "sourceId": "X8VZDJ", - "title": "Postcards from the cutting edge of Gas research: what you don’t know can hurt you & your users", - "description": "In July of 2024, we shared original research describing how the interaction between privately transmitted transactions and altruistic self-built blocks unexpectedly increase Base Fee volatility (see references below). We also warned that this effect would likely get more pronounced as private transaction share continues to grow. In this session we will revisit our original findings but with 4 months of additional data and deeper investigative research. Has gas price volatility increased as predi", - "track": "Usability", - "type": "Lightning Talk", + "id": "practical-endgame-on-issuance-policy", + "sourceId": "TQMWK9", + "title": "Practical endgame on issuance policy", + "description": "A practical endgame on issuance policy stops the growth in stake while guaranteeing proper consensus incentives and positive regular rewards to solo stakers. Viable reward curves for this endgame are presented. Motivations, impacts and potential downsides of an issuance reduction are in focus. A tangible framework is also introduced: never exceed an issuance rate of 0.5%. 
A stringent cap on issuance caps the inflation rate, solidifying ETH as trustless sound money with robust economic security.", + "track": "Cryptoeconomics", + "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "eip-4844", - "Gas", - "Layer 1", - "UI/UX" - ], - "keywords": [ - "1559", - "Blobs", - "4844" + "Consensus", + "Economics", + "Staking", + "Tokenomics" ], - "duration": 434, + "keywords": [], + "duration": 1636, "language": "en", - "sources_swarmHash": "a727fa169242eec4b80126341a1150efb4a45bc5a1b4a6a288a8c0e8bf19c107", - "sources_youtubeId": "NKGOZ154rPM", + "sources_swarmHash": "343fa2625e225c2ce506d01b26c6323f2618c82ca604b1fad8189db7465585aa", + "sources_youtubeId": "m91Wu6-cdwk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673cc204982f234a128c12d5", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731407400000, - "slot_end": 1731408000000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1AzgmOOm16-VrlFGtmsr5MOvsAabE-h1nClU9xydV9I4", - "resources_slides": null, + "slot_start": 1731558600000, + "slot_end": 1731560400000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1xmwhrvV65FuGDVnNb8_zGgVoMM4-pg6gMEP0t1Iw-OU", + "resources_slides": "https://drive.google.com/file/d/1s9Jmnf7bpHGGaWFAY2jUDxVK1Avusw3x/view", "speakers": [ - "matt-cutler" + "anders-elowsson" ] }, "vector": [ 0, 0, + 6, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -580854,6 +579215,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -580867,12 +579229,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -580889,6 +579245,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -580897,6 +579254,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -580911,7 +579269,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -580982,6 +579339,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -581079,7 +579437,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -581293,7 +579650,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -581419,12 +579775,12 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -581437,44 +579793,42 @@ }, { "session": { - "id": "practical-endgame-on-issuance-policy", - "sourceId": "TQMWK9", - "title": "Practical endgame on issuance policy", - "description": "A practical endgame on issuance policy stops the growth in stake while guaranteeing proper consensus incentives and positive regular rewards to solo stakers. Viable reward curves for this endgame are presented. Motivations, impacts and potential downsides of an issuance reduction are in focus. A tangible framework is also introduced: never exceed an issuance rate of 0.5%. 
A stringent cap on issuance caps the inflation rate, solidifying ETH as trustless sound money with robust economic security.", - "track": "Cryptoeconomics", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Community", + "id": "prediction-market-panel", + "sourceId": "CCZCSH", + "title": "Prediction market panel", + "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Consensus", - "Economics", - "Staking", - "Tokenomics" - ], + "tags": [], "keywords": [], - "duration": 1636, + "duration": 928, "language": "en", - "sources_swarmHash": "343fa2625e225c2ce506d01b26c6323f2618c82ca604b1fad8189db7465585aa", - "sources_youtubeId": "m91Wu6-cdwk", + "sources_swarmHash": "7f1382c2276a5b8073634d3ef9ea7e087d715d1817fdfb2383dfc53dbe76bdbb", + "sources_youtubeId": "oq34OAKrU5M", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673cc204982f234a128c12d5", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "673595919dbb7a90e13b23f7", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673595919dbb7a90e13b23f7.vtt", + "transcript_text": " Hello everyone and can you hear me? Awesome. I'm Vaughan Mackenzie-Landell. I'm one of the co-founders of Butter. We've been working in the governance space for a very long time and Obviously very excited about futarki and prediction markets and that's why we're all here We have this incredible panel full of lots of people who my love and admire And I'm gonna just hand over to each of them to give a very short introduction and then we'll kick off Yeah, I'm Martin. So in a previous live venosis, we started with prediction markets and Wanted to implement food hierarchy and other things. So in general, I'm absolutely a super big fan of that, but I think in this panel I will a little bit play the skeptic here because I think as Robin mentioned, you have the idea and then you have all the details that need to be figured out. So I think I can a little bit bring in that perspective. I just talked, so I'm Robin Hanson. So I'm Calvin, also CAS.if. I'm the co-founder of Utarki.fi. We just announced our project a few days ago. Our plan is to help DAOs and other organizations make great, impactful, positive decisions. Hi, everyone. I'm CJ from Limitless. We're building, like, basically we're processing around a million dollars a day in bets right now for, like, zero-day contracts on financial markets. I believe that believe that like with crypto, we can build the world's largest economy on chain outside the jurisdiction of any one particular nation state. And I think that as well as that, we need to build really efficient global marketplaces on top of the infrastructure. And so that's what we're doing at Limitless Labs. Thanks, guys. Really appreciate that. So we're here at DIAC today. And so the first question is, how do prediction markets tied to DIAC, how do prediction markets and epistemic tech make the biggest impact in the vein of DIAC on differential progress, defense, and democracy? 
Over to any one of the panelists. One ring to rule them all. If you could have reliable prediction markets on whatever policy question you have, then you can just do better on all of your other areas of life. You could better know how to pursue longevity, better know how to pursue decentralization. Whatever it is you're trying to do, having better information on that can make you do that better. So could we do much better? And how does that map directly to, say for example, defense? I can just replicate that. I think the promise is very clear. So the promise is whether we talk about even simple things like this community note or so, from micro decisions whether to show that community note or not to show that, to extremely large macro decisions: should we raise the interest rate, or should we make this huge defense spending here or there. In general, the promise is that this is a tool for more robust decision-making to bring in, yeah, to have a better quality of bringing information that actually shows the true impact of a decision. Yeah, so I'm going to make a comment here. So over the last few weeks, everybody has seen a lot of people are talking about prediction markets, of course predicting who's going to win the election, and this generated a lot of excitement. But actually, the most interesting thing is not only predicting probabilities, that's what normal prediction markets do, but predicting the consequences of things. So if you could have, for instance, conditional markets on inflation, on GDP growth, on spending, on unemployment, conditional on candidates, right? This would be even much more interesting for democracy. You could actually see like a scorecard for the candidates, right? Like a game, you can pick one. This one's better on this one. This one's better on this other thing, right? So this is the promise of using conditional markets for decision-making. Yeah. Sorry, Martin, go ahead. Maybe let me throw in one kind of concept to challenge this idea. So to say, I mean, I'm just pulling up here CoinGecko and looking at prices of crypto. And, well, they change by 6%, 7% a day. So I think prediction markets rely on the assumption that markets are so efficient that out of those prices we can get reliable information or better information. But I would argue, well, there is also a lot of noise in markets, and I would say that when prices here change by six percent, it is largely not related to any meaningful signal, but is... Well, I need to be careful in the statement, but there's still a lot of noise. So that's, I guess, my question. How can... Is there enough signal in the noise? So when you compare head to head this mechanism to other ones, consistently this wins. So that means all those other mechanisms have more than 6% mistake. Think about any committee you've ever been on. Think about the gossip network. Do you really think that's within 6% of the truth? Come on. Like being 6% close is great compared to what we usually have. Yeah, I guess I would come at the whole question from a little bit of a different angle. I think that definitely prediction markets have a lot of power in terms of forecasting and decision making. But I think in a crypto context, what's really interesting and powerful is the fact that essentially we can build these global efficient marketplaces. And so I really believe that markets in general accelerate human progress. If it wasn't for markets, we wouldn't be where we are today in almost every field. And even they have positive trickle-down effects to science funding, for example. Brian Armstrong sells his Coinbase stock to fund moonshots. And science funding, that's a trivial example in a global context, but still kind of proves the point. And I think that building prediction markets onchain can be defensive in the sense that we actually have capital formation and wealth generation outside of the nation-state in this kind of global on-chain economy, and I think why that's defensive is because we can build these kinds of resilient, independent, decentralized systems that manage to kind of form their own capital, like without, for example, being taxed on it, also without the very heavy regulation, which is a big issue. There are only 16 designated contract markets in the United States. There are 16 licensed derivatives exchanges, but money cannot move globally at the speed of info in the traditional system. Here it can, and we can build these really powerful marketplaces that are global and accelerate human progress. I completely agree. I think markets are a very important piece of technology. I actually do believe that one of the reasons that we are able to accelerate defense, particularly with prediction markets, is because we're basically getting rid of any authority's ability to lie to us, right? Because we can delegate information and truth to markets. I think this is great. And actually, speaking about this, if you think about futarchy so far, as you said, we've had lots of messy details. I'd love to know what you think the key challenges in implementing futarchy will be for real-world organizational decision making. Right. So some challenges, of course. Robin mentioned the matter of the autist in the boardroom. So there's always this risk that prediction markets and also decision markets are, in some sense, threatening to the power of insiders in some organizations that maybe are not making the most efficient decisions. So this is for sure a big challenge. So another challenge that is coming up, I'm going to try to connect the previous question from Martin to the combinatorial market idea from Robin, which is part of basically our grand vision on futarchy, which is the matter of noise. When we're evaluating potentially a small or medium-sized proposal or decision in a company, many of those, if they're small or medium, are not gonna have a clear, visible impact on the share price, so that makes it really hard to use this directly to estimate. But if you had a full network or a full tree where you can see, like, you could have markets for share price or token price, but connected to these you also see the KPIs, markets for different KPIs, number of users, revenue, all the kinds of metrics important for the company. So you could actually see, okay, maybe, there's a lot of noise, I don't see the impact of this decision on the token price, but I see the impact of this decision on this minor KPI, and this minor KPI is associated with this major KPI, which is in turn related to the share price. So I think that's, this is a big trend region for many decisions, to actually have the markets, all the information, connected to each other. Maybe to kind of reorient the question, what problems are you running into building and operating these things? Prediction markets, futarchies, what are the hard problems, what are the solutions? I can say one hard problem, relatively hard problem, is to just find traders that are comfortable going into those positions. And just to give you an example, let's say we would do this for Ethereum. We would say, okay, conditionally, we might go this roadmap or that roadmap, and then we want to use this futarchy mechanism, so essentially people can say, conditional to this roadmap, I buy Ether, and conditional to that, I sell. But then to practically do that, you need to believe that, going back to this correctness of prices, I mean, yes, 6% in a day, but what is really the fair price of Ether right now? And I think there could be a range: some people believe it should be 10,000 and some people would believe it could be less than 1,000. So if you are in the camp of it should be 10,000, then kind of probably in both decisions, you would say, well, I want to be long on Ether, I want to hold Ether. And if you're in the camp of, well, this is really not worth it, it's just worth 1,000, then again on both sides. So you need that person that says, well, the price is pretty much right now, and really that decision makes a marginal difference for me to hold the asset. And then let's say there's a really good proposal and you really like that, so you go long on that. Now you hold this conditional yes. Now the next day, some external, completely unrelated event happens where the whole crypto market crashes by 50%. You hold this yes proposal and hold it at the price from yesterday. You still think the proposal is good, but now your incentives are for the proposal absolutely to not pass, because then you would hold all the dollars and not the asset at a price where the real asset is now 50% cheaper from what you bought it. So I'm not sure if anyone could follow, but those are kind of the challenges I see. So 20 years ago we had a burst of applications of prediction markets, including in corporations. So I think we saw typical failure modes there that are instructive about today. Typically if you went to work with a company about setting up a prediction market, you would say, well, what are your most important issues and numbers you'd like to track, and let's set up markets on those, and they would usually say, well, that's a little sensitive. Let's do something a little safer, and they would pick safer topics, and then they'd get accurate estimates on them and satisfied users, but they go, yeah, but we didn't really care about those. So, you know, that would be one outcome. Or, you know, they would say, talk about something important, and then management couldn't resist having opinions that disagreed with the market, and the market gets proven right, and the management wrong, and they're just really mad and want to kill it. So, for example, the US government missile test agency had prediction markets on which tests would actually go forward. So they put up, they try to make a lot of tests, and a lot of them don't actually happen, because they have to coordinate a lot of different parts of the military to make a test happen, and if one part isn't there, the whole thing has to be scrapped, and a lot of money is lost. And so they wanted to know which tests would actually go through, and maybe save money by cutting back earlier, but then it was more legible that they knew ahead of time that it wasn't going to work, and that was a management problem, so they didn't want that anymore; they'd rather pretend they don't know how these things are failing. Yeah, I guess I'm gonna talk about this from more of a consumer product building perspective, less the futarchy side. And I think definitely, like, historically, it's absolutely true that marketplaces are very hard products to bootstrap and to activate the atomic network effect. I think that it's definitely hard to incentivize market makers or liquidity providers, especially for things like pop culture markets, right? I mean, you can always build the model for, like, elections or sports markets, or especially in Limitless's case, the financial markets, right? You can use the volatility to understand how to price them. And so it can be attractive to institutional market makers, because it's essentially like a short-term retail option flow. But at the same time, you know, you may look at Polymarket, how much they spend per month to incentivize market makers to come into their order book. Actually, I spent a lot of time previously, and I spoke to Robin about it in Berkeley, kind of obsessing about incremental improvements in the AMM, which we see, for example, Paradigm just launched the pm-AMM, right? We're actually going to spin up a contract for it to see how much of an improvement it is. But I definitely think the challenge is, how do you incentivize the market makers? Either you use cash or you use your token or what, but it's still a huge expense for bootstrapping the initial network, yeah. But maybe over time it will be worth it. 
All right, brilliant.", "eventId": "devcon-7", - "slot_start": 1731558600000, - "slot_end": 1731560400000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1xmwhrvV65FuGDVnNb8_zGgVoMM4-pg6gMEP0t1Iw-OU", - "resources_slides": null, + "slot_start": 1731563400000, + "slot_end": 1731564300000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1Rm-aNAjKTe4WozwIfgJQZhQN1chtswB5-fuDukQWE5k", + "resources_slides": "", "speakers": [ - "anders-elowsson" + "robin-hanson", + "martin-k", + "joey-krug", + "cj", + "kas" ] }, "vector": [ - 0, 0, 6, 0, @@ -581688,6 +580042,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -581995,6 +580350,61 @@ 0, 0, 6, + 6, + 6, + 6, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -582230,7 +580640,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -582260,7 +580669,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -582269,7 +580677,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -582354,62 +580761,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -582791,14 +581142,13 @@ 2, 0, 0, + 2, 0, 0, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -582810,50 +581160,51 @@ }, { "session": { - "id": "prediction-market-panel", - "sourceId": "CCZCSH", - "title": "Prediction market panel", - "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "id": "privacy-enabled-smart-contract-driven-fair-and-transparent-reward-mechanism-in-federated-ai", + "sourceId": "LKD3RG", + "title": "Privacy enabled, Smart Contract driven Fair and transparent reward mechanism in Federated AI", + "description": "Federated learning enables multiple parties to contribute their locally trained models to an aggregation server, which securely combines individual models into a global one. However, it lacks a fair, verifiable, and proportionate reward (or penalty) mechanism for each contributor. 
Implementing a smart contract-based contribution analysis framework for federated learning on a privacy-enabled Ethereum L2 can address this challenge, and build the economics of federated learning public chain.", + "track": "Real World Ethereum", "type": "Lightning Talk", - "expertise": "", - "audience": "Engineering", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 928, + "tags": [ + "transparency" + ], + "keywords": [ + "Federated AI", + "Smart Contracts", + "Transparency" + ], + "duration": 531, "language": "en", - "sources_swarmHash": "7f1382c2276a5b8073634d3ef9ea7e087d715d1817fdfb2383dfc53dbe76bdbb", - "sources_youtubeId": "oq34OAKrU5M", + "sources_swarmHash": "8688138370ef7be9ee67412e069fec2019678bcefd1d8d3c719553a958c67365", + "sources_youtubeId": "TyWi4laTAUo", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673595919dbb7a90e13b23f7", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673595919dbb7a90e13b23f7.vtt", - "transcript_text": " Hello everyone and can you hear me? Awesome. I'm Vaughan Mackenzie-Landell. I'm one of the co-founders of Butter. We've been working in the governance space for a very long time and Obviously very excited about futarki and prediction markets and that's why we're all here We have this incredible panel full of lots of people who my love and admire And I'm gonna just hand over to each of them to give a very short introduction and then we'll kick off Yeah, I'm Martin. So in a previous live venosis, we started with prediction markets and Wanted to implement food hierarchy and other things. So in general, I'm absolutely a super big fan of that, but I think in this panel I will a little bit play the skeptic here because I think as Robin mentioned, you have the idea and then you have all the details that need to be figured out. So I think I can a little bit bring in that perspective. I just talked, so I'm Robin Hanson. So I'm Calvin, also CAS.if. I'm the co-founder of Utarki.fi. We just announced our project a few days ago. Our plan is to help DAOs and other organizations make great, impactful, positive decisions. Hi, everyone. I'm CJ from Limitless. We're building, like, basically we're processing around a million dollars a day in bets right now for, like, zero-day contracts on financial markets. I believe that believe that like with crypto, we can build the world's largest economy on chain outside the jurisdiction of any one particular nation state. And I think that as well as that, we need to build really efficient global marketplaces on top of the infrastructure. And so that's what we're doing at Limitless Labs. Thanks, guys. Really appreciate that. So we're here at DIAC today. And so the first question is, how do prediction markets tied to DIAC, how do prediction markets and epistemic tech make the biggest impact in the vein of DIAC on differential progress, defense, and democracy? Over to any one of the panelists. One ring to rule them all. If you could have reliable prediction markets on whatever policy question you have, then you can just do better on all of your other areas of life. You could better know how to pursue longevity, better know how to pursue decentralization. Whatever it is you're trying to do, having better information on that can make you do that better. So could we do much better? And how does that map directly to, say for example, defense? I can just replicate that. 
I think the promise is very clear. So the promise is whether we talk about even simple things like this community note or so, from micro decisions whether to show that community note or not to show that, to extremely large macro decisions: should we raise the interest rate, or should we make this huge defense spending here or there. In general, the promise is that this is a tool for more robust decision-making to bring in, yeah, to have a better quality of bringing information that actually shows the true impact of a decision. Yeah, so I'm going to make a comment here. So over the last few weeks, everybody has seen a lot of people are talking about prediction markets, of course predicting who's going to win the election, and this generated a lot of excitement. But actually, the most interesting thing is not only predicting probabilities, that's what normal prediction markets do, but predicting the consequences of things. So if you could have, for instance, conditional markets on inflation, on GDP growth, on spending, on unemployment, conditional on candidates, right? This would be even much more interesting for democracy. You could actually see like a scorecard for the candidates, right? Like a game, you can pick one. This one's better on this one. This one's better on this other thing, right? So this is the promise of using conditional markets for decision-making. Yeah. Sorry, Martin, go ahead. Maybe let me throw in one kind of concept to challenge this idea. So to say, I mean, I'm just pulling up here CoinGecko and looking at prices of crypto. And, well, they change by 6%, 7% a day. So I think prediction markets rely on the assumption that markets are so efficient that out of those prices we can get reliable information or better information. But I would argue, well, there is also a lot of noise in markets, and I would say that when prices here change by six percent, it is largely not related to any meaningful signal, but is... Well, I need to be careful in the statement, but there's still a lot of noise. So that's, I guess, my question. How can... Is there enough signal in the noise? So when you compare head to head this mechanism to other ones, consistently this wins. So that means all those other mechanisms have more than 6% mistake. Think about any committee you've ever been on. Think about the gossip network. Do you really think that's within 6% of the truth? Come on. Like being 6% close is great compared to what we usually have. Yeah, I guess I would come at the whole question from a little bit of a different angle. I think that definitely prediction markets have a lot of power in terms of forecasting and decision making. But I think in a crypto context, what's really interesting and powerful is the fact that essentially we can build these global efficient marketplaces. And so I really believe that markets in general accelerate human progress. If it wasn't for markets, we wouldn't be where we are today in almost every field. And even they have positive trickle-down effects to science funding, for example. Brian Armstrong sells his Coinbase stock to fund moonshots. And science funding, that's a trivial example in a global context, but still kind of proves the point. And I think that building prediction markets onchain can be defensive in the sense that we actually have capital formation and wealth generation outside of the nation-state in this kind of global on-chain economy, and I think why that's defensive is because we can build these kinds of resilient, independent, decentralized systems that manage to kind of form their own capital, like without, for example, being taxed on it, also without the very heavy regulation, which is a big issue. There are only 16 designated contract markets in the United States. There are 16 licensed derivatives exchanges, but money cannot move globally at the speed of info in the traditional system. Here it can, and we can build these really powerful marketplaces that are global and accelerate human progress. I completely agree. I think markets are a very important piece of technology. I actually do believe that one of the reasons that we are able to accelerate defense, particularly with prediction markets, is because we're basically getting rid of any authority's ability to lie to us, right? Because we can delegate information and truth to markets. I think this is great. And actually, speaking about this, if you think about futarchy so far, as you said, we've had lots of messy details. I'd love to know what you think the key challenges in implementing futarchy will be for real-world organizational decision making. Right. So some challenges, of course. Robin mentioned the matter of the autist in the boardroom. So there's always this risk that prediction markets and also decision markets are, in some sense, threatening to the power of insiders in some organizations that maybe are not making the most efficient decisions. So this is for sure a big challenge. So another challenge that is coming up, I'm going to try to connect the previous question from Martin to the combinatorial market idea from Robin, which is part of basically our grand vision on futarchy, which is the matter of noise. When we're evaluating potentially a small or medium-sized proposal or decision in a company, many of those, if they're small or medium, are not gonna have a clear, visible impact on the share price, so that makes it really hard to use this directly to estimate. But if you had a full network or a full tree where you can see, like, you could have markets for share price or token price, but connected to these you also see the KPIs, markets for different KPIs, number of users, revenue, all the kinds of metrics important for the company. So you could actually see, okay, maybe, there's a lot of noise, I don't see the impact of this decision on the token price, but I see the impact of this decision on this minor KPI, and this minor KPI is associated with this major KPI, which is in turn related to the share price. So I think that's, this is a big trend region for many decisions, to actually have the markets, all the information, connected to each other. Maybe to kind of reorient the question, what problems are you running into building and operating these things? Prediction markets, futarchies, what are the hard problems, what are the solutions? I can say one hard problem, relatively hard problem, is to just find traders that are comfortable going into those positions. And just to give you an example, let's say we would do this for Ethereum. We would say, okay, conditionally, we might go this roadmap or that roadmap, and then we want to use this futarchy mechanism, so essentially people can say, conditional to this roadmap, I buy Ether, and conditional to that, I sell. But then to practically do that, you need to believe that, going back to this correctness of prices, I mean, yes, 6% in a day, but what is really the fair price of Ether right now? And I think there could be a range: some people believe it should be 10,000 and some people would believe it could be less than 1,000. So if you are in the camp of it should be 10,000, then kind of probably in both decisions, you would say, well, I want to be long on Ether, I want to hold Ether. And if you're in the camp of, well, this is really not worth it, it's just worth 1,000, then again on both sides. So you need that person that says, well, the price is pretty much right now, and really that decision makes a marginal difference for me to hold the asset. And then let's say there's a really good proposal and you really like that, so you go long on that. Now you hold this conditional yes. Now the next day, some external, completely unrelated event happens where the whole crypto market crashes by 50%. You hold this yes proposal and hold it at the price from yesterday. You still think the proposal is good, but now your incentives are for the proposal absolutely to not pass, because then you would hold all the dollars and not the asset at a price where the real asset is now 50% cheaper from what you bought it. So I'm not sure if anyone could follow, but those are kind of the challenges I see. So 20 years ago we had a burst of applications of prediction markets, including in corporations. So I think we saw typical failure modes there that are instructive about today. Typically if you went to work with a company about setting up a prediction market, you would say, well, what are your most important issues and numbers you'd like to track, and let's set up markets on those, and they would usually say, well, that's a little sensitive. Let's do something a little safer, and they would pick safer topics, and then they'd get accurate estimates on them and satisfied users, but they go, yeah, but we didn't really care about those. So, you know, that would be one outcome. Or, you know, they would say, talk about something important, and then management couldn't resist having opinions that disagreed with the market, and the market gets proven right, and the management wrong, and they're just really mad and want to kill it. So, for example, the US government missile test agency had prediction markets on which tests would actually go forward. So they put up, they try to make a lot of tests, and a lot of them don't actually happen, because they have to coordinate a lot of different parts of the military to make a test happen, and if one part isn't there, the whole thing has to be scrapped, and a lot of money is lost. And so they wanted to know which tests would actually go through, and maybe save money by cutting back earlier, but then it was more legible that they knew ahead of time that it wasn't going to work, and that was a management problem, so they didn't want that anymore; they'd rather pretend they don't know how these things are failing. Yeah, I guess I'm gonna talk about this from more of a consumer product building perspective, less the futarchy side. 
And I think definitely like historically, it's absolutely true that marketplaces are like very hard like products to bootstrap and to activate like the atomic network effect. I think that like it's definitely like to incentivize like market makers or liquidity providers, especially for things like pop culture markets, right? I mean, you can always build the model for, for like elections or sports markets, or especially in limitless cases like the financial markets, right? You can use the volatility to understand how to price them. And so it can be attractive to institutional market makers because it's essentially like a short term retail retail option flow but at the same time you know you may look at Polymarket how much they spend per month to incentivize market makers to come like into their order book actually I spent a lot of time previously and like I spoke to Robin about it in Berkeley like kind of obsessing about incremental improvements in the AMM which we see like for example like paradigm just launched the PMAM, right? We're actually going to spin up a contract for it to see how much of an improvement is it. But I definitely think that challenge is like, how do you incentivize the market makers? Either you use cash or you use your token or what, but it's still a huge expense for bootstrapping like the initial network, yeah. But maybe over time it will be worth it. All right, brilliant.", + "sources_streamethId": "67359f259dbb7a90e195ae42", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731563400000, - "slot_end": 1731564300000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1Rm-aNAjKTe4WozwIfgJQZhQN1chtswB5-fuDukQWE5k", - "resources_slides": null, + "slot_start": 1731564600000, + "slot_end": 1731565200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1aXt8K7kJm7xJ0limjmVm0ZVioUUzgILAGxnm6NBfVoU", + "resources_slides": "https://drive.google.com/file/d/1kTeYrkTRWF9oVRKQoZY7yi5JXjc4dBrD/view", "speakers": [ - "robin-hanson", - "martin-k", - "joey-krug", - "cj", - "kas" + "sudhir-upadhyay" ] }, "vector": [ - 0, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -583059,7 +581410,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -583367,15 +581717,12 @@ 0, 0, 0, - 6, - 6, - 6, - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -584040,6 +582387,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -584159,9 +582507,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 2, @@ -584176,47 +582524,47 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "privacy-enabled-smart-contract-driven-fair-and-transparent-reward-mechanism-in-federated-ai", - "sourceId": "LKD3RG", - "title": "Privacy enabled, Smart Contract driven Fair and transparent reward mechanism in Federated AI", - "description": "Federated learning enables multiple parties to contribute their locally trained models to an aggregation server, which securely combines individual models into a global one. However, it lacks a fair, verifiable, and proportionate reward (or penalty) mechanism for each contributor. 
Implementing a smart contract-based contribution analysis framework for federated learning on a privacy-enabled Ethereum L2 can address this challenge, and build the economics of federated learning public chain.", + "id": "privacy-first-cbdcs", + "sourceId": "TWMAWN", + "title": "Privacy-First CBDCs", + "description": "This talk explores how central bank digital currencies (CBDCs) can leverage zero-knowledge proofs (ZKPs) and Ethereum to create privacy-centric monetary systems. We'll examine how ZKPs enable robust AML/CTF compliance while preserving user privacy, discuss the benefits of Ethereum deployment for financial inclusion and innovation, and showcase how these technologies could revolutionize digital currency design. Future CBDCs can and should offer unparalleled privacy, security, and functionality.", "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Research", + "type": "Talk", + "expertise": "Beginner", + "audience": "Lobby", "featured": false, "doNotRecord": false, "tags": [ - "transparency" + "Payment", + "Privacy", + "Zero-Knowledge" ], "keywords": [ - "Federated AI", - "Smart Contracts", - "Transparency" + "CBDC" ], - "duration": 531, + "duration": 1538, "language": "en", - "sources_swarmHash": "8688138370ef7be9ee67412e069fec2019678bcefd1d8d3c719553a958c67365", - "sources_youtubeId": "TyWi4laTAUo", + "sources_swarmHash": "2fcc5d2003328d6a5974f449d4235f5bb8901d112e31c10318763719e26e7e96", + "sources_youtubeId": "zNgmBX0c8yo", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67359f259dbb7a90e195ae42", + "sources_streamethId": "673321023a168eb5354cf6cb", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731564600000, - "slot_end": 1731565200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1aXt8K7kJm7xJ0limjmVm0ZVioUUzgILAGxnm6NBfVoU", - "resources_slides": null, + "slot_start": 1731400200000, + "slot_end": 1731402000000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1yAUh-BkJ1oE5n2L_-NknKAtAJ9okKkjhrA-_VvME4rw", + "resources_slides": "https://drive.google.com/file/d/1-V9S_6Mf_j8rlh7h6L0cjPVl9xUoSG5T/view", "speakers": [ - "sudhir-upadhyay" + "joe-andrews", + "andre-omietanski" ] }, "vector": [ @@ -584451,7 +582799,7 @@ 0, 0, 0, - 0, + 6, 0, 0, 0, @@ -584980,6 +583328,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -585083,6 +583432,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -585107,6 +583457,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -585413,12 +583764,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -585539,7 +583884,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -585550,48 +583894,64 @@ 0, 0, 0, - 0, - 0 + 2 ] }, { "session": { - "id": "privacy-first-cbdcs", - "sourceId": "TWMAWN", - "title": "Privacy-First CBDCs", - "description": "This talk explores how central bank digital currencies (CBDCs) can leverage zero-knowledge proofs (ZKPs) and Ethereum to create privacy-centric monetary systems. We'll examine how ZKPs enable robust AML/CTF compliance while preserving user privacy, discuss the benefits of Ethereum deployment for financial inclusion and innovation, and showcase how these technologies could revolutionize digital currency design. 
Future CBDCs can and should offer unparalleled privacy, security, and functionality.", - "track": "Real World Ethereum", - "type": "Talk", + "id": "privacy-preserving-groups", + "sourceId": "LSA3JK", + "title": "Privacy-Preserving Groups", + "description": "This talk will explore the concept of privacy-preserving groups and the challenges associated with managing them. It will cover different ideas to add anti-sybil mechanisms to enhance group security and trust. The presentation will also highlight real-world projects working on it and provide practical use cases to illustrate their application and impact.", + "track": "Applied Cryptography", + "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Lobby", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Payment", + "Tooling", + "DAO", "Privacy", - "Zero-Knowledge" + "Anonymity", + "Identity", + "Open Source Software", + "ZKP", + "Zero-Knowledge", + "Use cases of cryptography", + "Public good", + "User Experience", + "groups", + "Anonymity", + "DAO", + "Identity", + "Open Source Software", + "Privacy", + "Public good", + "Tooling", + "Use cases of cryptography", + "User Experience", + "Zero-Knowledge", + "ZKP" ], "keywords": [ - "CBDC" + "Groups" ], - "duration": 1538, + "duration": 464, "language": "en", - "sources_swarmHash": "2fcc5d2003328d6a5974f449d4235f5bb8901d112e31c10318763719e26e7e96", - "sources_youtubeId": "zNgmBX0c8yo", + "sources_swarmHash": "18b4db550bcad65fa27ad24340bd8c75da7a5d04c9944d8303de3e690ebbdaf8", + "sources_youtubeId": "dWQWoqJVfn8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673321023a168eb5354cf6cb", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731400200000, - "slot_end": 1731402000000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1yAUh-BkJ1oE5n2L_-NknKAtAJ9okKkjhrA-_VvME4rw", - "resources_slides": null, + "slot_start": 1731396600000, + "slot_end": 1731397200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/13v7xDojqK_R5sq5GZJLvGNitJNJ0JqrztXhZYzs0pXM", + "resources_slides": "https://drive.google.com/file/d/1xrvM_TgYGCccvpk-3V-y0JVz5DxZp-bE/view", "speakers": [ - "andre-omietanski", - "joe-andrews" + "vivian-plasencia" ] }, "vector": [ @@ -585601,13 +583961,11 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -585826,7 +584184,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -586121,10 +584478,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -586362,15 +584719,19 @@ 0, 0, 0, + 6, 0, 0, 0, 0, + 2, 0, 0, 0, 0, + 2, 0, + 2, 0, 0, 0, @@ -586391,6 +584752,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -586416,6 +584778,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -586438,8 +584801,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -586447,12 +584812,14 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -586462,12 +584829,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -586487,13 +584848,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -586791,6 +585145,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -586913,6 +585268,7 @@ 0, 2, 0, + 2, 0, 0, 0, @@ -586925,71 +585281,58 @@ 0, 0, 0, - 0, - 2 + 0 ] }, { "session": { - "id": "privacy-preserving-groups", - "sourceId": "LSA3JK", - "title": "Privacy-Preserving Groups", - "description": "This talk will explore the concept of privacy-preserving groups and the challenges associated with managing them. 
It will cover different ideas to add anti-sybil mechanisms to enhance group security and trust. The presentation will also highlight real-world projects working on it and provide practical use cases to illustrate their application and impact.", - "track": "Applied Cryptography", - "type": "Lightning Talk", + "id": "prize-worthy-an-ethereum-python-hackathon-guide", + "sourceId": "73J9ZG", + "title": "Prize-Worthy: An Ethereum Python Hackathon Guide", + "description": "An interactive and beginner-friendly Ethereum Python Speedrun tailored for hackathons, hosted by the EF Python team. Quickly get up to speed with fundamental building blocks, then stack them into a live application. By the end of this workshop, you'll have a clear idea of how to get your own projects off the ground.", + "track": "Developer Experience", + "type": "Workshop", "expertise": "Beginner", - "audience": "Engineering", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ "Tooling", - "DAO", - "Privacy", - "Anonymity", - "Identity", + "DevEx", "Open Source Software", - "ZKP", - "Zero-Knowledge", - "Use cases of cryptography", - "Public good", - "User Experience", - "groups", - "Anonymity", - "DAO", - "Identity", + "solidity", + "DevEx", "Open Source Software", - "Privacy", - "Public good", - "Tooling", - "Use cases of cryptography", - "User Experience", - "Zero-Knowledge", - "ZKP" + "Tooling" ], "keywords": [ - "Groups" + "Vyper", + "Solidity" ], - "duration": 464, + "duration": 4162, "language": "en", - "sources_swarmHash": "18b4db550bcad65fa27ad24340bd8c75da7a5d04c9944d8303de3e690ebbdaf8", - "sources_youtubeId": "dWQWoqJVfn8", + "sources_swarmHash": "0ff20ddcd42b89b3971de379d28a01b1621d77ab795fc1962c7fc0a4441104e6", + "sources_youtubeId": "tetTX0ozcCg", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673427ae9dbb7a90e19e4c2f", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731396600000, - "slot_end": 1731397200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/13v7xDojqK_R5sq5GZJLvGNitJNJ0JqrztXhZYzs0pXM", - "resources_slides": null, + "slot_start": 1731465900000, + "slot_end": 1731471300000, + "slot_roomId": "classroom-d", + "resources_presentation": "https://docs.google.com/presentation/d/1BdovxuMXRzh3v5kgPx7kmJtQ78cQ3TRzKpVqoR27GwE", + "resources_slides": "https://drive.google.com/file/d/1b9gNV69oc0xVh6cerP43zpGHOqzD7c2U/view", "speakers": [ - "vivian-plasencia" + "marc-garreau" ] }, "vector": [ 0, 0, 0, + 6, 0, 0, 0, @@ -586997,7 +585340,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -587750,12 +586092,9 @@ 0, 0, 0, - 6, 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -587764,7 +586103,9 @@ 0, 0, 0, - 2, + 0, + 0, + 0, 0, 2, 0, @@ -587787,7 +586128,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -587813,7 +586153,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -587839,7 +586178,6 @@ 2, 0, 0, - 2, 0, 0, 0, @@ -587847,14 +586185,12 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -588105,6 +586441,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -588181,7 +586518,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -588303,12 +586639,13 @@ 0, 0, 0, - 2, 0, 2, 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -588323,58 +586660,53 @@ }, { "session": { - "id": "prize-worthy-an-ethereum-python-hackathon-guide", - "sourceId": "73J9ZG", - "title": "Prize-Worthy: An Ethereum Python Hackathon Guide", - "description": "An interactive and beginner-friendly Ethereum Python Speedrun tailored for hackathons, hosted by the EF 
Python team. Quickly get up to speed with fundamental building blocks, then stack them into a live application. By the end of this workshop, you'll have a clear idea of how to get your own projects off the ground.", - "track": "Developer Experience", - "type": "Workshop", + "id": "product-led-blockchain-development", + "sourceId": "8YS9LW", + "title": "Product-Led Blockchain Development", + "description": "As teams spin up new app-specific rollups and L2s, we've moved into an era of product-led blockchain development. In this model, developers are not only building the first product or client to leverage their protocol, but establishing what ‘product-defined blockspace’ means. \r\n\r\nIn this talk, I go over the history of product-led growth, how it evolved to product-led protocol development in web3, and finally, what product-led blockchain development means in the context of app-specific rollups.", + "track": "Usability", + "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Developer", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Tooling", - "DevEx", - "Open Source Software", - "solidity", - "DevEx", - "Open Source Software", - "Tooling" + "development", + "product" ], "keywords": [ - "Vyper", - "Solidity" + "usability", + "product development" ], - "duration": 4162, + "duration": 519, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "tetTX0ozcCg", + "sources_swarmHash": "e673aab0a55e626aee65b66612cfff54e5bc047220abde3acee5e741714ea2b5", + "sources_youtubeId": "RIyvlEGFyHo", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673427ae9dbb7a90e19e4c2f", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "67356d759dbb7a90e16a807e", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67356d759dbb7a90e16a807e.vtt", + "transcript_text": " Hello everyone, my name is Gregory Rocco as mentioned and I tried to figure out what I could do in a short amount of time for a lightning talk. I felt like you'd have a lot of highly specific technical talks, so I figured I'd give product a go when it comes to giving one of these. So the title itself is Product-Led Blockchain Development, so it's kind of covering the whole lineage of going from product-led growth for startups to product-led protocol development, which was originally coined by Dan Romero a couple of years ago when thinking about Forecaster, and then finally product-led blockchain development, which is this new era of everyone seemingly wanting their own block space for some strange reason, but we'll figure it out eventually. But the subtitle here is Why You're Wrong and the Market is Right deals specifically with making sure that whenever you think about product development, protocol development, or even blockchain development, that you are serving some customer need and not your own interests to a certain degree. You have to be interested in what you're doing, but at the same time, you have to serve a market. And a disclaimer to this talk is that I was a founder, and I was wrong plenty of times. I specifically started my career in this space in decentralized identity, which namely is a hard space to crack when it comes to product development. And probably there are just very few companies and protocols that have done it. Maybe ENS is the closest to product market fit when it comes to identity. 
But user-controlled credentials generally were quite tough. Going back into the core basics of product development, one of the kind of core pillars there is product-led growth. Namely, that's whenever you build a product, you specifically have to make sure that customer needs and customer experience are at the forefront, dictating everything that you do and become the full kind of motion for what you do. So feedback comes in, you then take that feedback as a product developer, and that's what influences engineering, design, messaging, and everything in between. It shouldn't come from, oh, just one more feature and all these users are going to use it. It never works out that way. It's specifically in the case of someone requesting a feature from what you build. You bring something to market, you get your initial set of users, and you're constantly using that messaging to inform what you're going to do and all the decisions you're going to make when thinking about product development. This was the framework for a long time when it came to startups. It's what I was indoctrinated with when I went through Y Combinator. And then later in the space, since we're all kind of building these emerging protocols and blockchains, the framework had to be changed a little bit because it was a bit of like a blue ocean space when it came to protocol development. Kind of fast forward a little bit later and you have the situations where developers do developer-y things, where the developer might have an interesting idea and might have some problems bringing it to market specifically because they're just kind of handing a protocol over to someone and saying, here, build on this. Build all these great ideas on my protocol. It'll be great. But the problem is it's not taking those first principles into account. And a couple of years ago, Dan Romero from Farcaster put out a post called Product-Led Protocol Development, and it specifically dealt with the idea that as a protocol developer, you're building the first apps and clients that leverage your own protocol and are then taking the feedback from those to inform protocol development. Namely, in his case, there are three core tenets of product-led protocol development. The first is that users use apps and clients, they don't use protocols. So let's use Farcaster for the sake of the example. Users are using Warpcast. They're kind of making posts. They're interacting with the application. On the back end, it's Farcaster, the protocol, that's kind of informing all of these interactions. The second thing is that a developer worth their salt will make sure that they're concerned with meaningful daily active users instead of the coolest technology that they want to use. A developer that's building for an audience specifically and not experimenting. And the third and final part of it is that protocols are valued based on the clients and app success that build on said protocol. So it's always making sure that apps and clients are at the center here. And then if you're building a protocol, you are defining the blueprint for how that protocol should be used by being informed of what's happening on these apps and clients that people are actually using. And then finally, to round it out, complication requires a bit of order. Recently in the space, we're finding protocols wanting to have their own block space, building app-specific roll-ups. 
And I think just the bottom line of all this is just making sure that if you kind of go down this route, that you have to be the block space you want to see in the world. You have to make sure that when you're building this, you are building the killer app, leveraging this block space, that you're not going the usual route of building this thing and saying, hey, everyone, we're going to get a growth marketer to bring a million developers through the door, and then they're going to build a whole bunch of stuff on our new protocol, and it's going to be great. It's like, no, you're going to be building the app that specifically leverages this for what you designed it to do. And if it doesn't work out well, then you might need to hit the drawing board again. But it's all specifically going back to this core idea of product-led growth, where you are presenting something to the market. You're presenting the end users with something to use, they are then informing what happens with that and everything in between. So kind of returning to first principles, I encourage experimentation, but at the end of the experimentation, if you do wish to bring something to market, please keep designing and developing with users in mind and making sure those users are able to give you feedback to inform the next stages of what you're building. So thanks. Thanks a lot, Gregory. And now we have a little bit of time for questions. Wow, someone's ready. I can see that. All right, let me get this box to you. Ready? And go. Hello, thank you. I'm curious to have your opinion about the fact, don't you think the space is even designed to not incentivize competition between different roll-ups? Because they're all looking to attract liquidity, attract users, and just grow individually. And so this doesn't go in pair with taking care of every user, because they compete for users, they compete for liquidity, and there is a lack of interoperability, really. So what are your thoughts on this? So in the case of very app-specific case, you're then competing with others. We don't need all those roll-ups. No, we don't. It's unnecessary. I think I'm in agreement with you. We don't to a certain extent, but making sure that if you were building something to then build in those use cases at the same time. I might not be understanding the question correctly. Well, I think the space is designed in a way like everyone wants to top their own roll-up, get users, get liquidity, but at the end, we're not building something that actually can be useful for people. And it's also sometimes just serving the ecosystem. But to an extent, so if there is a dominant use case that's an app-specific roll-up, then good for them. I hope it amasses the most amount of liquidity in its own way and dominates that space if it's providing a good enough of experience. Like, I want to allow the ability for anyone to create that app-specific roll-up, but at the same time, I want someone to win. And that's okay because it means users getting the value. And if that app-specific roll-up turns around and does something against user wants, there's another app-specific roll-up kind of waiting for that, or waiting for that user base. OK, thank you. Is there another question? Because otherwise... I saw another one here. Yes, there are others. So you essentially mentioned that developers should focus on building a product that the market wants. What makes it, and we see that most companies actually are having a hard time doing this. 
What do you think makes it so difficult to apply that very easy, fundamental, first principle thinking? I think being in the space for a number of years, everyone, there's a number of years, everyone, there's a lot of cases where people aren't working deeply enough or pinpoint enough on a very specific issue. Namely that, good example, let's use a hypothetical of social protocols, or someone building a chain for all the social protocols. They're thinking mostly about, this is again a hypothetical, but like we're going to amass all the social protocols. They're going to be using our chain. There are different clients and all that without building the very specific client themselves and saying we're going to be offering this experience first as a first-class citizen. They're thinking too much about offer it to everyone before let's get the experience right and figure out how to then open it up. And I think that's been a part of the thinking or an issue with the thinking for a long time. It's ambitious and it's very like a good thing overall, but at the same time it becomes difficult to provide that very narrow experience or solve that very specific problem before thinking too widely. And I think you have to balance both of that when developing and designing.", "eventId": "devcon-7", - "slot_start": 1731465900000, - "slot_end": 1731471300000, - "slot_roomId": "classroom-d", - "resources_presentation": "https://docs.google.com/presentation/d/1BdovxuMXRzh3v5kgPx7kmJtQ78cQ3TRzKpVqoR27GwE", - "resources_slides": null, + "slot_start": 1731552900000, + "slot_end": 1731553500000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1aMtbpw97Q1DjqYA3pKLPTVpJ9vWOJoduN-rGCXYlHck", + "resources_slides": "https://drive.google.com/file/d/1PUCNi_DKlJJuey7QpQLcAhHDq9kjGi4l/view", "speakers": [ - "marc-garreau" + "gregory-rocco" ] }, "vector": [ 0, 0, 0, - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -589138,16 +587470,12 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -589215,7 +587543,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -589392,6 +587719,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -589482,8 +587810,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -589513,6 +587839,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -589686,9 +588013,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -589702,45 +588029,47 @@ }, { "session": { - "id": "product-led-blockchain-development", - "sourceId": "8YS9LW", - "title": "Product-Led Blockchain Development", - "description": "As teams spin up new app-specific rollups and L2s, we've moved into an era of product-led blockchain development. In this model, developers are not only building the first product or client to leverage their protocol, but establishing what ‘product-defined blockspace’ means. \r\n\r\nIn this talk, I go over the history of product-led growth, how it evolved to product-led protocol development in web3, and finally, what product-led blockchain development means in the context of app-specific rollups.", - "track": "Usability", + "id": "programmable-cryptography-and-dacc", + "sourceId": "PNA8NU", + "title": "Programmable Cryptography and d/acc", + "description": "This short panel will explore the role of advanced programmable cryptography, beyond ZK and MPC, in d/acc. Programmable cryptographic primitives like functional encryption (FE), witness encryption (WE), and indistinguishability obfuscation (iO) have become theoretically feasible and even moving towards real-world practicality. 
This panel will explore how these primitives can be used to improve trust-minimized infrastructure and applications.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "development", - "product" + "Cryptography", + "Use cases of cryptography" ], "keywords": [ - "usability", - "product development" + "d/acc", + "programmable cryptography" ], - "duration": 519, + "duration": 1223, "language": "en", - "sources_swarmHash": "e673aab0a55e626aee65b66612cfff54e5bc047220abde3acee5e741714ea2b5", - "sources_youtubeId": "RIyvlEGFyHo", + "sources_swarmHash": "c51ad64f5cc04390985fbfe82798708f9cf1c378086fcb689bc0ee5ee3d75a64", + "sources_youtubeId": "NrhmX3yHNdA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67356d759dbb7a90e16a807e", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67356d759dbb7a90e16a807e.vtt", - "transcript_text": " Hello everyone, my name is Gregory Rocco as mentioned and I tried to figure out what I could do in a short amount of time for a lightning talk. I felt like you'd have a lot of highly specific technical talks, so I figured I'd give product a go when it comes to giving one of these. So the title itself is Product-Led Blockchain Development, so it's kind of covering the whole lineage of going from product-led growth for startups to product-led protocol development, which was originally coined by Dan Romero a couple of years ago when thinking about Forecaster, and then finally product-led blockchain development, which is this new era of everyone seemingly wanting their own block space for some strange reason, but we'll figure it out eventually. But the subtitle here is Why You're Wrong and the Market is Right deals specifically with making sure that whenever you think about product development, protocol development, or even blockchain development, that you are serving some customer need and not your own interests to a certain degree. You have to be interested in what you're doing, but at the same time, you have to serve a market. And a disclaimer to this talk is that I was a founder, and I was wrong plenty of times. I specifically started my career in this space in decentralized identity, which namely is a hard space to crack when it comes to product development. And probably there are just very few companies and protocols that have done it. Maybe ENS is the closest to product market fit when it comes to identity. But user-controlled credentials generally were quite tough. Going back into the core basics of product development, one of the kind of core pillars there is product-led growth. Namely, that's whenever you build a product, you specifically have to make sure that customer needs and customer experience are at the forefront, dictating everything that you do and become the full kind of motion for what you do. So feedback comes in, you then take that feedback as a product developer, and that's what influences engineering, design, messaging, and everything in between. It shouldn't come from, oh, just one more feature and all these users are going to use it. It never works out that way. It's specifically in the case of someone requesting a feature from what you build. 
You bring something to market, you get your initial set of users, and you're constantly using that messaging to inform what you're going to do and all the decisions you're going to make when thinking about product development. This was the framework for a long time when it came to startups. It's what I was indoctrinated with when I went through Y Combinator. And then later in the space, since we're all kind of of building these emerging protocols and blockchains, the framework had to be changed a little bit because it was a bit of like a blue ocean space when it came to protocol development. Kind of fast forward a little bit later and you have the situations where developers do developer-y things, where the developer might have an interesting idea and might have some problems bringing it to market specifically because they're just kind of handing a protocol over to someone and saying, here, build on this. Build all these great ideas on my protocol. It'll be great. But the problem is it's not taking those first principles into account. And a couple of years ago, Dan Romero from Forecaster put out a post called Product-Led Protocol Development, and it specifically dealt with the idea that as a protocol developer, you're building the first apps and clients that leverage your own protocol and are then taking the feedback from those to inform protocol development. Namely, in his case, there are three core tenets of product-led protocol development. The first is that users use apps and clients, they don't use protocols. So let's use Farcaster for the sake of the example. Users are using Warpcast. They're kind of making posts. They're interacting with the application. On the back end, it's Farcaster, the protocol, that's kind of informing all of these interactions. The second thing is that a developer worth their salt will make sure that they're concerned with meaningful daily active users instead of the coolest technology that they want to use. A developer that's building for an audience specifically and not experimenting. And the third and final part of it is that protocols are valued based on the clients and app success that build on said protocol. So it's always making sure that apps and clients are at the center here. And then if you're building a protocol, you are defining the blueprint for how that protocol should be used by being informed of what's happening on these apps and clients that people are actually using. And then finally, to round it out, complication requires a bit of order. Recently in the space, we're finding protocols wanting to have their own block space, building app-specific roll-ups. And I think just the bottom line of all this is just making sure that if you kind of go down this route, that you have to be the block space you want to see in the world. You have to make sure that when you're building this, you are building the killer app, leveraging this block space, that you're not going the usual route of building this thing and saying, hey, everyone, we're going to get a growth marketer to bring a million developers through the door, and then they're going to build a whole bunch of stuff on our new protocol, and it's going to be great. It's like, no, you're going to be building the app that specifically leverages this for what you designed it to do. And if it doesn't work out well, then you might need to hit the drawing board again. But it's all specifically going back to this core idea of product-led growth, where you are presenting something to the market. 
You're presenting the end users with something to use, they are then informing what happens with that and everything in between. So kind of returning to first principles, I encourage experimentation, but at the end of the experimentation, if you do wish to bring something to market, please keep designing and developing with users in mind and making sure those users are able to give you feedback to inform the next stages of what you're building. So thanks. Thanks a lot, Gregory. And now we have a little bit of time for questions. Wow, someone's ready. I can see that. All right, let me get this box to you. Ready? And go. Hello, thank you. I'm curious to have your opinion about the fact, don't you think the space is even designed to not incentivize competition between different roll-ups? Because they're all looking to attract liquidity, attract users, and just grow individually. And so this doesn't go in pair with taking care of every user, because they compete for users, they compete for liquidity, and there is a lack of interoperability, really. So what are your thoughts on this? So in the case of very app-specific case, you're then competing with others. We don't need all those roll-ups. No, we don't. It's unnecessary. I think I'm in agreement with you. We don't to a certain extent, but making sure that if you were building something to then build in in those use cases at the same time. I might not be understanding the question correctly. Well, I think the space is designed in a way like everyone wants to top their own roll-up, get users, get liquidity, but at the end, we're not building something that actually can be useful for people. And it's also sometimes just serving the ecosystem. But to an extent, so if there is a dominant use case that's an app-specific roll-up, then good for them. I hope it amasses the most amount of liquidity in that own way and dominates that space if it's providing a good enough of experience. Like, I want to allow the ability for anyone to create that app-specific roll-up, but at the same time, I want someone to win. And that's okay because it means users getting the value. And if that app-specific roll-up turns around and does something against user wants, there's another app-specific roll-up kind of waiting for that, or waiting for that user base. OK, thank you. Is there another question? Because otherwise I was another one here. Yes, there are others. So you essentially mentioned that developers should focus on building a product that the market wants. What makes it, and we see that most companies actually are having a hard time doing this. What do you think makes it so difficult to apply that very easy, fundamental, first principle thinking? I think being in the space for a number of years, everyone, there's a number of years, everyone, there's a lot of cases where people aren't working deeply enough or pinpoint enough on a very specific issue. Namely that, good example, let's use a hypothetical of social protocols, or someone building a chain for all the social protocols. They're thinking mostly about, this is again a hypothetical, but like we're going to amass all the social protocols. They're going to be using our chain. There are different clients and all that without building the very specific client themselves and saying we're going to be offering this experience first as a first-class citizen. They're thinking too much about offer it to everyone before let's get the experience right and figure out how to then open it up. 
And I think that's been a part of the thinking or an issue with the thinking for a long time. It's ambitious and it's very like a good thing overall, but at the same time it becomes difficult to provide that very narrow experience or solve that very specific problem before thinking too widely. And I think you have to balance both of that when developing and designing.", + "sources_streamethId": "6735d0a79dbb7a90e146832f", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731552900000, - "slot_end": 1731553500000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1aMtbpw97Q1DjqYA3pKLPTVpJ9vWOJoduN-rGCXYlHck", - "resources_slides": null, + "slot_start": 1731578400000, + "slot_end": 1731579600000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1NOKA9WOe3iWdApB0QmpWreDTMUpsQvJeG7afyjEMBSQ", + "resources_slides": "", "speakers": [ - "gregory-rocco" + "wei-dai", + "muthu-venkitasubramaniam" ] }, "vector": [ 0, + 6, 0, 0, 0, @@ -589748,8 +588077,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -590163,6 +588490,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -590501,6 +588829,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -590515,6 +588844,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -590765,10 +589095,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -590885,7 +589211,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -591052,6 +589377,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -591062,9 +589388,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -591076,47 +589399,50 @@ }, { "session": { - "id": "programmable-cryptography-and-dacc", - "sourceId": "PNA8NU", - "title": "Programmable Cryptography and d/acc", - "description": "This short panel will explore the role of advanced programmable cryptography, beyond ZK and MPC, in d/acc. Programmable cryptographic primitives like functional encryption (FE), witness encryption (WE), and indistinguishability obfuscation (iO) have become theoretically feasible and even moving towards real-world practicality. This panel will explore how these primitives can be used to improve trust-minimized infrastructure and applications.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Research", + "id": "programmable-cryptography-and-ethereum-panel", + "sourceId": "MWKMBQ", + "title": "Programmable Cryptography and Ethereum, Panel", + "description": "One of the core themes of this panel is how Programmable Cryptography synergizes with Ethereum. 
Panelists will discuss questions such as ''Why have we not been able to do everything we've wanted with Ethereum?'' and ''Why have certain kinds of applications - from decentralized social to decentralized games to decentralized finance - not been able to reach their full potential with only consensus technology?''", + "track": "Applied Cryptography", + "type": "Panel", + "expertise": "Beginner", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Cryptography", - "Use cases of cryptography" - ], "keywords": [ - "d/acc", - "programmable cryptography" + "Programmable Cryptography", + "ZKP", + "MPC", + "FHE", + "ORAM", + "Obfuscation", + "Panel", + "0xPARC" ], - "duration": 1223, + "tags": [], "language": "en", "sources_swarmHash": "", - "sources_youtubeId": "NrhmX3yHNdA", + "sources_youtubeId": "yCOCZhkDmnc", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735d0a79dbb7a90e146832f", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731578400000, - "slot_end": 1731579600000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1NOKA9WOe3iWdApB0QmpWreDTMUpsQvJeG7afyjEMBSQ", - "resources_slides": null, "speakers": [ - "muthu-venkitasubramaniam", - "wei-dai" - ] + "gubsheep", + "albert-ni", + "barry-whitehat", + "vitalik-buterin" + ], + "eventId": "devcon-7", + "slot_start": 1731400200000, + "slot_end": 1731403800000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/17ZRAYhS4Uh4J1-UKAL-2OFQwRR2N0dQ4bBZOVPIoYQU", + "resources_slides": "" }, "vector": [ 0, - 6, 0, 0, 0, @@ -591126,6 +589452,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -591298,6 +589625,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -591306,6 +589634,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -591645,7 +589975,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -591879,7 +590208,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -591894,10 +590222,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -592431,9 +590755,6 @@ 0, 2, 0, - 0, - 0, - 0, 2, 0, 0, @@ -592446,53 +590767,52 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "programmable-cryptography-and-ethereum-panel", - "sourceId": "MWKMBQ", - "title": "Programmable Cryptography and Ethereum, Panel", - "description": "One of the core themes of this panel is how Programmable Cryptography synergizes with Ethereum. Panelists will discuss questions such as ''Why have we not been able to do everything we've wanted with Ethereum?'' and ''Why have certain kinds of applications - from decentralized social to decentralized games to decentralized finance - not been able to reach their full potential with only consensus technology?''", - "track": "Applied Cryptography", - "type": "Panel", + "id": "programmable-cryptography-and-smart-contract", + "sourceId": "VJEDLX", + "title": "Programmable Cryptography and Smart Contract", + "description": "Overview\r\nIn some use cases, developers may want to execute smart contracts based on the results of FHE or MPC execution. This session will introduce several design patterns for such use cases and show how Programmable Cryptography can be applied to dApps.\r\n\r\nIn detail\r\nThe results of FHE executions are encrypted and need to be designed to be processed by smart contracts. 
In addition, the MPC+ZK-based method can solve the private state problem relatively easily using the conventional SNARK verifier.", + "track": "Developer Experience", + "type": "Lightning Talk", "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Programmable Cryptography", - "ZKP", + "tags": [ + "DevEx", + "Cryptography", "MPC", - "FHE", - "ORAM", - "Obfuscation", - "Panel", - "0xPARC" + "programmable", + "DevEx", + "MPC" ], - "tags": [], - "language": "en", - "speakers": [ - "gubsheep", - "albert-ni", - "barry-whitehat", - "vitalik-buterin" + "keywords": [ + "Programable", + "Cryptography" ], + "duration": 355, + "language": "en", + "sources_swarmHash": "17c804065df8bd60c3c0133f7d5ffdea4c30fe373f0ed0d016d0c9351279fb84", + "sources_youtubeId": "j9YnU1-MeiU", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731400200000, - "slot_end": 1731403800000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/17ZRAYhS4Uh4J1-UKAL-2OFQwRR2N0dQ4bBZOVPIoYQU" + "slot_start": 1731472200000, + "slot_end": 1731472800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1dUK2fPW4Yka7X0nBzFRlJXDKOPHcZn0iLzNpS3rUVcI", + "resources_slides": "https://drive.google.com/file/d/1RO17LT7cZMlx8nuuMcUGCoRviKeGEkG7/view", + "speakers": [ + "shouki-tsuda" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -592667,7 +590987,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -592677,9 +590996,7 @@ 0, 0, 0, - 6, 0, - 6, 0, 0, 0, @@ -592913,7 +591230,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -593027,6 +591343,8 @@ 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -593256,6 +591574,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -593272,6 +591591,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -593330,6 +591650,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -593683,6 +592004,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -593822,51 +592144,41 @@ }, { "session": { - "id": "programmable-cryptography-and-smart-contract", - "sourceId": "VJEDLX", - "title": "Programmable Cryptography and Smart Contract", - "description": "Overview\r\nIn some use cases, developers may want to execute smart contracts based on the results of FHE or MPC execution. This session will introduce several design patterns for such use cases and show how Programmable Cryptography can be applied to dApps.\r\n\r\nIn detail\r\nThe results of FHE executions are encrypted and need to be designed to be processed by smart contracts. In addition, the MPC+ZK-based method can solve the private state problem relatively easily using the conventional SNARK verifier.", - "track": "Developer Experience", - "type": "Lightning Talk", - "expertise": "Beginner", + "id": "programmable-cryptography-and-the-future-of-the-internet", + "sourceId": "JVGEDS", + "title": "Programmable Cryptography and the future of the Internet", + "description": "You rarely hear of issues at the networking layer of the Internet: networking companies are running utilities business: they are fungible and can be swapped if distrusted.\r\nMost of the value captured on the Internet -- and also most abuse -- happen at the Compute and Data layer of the Web. 
Ethereum gave us a glimpse of a fundamentally different architecture for Compute and Data than Client/Server architecture.We think the Internet is 1/3 complete, and that programmable cryptography can finish it.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "DevEx", - "Cryptography", - "MPC", - "programmable", - "DevEx", - "MPC" - ], + "tags": [], "keywords": [ - "Programable", - "Cryptography" + "None" ], - "duration": 355, + "duration": 1525, "language": "en", - "sources_swarmHash": "17c804065df8bd60c3c0133f7d5ffdea4c30fe373f0ed0d016d0c9351279fb84", - "sources_youtubeId": "j9YnU1-MeiU", + "sources_swarmHash": "0fb50c195df8ce92474b5cefa3ba1a750793c1efe6f7bc27f06d16f5a2040a3c", + "sources_youtubeId": "onclocmZeR0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "67341d7a9dbb7a90e166b21c", "eventId": "devcon-7", - "slot_start": 1731472200000, - "slot_end": 1731472800000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1dUK2fPW4Yka7X0nBzFRlJXDKOPHcZn0iLzNpS3rUVcI", - "resources_slides": null, + "slot_start": 1731465900000, + "slot_end": 1731467700000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1yuek7FVsP0Ov8ZWMCbVJX0zA_KsFKhhx7JBnbKcs_qY", + "resources_slides": "https://drive.google.com/file/d/1rSDOMq1chOochPW5XY3kb62IMl4OwqYI/view", "speakers": [ - "shouki-tsuda" + "justin-glibert" ] }, "vector": [ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -593874,6 +592186,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -594045,6 +592358,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -594393,7 +592707,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -594626,7 +592939,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -594643,7 +592955,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -594702,7 +593013,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -595057,7 +593367,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -595176,9 +593485,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 2, 0, @@ -595198,36 +593507,38 @@ }, { "session": { - "id": "programmable-cryptography-and-the-future-of-the-internet", - "sourceId": "JVGEDS", - "title": "Programmable Cryptography and the future of the Internet", - "description": "You rarely hear of issues at the networking layer of the Internet: networking companies are running utilities business: they are fungible and can be swapped if distrusted.\r\nMost of the value captured on the Internet -- and also most abuse -- happen at the Compute and Data layer of the Web. Ethereum gave us a glimpse of a fundamentally different architecture for Compute and Data than Client/Server architecture.We think the Internet is 1/3 complete, and that programmable cryptography can finish it.", - "track": "Applied Cryptography", - "type": "Talk", + "id": "programmable-cryptography-from-a-software-engineering-lens", + "sourceId": "SWD9LD", + "title": "Programmable Cryptography from a Software Engineering Lens", + "description": "Different cryptographic primitives have different affordances, especially when using them in practice, and especially together. In this session, we explore a new way of interacting with PCs at a software engineering level via a LISP like programming language. 
This language enables creating self-verifying graphs of computation.", + "track": "[CLS] Programmable / Frogrammable Cryptography, by 0xPARC", + "type": "Workshop", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "", "featured": false, "doNotRecord": false, - "tags": [], "keywords": [ - "None" + "Programmable", + "Cryptography" + ], + "tags": [ + "Cryptography" ], - "duration": 1525, "language": "en", - "sources_swarmHash": "0fb50c195df8ce92474b5cefa3ba1a750793c1efe6f7bc27f06d16f5a2040a3c", - "sources_youtubeId": "onclocmZeR0", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "67341d7a9dbb7a90e166b21c", - "eventId": "devcon-7", - "slot_start": 1731465900000, - "slot_end": 1731467700000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1yuek7FVsP0Ov8ZWMCbVJX0zA_KsFKhhx7JBnbKcs_qY", - "resources_slides": null, "speakers": [ - "justin-glibert" - ] + "aayush-gupta", + "justin-glibert", + "arnaucube", + "ahmad", + "kevin-kwok" + ], + "eventId": "devcon-7", + "sources_youtubeId": "5C8SovQZnqY", + "slot_start": 1731648600000, + "slot_end": 1731654000000, + "slot_roomId": "breakout-2", + "resources_presentation": "https://docs.google.com/presentation/d/1yWVJ6yTEFsI9WxcM3wmAe6YClRLfYGGhGBGYB8pv2Sg", + "resources_slides": "" }, "vector": [ 0, @@ -595240,13 +593551,11 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -595255,6 +593564,7 @@ 0, 0, 0, + 4, 0, 0, 0, @@ -595346,6 +593656,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -595499,6 +593810,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -595760,6 +594072,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -595989,14 +594302,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 6, 0, 0, 0, @@ -596548,7 +594854,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -596559,6 +594864,7 @@ 0, 0, 0, + 2, 0, 0, 0 @@ -596566,36 +594872,36 @@ }, { "session": { - "id": "programmable-cryptography-from-a-software-engineering-lens", - "sourceId": "SWD9LD", - "title": "Programmable Cryptography from a Software Engineering Lens", - "description": "Different cryptographic primitives have different affordances, especially when using them in practice, and especially together. In this session, we explore a new way of interacting with PCs at a software engineering level via a LISP like programming language. This language enables creating self-verifying graphs of computation.", - "track": "[CLS] Programmable / Frogrammable Cryptography, by 0xPARC", - "type": "Workshop", + "id": "project-mirage-mud-day-demo", + "sourceId": "BVANRC", + "title": "Project Mirage - MUD Day Demo", + "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications. 
Project Mirage is an onchain island management game where players build, expand and trade their islands.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Programmable", - "Cryptography" - ], - "tags": [ - "Cryptography" - ], + "tags": [], + "keywords": [], + "duration": 206, "language": "en", - "speakers": [ - "aayush-gupta", - "justin-glibert", - "arnaucube", - "ahmad", - "kevin-kwok" - ], + "sources_swarmHash": "447ea7e3caa545f9b9abe80a90fee1ee9304096a65c62f14a199a99ec90b97e2", + "sources_youtubeId": "YEZN8zdHRx4", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673591ec9dbb7a90e1f51b8b", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673591ec9dbb7a90e1f51b8b.vtt", + "transcript_text": " Steal someone's identity Xavier? No, no, okay. Hey, my name is Xavier. Closer, closer. Closer, okay. Hey, my name is Xavier. I'm the owner of Project Mirage. Project Mirage is an island building game, and we are using Redstone Garnet, testnet, and we are building Foley Ahmad. So let me demo the game. So when you begin, it's not like that. It will be like an empty island with a city hall in the middle. You have to pave road. Yeah, you have to pave road and add more buildings. You can see there are drop downs. We have a road pavement. You can drag. You can save the road and add more buildings. There are various categories of buildings. There are residential. The first thing you need to build is like resident buildings to add like laborers, but you also can build like different commercial, industrial, civic landscape to like fulfill certain requirement and produce certain resources. So for example, I can put like a house here and put like a commercial building nearby. You can view the details by clicking into the buildings. We have some red ones here. Oh, now, oh, level up. Unlock some buildings and put some names, like X island. By reaching certain levels, you will see something like this. This is my own island. So I have built like virus buildings. We also have like a functionality called summon. So I don't have like resource to summon any character right now, but you can view the character here. For example, I have this one. We have some description. And you can actually produce resource by assigning the character. So for example, this one, he can plus 4% onto the resources. Yeah. He can plus four percent Onto the resources. Yeah and also we have a functionality called shares so for example if I visit my co-workers Island I Can trade shares on her island for to become like a shareholder on different people's island you gain profit by gaining resources when the other owners produce resources. So you gain a certain percentage. And also you can trade shares. And with the priority of the island coming up, the share becomes more expensive. Yeah. We can also visit other people's island. There's a leaderboard on our game test. For example, this is one of our top players. And we can see his island's much larger than mine. Yeah, that's it. 
Thank you.", "eventId": "devcon-7", - "slot_start": 1731648600000, - "slot_end": 1731654000000, - "slot_roomId": "breakout-2", - "resources_presentation": "https://docs.google.com/presentation/d/1yWVJ6yTEFsI9WxcM3wmAe6YClRLfYGGhGBGYB8pv2Sg" + "slot_start": 1731557400000, + "slot_end": 1731557700000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1d-1krZg7I-YltJPVKWhfg0Tl6wSDlA4A7_wN3qi3s3M", + "resources_slides": "", + "speakers": [ + "y77cao" + ] }, "vector": [ 0, @@ -596610,9 +594916,9 @@ 0, 0, 0, + 6, 0, 0, - 6, 0, 0, 0, @@ -596621,7 +594927,6 @@ 0, 0, 0, - 4, 0, 0, 0, @@ -596713,7 +595018,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -596780,7 +595084,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -596868,7 +595171,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -596952,6 +595254,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -597130,7 +595433,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -597362,7 +595664,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -597916,6 +596217,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -597926,7 +596228,6 @@ 0, 0, 0, - 2, 0, 0, 0 @@ -597934,39 +596235,44 @@ }, { "session": { - "id": "project-mirage-mud-day-demo", - "sourceId": "BVANRC", - "title": "Project Mirage - MUD Day Demo", - "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications. Project Mirage is an onchain island management game where players build, expand and trade their islands.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", + "id": "proof-of-personhood-panel", + "sourceId": "GVML7H", + "title": "Proof of personhood panel", + "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", - "expertise": "Intermediate", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [], "keywords": [], - "duration": 206, + "duration": 1226, "language": "en", - "sources_swarmHash": "447ea7e3caa545f9b9abe80a90fee1ee9304096a65c62f14a199a99ec90b97e2", - "sources_youtubeId": "YEZN8zdHRx4", + "sources_swarmHash": "69ebe38a57c1f0da151727dc18122546f6a0d7d37ed10de07a0bf33c35098d32", + "sources_youtubeId": "42cJ6IZlhMk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673591ec9dbb7a90e1f51b8b", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673591ec9dbb7a90e1f51b8b.vtt", - "transcript_text": " Steal someone's identity Xavier? No, no, okay. Hey, my name is Xavier. Closer, closer. Closer, okay. Hey, my name is Xavier. I'm the owner of Project Mirage. Project Mirage is an island building game, and we are using Redstone Garnet, testnet, and we are building Foley Ahmad. So let me demo the game. So when you begin, it's not like that. It will be like an empty island with a city hall in the middle. You have to pave road. Yeah, you have to pave road and add more buildings. You can see there are drop downs. We have a road pavement. You can drag. You can save the road and add more buildings. There are various categories of buildings. There are residential. 
The first thing you need to build is like resident buildings to add like laborers, but you also can build like different commercial, industrial, civic landscape to like fulfill certain requirement and produce certain resources. So for example, I can put like a house here and put like a commercial building nearby. You can view the details by clicking into the buildings. We have some red ones here. Oh, now, oh, level up. Unlock some buildings and put some names, like X island. By reaching certain levels, you will see something like this. This is my own island. So I have built like virus buildings. We also have like a functionality called summon. So I don't have like resource to summon any character right now, but you can view the character here. For example, I have this one. We have some description. And you can actually produce resource by assigning the character. So for example, this one, he can plus 4% onto the resources. Yeah. He can plus four percent Onto the resources. Yeah and also we have a functionality called shares so for example if I visit my co-workers Island I Can trade shares on her island for to become like a shareholder on different people's island you gain profit by gaining resources when the other owners produce resources. So you gain a certain percentage. And also you can trade shares. And with the priority of the island coming up, the share becomes more expensive. Yeah. We can also visit other people's island. There's a leaderboard on our game test. For example, this is one of our top players. And we can see his island's much larger than mine. Yeah, that's it. Thank you.", + "sources_streamethId": "673592a79dbb7a90e1088fac", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673592a79dbb7a90e1088fac.vtt", + "transcript_text": " Hello, hello. Great. So we just had a nice overview of the three protocols. And what I want to do in this small amount of time we have is just challenge each one. I think there's something to learn from each approach here and see if we can come to some greater synthesis. So I'll start with Remco. So Remco, I actually wrote a critique of proof of personhood. It's called Compressed to Zero, The Silent Strings of Proof of Personhood. And what I did is I looked at a parallel protocol to WorldCoin, a different authentication mechanism. But they had succeeded in verifying unique individuals through cognitive tests. And they had avoided the problem of account trading, which WorldCoin has had to grapple with because each account has money. And then, of course, there's an incentive to sell accounts early on and so that's something you're grappling with. They had solved that with this novel mechanism called sublinear identity staking, which I'm happy to talk to you about later. But they were still left with this information game theoretic problem around puppeteering, which is that when you have an account and it has monetary value, it has a UBI stream coming towards it, which is one of the goals of WorldCoin, you create incentives for other people to try to control those accounts and extract from them. And so the kind of pithy way to put it is when you try to differentiate humans from bots and use UBI incentives, you actually create an unintended consequence of humans trying to control other humans as if they were bots, right? And so the control problem becomes an information problem, and it's an information game theoretic one. 
So I want to know how you think about this challenge, because there are two goals that are very explicit with WorldCoin. One is both UBI and a kind of rails for democratic governance and even maybe even AI alignment. But if ultimately, you know, pursuing both of those goals ends up in coalitions and oligopolies and basically politics, right? And one person, one vote breaks down very quickly in a coalition. It's like, how do you deal with that? How are you thinking about that? So we're at 16 million users now. 16? 8 million in the highest tier or verified unique human. And in the process of growing to this scale and being active in dozens, if not nearly 100 countries right now, you kind of see everything that people can throw at you. And while the vast majority of people is honest, there are some very dishonest actors out there and you have the Byzantine generals' problem of you need to make your system resistant against malicious actors. I think this is not, as you mentioned, this is for every proof of personhood primitive out there. There is this issue. As I said, proof of personhood is not a CAPTCHA. So there is this idea that you can delegate an account. This is not something I think we should prevent, but it is something that you need to give the user the tools to properly manage, which means that it needs to be explicit, it needs to be consensual, it needs to be revocable, and so on. So I think the emphasis is on developing those tools, empowering the user to control their world ID in the way that they want to control it. Blockchains as a whole are not particularly good at the moment at these kinds of things because a lot of decisions you make on chain are one-way streets. But there are a number of things we are already doing here, like allowing you to recover your account by going back to an orb. One thing we've rolled out that is very powerful is using the phone's front camera to do additional interactive face authentications to make sure that the wallet hasn't been stolen. It's still in the possession of the original human that signed up and so on Right Maybe I can Comment there directly so I made in this presentation my claim that I think this absolute identity They're actually not that many use cases for it and kind of just to give the extreme example Why I'm making this claim is let's say somehow North Korea does get hold of one of those orbs and and can can we are properly authenticate they're all there 25 million I'm not exactly sure how many people they have probably no one knows but 25 million people so let's just say in WorldCoin now you go from 8 million fully verified to now 32 but 25 of them are just people from North Korea where it's not even clear whether they have control of their key or probably it's somehow centrally organized that someone ultimately has full control of those keys. Now you have 32 million properly verified actual humans. The question is, for what application would that set be useful? So my claim is, in absolutely most applications, and probably actually this gas subsidy that you're doing is one of the few where I think it is useful, but in absolutely most applications, I would say you need more context than just being human. 
If you want to go even more extreme, we can think of this Matrix movie where they're literally, I mean, in the Matrix movie where they're literally farming humans, or it kind of has those farms of humans that basically just sit there and yes, they might have an eyeball, you might be able to scan that, but that's really not what you want. Right, and even just to add on, as long as there's money at stake, I mean, we have synthetic biology, it could also be just sort of synthetic eyeball forms to hack the system. Okay, hold on, there's like a couple different things going through each other here. Static eyeballs don't work because with biometrics it's critical that you verify that it's actually part of a real live human body. If you get possession of an orb and you abuse it, while the signup process between the user is absolutely anonymous and privacy preserving, the orbs themselves are not. Their behavior is public and there are methods you can use to actually revoke the fake accounts that might be... But there are no fake accounts. Okay actually revoke the fake accounts. But there are no fake accounts. Okay, that's the second part. If they are not fake accounts, then the system works as intended, in a way. And you make a very good point that proof of personhood is just a primitive, and you need a richer ecosystem of things around it. The example I like, a very simple one, is let's say you have an on-chain DAO. You want to implement one person, one vote. If you just do this and allow all of the world ID verified users to vote on this and your social community is only 5,000 people, you can easily get swamped. You don't have a very strong governance system that way. So you do need to incorporate this in more complex systems. Lascha? I 100% agree. Lascha? Can I add something? I kind of like... I get all the points, but I think like forgetting the orb or the eyes and stuff. So I agree that overall identities, and I don't believe in universal identities, because identities have to be built for the governance purposes. So we should come up with frameworks that increase the coordination quality among the humans and then decide the depths of the identity, how it should be built. When we talk about the proof of personhood, it has like two dimensions, right? One is the proof of uniqueness. And I agree for the real world and especially in multiple cases, just uniqueness proof and badge does not guarantee the quality of decision making or coordination. And then we have to rely on passports or like citizenship or the age or like all the other stuff. And the thing is that like if we, like for example for voting, refer to the passports, the dependency on these document is so big that we can generate the uniqueness based on data and we might not need any type of biometrics additional to it. And I think, in a way, that's the purpose of identity. So I don't believe in universal identities. I believe of defining whether we like fighting AI or we like fighting the misinformation stuff. The only systems that are dynamically evolving and improving are the ones that have this kind of fluid identity aspects and not just the universal ones. Like on the example of Twitter and the community nodes. So just labeling is just creating this very low barriers in terms of defining the permissions. What you can do or or you can't do as a human or not. And like you said, yeah, there's a kind of North Korea scenario. There's kind of a dystopian scenario where we might be like creating market, right, for the eyeballs and stuff. 
And like you got two kidneys, you got two eyes, and then what happens next, right? Well, what you raised is an interesting point about governance, right? Because in some sense, the point Martin was making is highlighting an enforcement problem, right? And this gets back to the information problem. Yeah. Right? And so even if you do succeed in authenticating unique individuals in North Korea, how do you actually enforce that? And what you're saying is, well, you need richer governance mechanisms. We need to express identity in different ways, right, as a complement. And if you just have that one identity system, ultimately enforcement would require surveillance. And, of course, surveillance results in everyone becoming informationally the same. So I want to shift gears, Martin, and poke a few holes at Circles. I think it's a great experiment. But one of the things I noticed on your slides is, you know, you're talking about currency and money. I think of them almost as community currencies and like nested networks of cooperation building into nation states and, like, you know, even the globe. But, you know, not everybody in a community should receive the same amount: blood, sweat, and tears, and contribution vary. It shouldn't just sort of be like an hourly rate. That's another kind of UBI that has its own pitfalls, because it's not tied to incentives. And you mentioned a 7% interest rate. These things seem arbitrary, and they don't seem to leverage incentives or people's voice, choice, and stake in a community. So how do you think about that? Yeah, I mean, should they receive the same amount? I would say if a bunch of people comes together and decides to use something as money, then just that coordination effort to some extent creates value for whatever they decide is money. And I would argue that the fairest form of distributing this value that has been created by just this coordination effort would be to distribute it equally among those that join this coordination effort. Super important to understand that that doesn't mean that everyone needs to work for the same hourly rate or doesn't need, I mean, that's absolutely not the case. So it's just that the coordination effort is in a way, or the benefit of that is, equally distributed. Sorry, there was a second question that I... Just, you know, non-equal distributions within, you know, acknowledging that there's different levels of contribution. I believe it's the 7%. Yeah, 7%, right. So that is probably the only parameter in Circles that has been set, and that's therefore debatable. And yes, you could say every community should be able to set that themselves. That would make coordination much, much, much harder. Why? Well, because then you would need to... In Circles, it is meant to be so simple that I issue circles, and then I know I can kind of trust that other account. I can exchange my money to their money one-to-one because we are issuing under the exact same rules. If everyone could define their own rules, it would be much harder to negotiate how much my money should be worth compared to yours. So I would say, for a stable equilibrium, the goal of Circles is to have the stable equilibrium where the exchange rates between circles are just one-to-one. The goal would be to say one circle is one circle, and it kind of abstracts away the fact that under the hood, we have all those different... 
Just like in the dollar system right now, technically speaking, if you have a dollar at one bank and a dollar at another bank, those are not the same dollars. But the system is built in such a way that usually it feels like one-to-one because if everything goes well, you can exchange them one-to-one. But if it breaks, you will realize, oh, the dollar at the bank that just went bankrupt is different from the other one. But the US dollar and the euro have floating exchange rates. Why can't you... Say again? The US dollar and the euro have floating exchange rates. Oh, no, no, for sure, for sure. So, in Circles, yes. I mean, the expectation is that, within a connected cluster, the exchange rate is one-to-one. But, yeah, there can be different clusters, where within the clusters the exchange rate is one-to-one, but between the clusters there are floating exchange rates. Why not just rely on supply and demand? Well, it will be. I mean, the price between those clusters will be defined by supply and demand. And what we are actually proposing in our upcoming relaunch, I'd say, or restart, or reactivation, it's a continuation of the system, is that people or groups can back their very own circles with the reserve assets they want, Bitcoin, dollar, whatever they want, and then there will be a free floating exchange rate for those circles, so defined by supply and demand. But as long as people trust each other, then they are essentially creating additional one-to-one demand between those currencies. Maybe we need to go back to the identity topic. No, but you're expressing identity in a different way, and so I just want to dig in on that. But I want to turn to Lasha now. So how do you see yourself and Rarimo in this constellation of identity protocols? What differentiation do you see that you're bringing? I think we try to use both sides of the world, such as passports as an instance first, but then extend it with a graph and capture more depth of the interactions. What I would say is how we try to solve the identity problem or enable the buildup is very use case centric and, like I said, governance centric. The fact that we understand this voting stuff has to be done that way, it gave rise to solving the uniqueness problem in a certain way. But this doesn't mean that the same uniqueness should be reused universally across other use cases. And so going at a primitive level of these kinds of registries and the building blocks and having more like this kind of flexible framework, I think that's the way forward. And as well, as we've seen, mostly under the regulatory pressure or this kind of collusion risk, I think the permissionlessness and empowering of the users going forward should be like this kind of core principle that no one should kind of violate or cross the line for. So we only have three minutes left. I want to quickly do one minute each on where, you know, AI as a part of identity is helping us leverage AI to communicate with each other, and also to govern AI. So starting with you, Remco, where do you see the most immediate use case with WorldCoin in the near future? And World, or World ID, interfacing with AI: will, you know, OpenAI, for example, grant free access if you have a World ID? What are the sort of immediate near-term goals on that interaction? 
I mean, I can't speak for OpenAI, but we do share some money in common, so we'll see what happens there. Regarding AI, I think we need to embrace the fact that it's going to be some sort of human-AI hybrid operator. I'm a big believer that AI is just making humans better versions of themselves. So this is why I think it's so important that you distinguish between a CAPTCHA and a proof of personhood, which are different primitives with different use cases. And we might not even be that interested in the CAPTCHA use case. What is going to be a very important problem is distinguishing real world sensory data from faked data and authenticity. I don't think proof of presence alone is a solution there, but I do think it can be an important part of a functioning solution that allows us to authenticate content and information. Lasha, any immediate applications? Yeah. In my world, I don't have this, like, us humans and AIs in kind of like separate camps. I more see and question, like, okay, what is the first thing? Like, I own multiple AIs. So the first dimension is how do I control it, so how do we build this my-me-my-AI and identify these relations and control. And then, zooming out, like, all the AIs of individuals, or like the systems, how do they coordinate, and what are the mechanisms for us to, I don't know, lower down the computational capacity or shut certain things down and abstract them? This is the next level of governance problems that we should be figuring out. That's where the identity aspects will be born uniquely. Martin, I know you're thinking a lot about AI? Yeah, I think it's certainly good to have a robust identity framework in place that also is relational, because, yeah, for the reason mentioned in the very beginning. So I like to see a version of something like Circles that's completely permissionless as a base layer built on this trust graph, but then projects like ZK Passport or WorldID that can essentially provide additional attestations, what I would call it, to those open base layer identities can bring us a very robust system overall where humans can coordinate, if necessary, maybe against AI. Will each circle have its own AI agent? That's a good question, whether AI agents will also use circles or not. I don't know yet. They could. 
Thank you.", "eventId": "devcon-7", - "slot_start": 1731557400000, - "slot_end": 1731557700000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1d-1krZg7I-YltJPVKWhfg0Tl6wSDlA4A7_wN3qi3s3M", - "resources_slides": null, + "slot_start": 1731559800000, + "slot_end": 1731561000000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1jVtcSZgrBxcYG4lFAatpVuooVRxzUpgPKggpcsgETVM", + "resources_slides": "", "speakers": [ - "y77cao" + "vitalik-buterin", + "remco-bloeman", + "martin-k", + "lasha", + "puja" ] }, "vector": [ 0, + 6, 0, 0, 0, @@ -597978,15 +596284,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -598167,6 +596464,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -598493,6 +596791,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -598504,6 +596803,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -599283,7 +597584,6 @@ 2, 0, 0, - 0, 2, 0, 0, @@ -599302,53 +597602,53 @@ }, { "session": { - "id": "proof-of-personhood-panel", - "sourceId": "GVML7H", - "title": "Proof of personhood panel", - "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "", + "id": "protec-and-attac-programmatic-execution-layer-consensus-tests", + "sourceId": "GZBP8A", + "title": "Protec and Attac: Programmatic Execution Layer Consensus Tests", + "description": "We'll give an overview of Ethereum Execution Spec Tests (EEST), the new Python framework used since Shanghai to generate test vectors for Ethereum Virtual Machine (EVM) implementations. By generating tests programmatically this modular framework allows test cases to be readily parametrized and dynamically executed against clients on live networks. It tightly integrates with the Ethereum Execution Layer Specification (EELS) and could potentially be used across the L2 EVM ecosystem.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 1226, + "keywords": [ + "Python", + "pytest" + ], + "tags": [ + "Core Protocol", + "EVM-equivalent", + "Testing", + "pytest", + "Core Protocol", + "EVM-equivalent", + "Testing" + ], "language": "en", - "sources_swarmHash": "69ebe38a57c1f0da151727dc18122546f6a0d7d37ed10de07a0bf33c35098d32", - "sources_youtubeId": "42cJ6IZlhMk", + "sources_swarmHash": "5a5ad86c8d093f50adfe6bd4116a642bdc62e1c5b5634d61f35d653a32f5e250", + "sources_youtubeId": "4OF-TJ9nB4I", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673592a79dbb7a90e1088fac", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673592a79dbb7a90e1088fac.vtt", - "transcript_text": " Hello, hello. Great. So we just had a nice overview of the three protocols. And what I want to do in this small amount of time we have is just challenge each one. I think there's something to learn from each approach here and see if we can come to some greater synthesis. So I'll start with Remco. So Remco, I actually wrote a critique of proof of personhood. It's called Compressed to Zero, The Silent Strings of Proof of Personhood. 
And what I did is I looked at a parallel protocol to WorldCoin, a different authentication mechanism. But they had succeeded in verifying unique individuals through cognitive tests. And they had avoided the problem of account trading, which WorldCoin has had to grapple with because each account has money. And then, of course, there's an incentive to sell accounts early on, and so that's something you're grappling with. They had solved that with this novel mechanism called sublinear identity staking, which I'm happy to talk to you about later. But they were still left with this information game theoretic problem around puppeteering, which is that when you have an account and it has monetary value, it has a UBI stream coming towards it, which is one of the goals of WorldCoin, you create incentives for other people to try to control those accounts and extract from them. And so the kind of pithy way to put it is when you try to differentiate humans from bots and use UBI incentives, you actually create an unintended consequence of humans trying to control other humans as if they were bots, right? And so the control problem becomes an information problem, and it's an information game theoretic one. So I want to know how you think about this challenge, because there are two goals that are very explicit with WorldCoin. One is both UBI and a kind of rails for democratic governance and maybe even AI alignment. But if ultimately, you know, pursuing both of those goals ends up in coalitions and oligopolies and basically politics, right? And one person, one vote breaks down very quickly in a coalition. It's like, how do you deal with that? How are you thinking about that? So we're at 16 million users now. 16? 8 million in the highest tier, verified unique human. And in the process of growing to this scale and being active in dozens, if not nearly 100 countries right now, you kind of see everything that people can throw at you. And while the vast majority of people are honest, there are some very dishonest actors out there, and you have the Byzantine generals' problem of needing to make your system resistant against malicious actors. I think this is not... As you mentioned, this is for every proof of personhood primitive out there. There is this issue. As I said, proof of personhood is not a CAPTCHA. So there is this idea that you can delegate an account. This is not something I think we should prevent, but it is something that you need to give the user the tools to properly manage, which means that it needs to be explicit, it needs to be consensual, it needs to be revocable, and so on. So I think the emphasis is on developing those tools, empowering the user to control their World ID in the way that they want to control it. Blockchains as a whole are not particularly good at the moment at these kinds of things, because a lot of decisions you make on chain are one-way streets. But there are a number of things we are already doing here, like allowing you to recover your account by going back to an orb. One thing we've rolled out that is very powerful is using the phone's front camera to do additional interactive face authentications to make sure that the wallet hasn't been stolen. 
It's still in the possession of the original human that signed up, and so on. Right. Maybe I can comment there directly. So I made in this presentation my claim that I think this absolute identity, there are actually not that many use cases for it. And kind of just to give the extreme example why I'm making this claim: let's say somehow North Korea does get hold of one of those orbs and can properly authenticate all their 25 million, I'm not exactly sure how many people they have, probably no one knows, but 25 million people. So let's just say in WorldCoin now you go from 8 million fully verified to now 32, but 25 of them are just people from North Korea, where it's not even clear whether they have control of their key, or probably it's somehow centrally organized that someone ultimately has full control of those keys. Now you have 32 million properly verified actual humans. The question is, for what application would that set be useful? So my claim is, in absolutely most applications, and probably actually this gas subsidy that you're doing is one of the few where I think it is useful, but in absolutely most applications, I would say you need more context than just being human. If you want to go even more extreme, we can think of this Matrix movie where they're literally, I mean, in the Matrix movie where they're literally farming humans, or it kind of has those farms of humans that basically just sit there and yes, they might have an eyeball, you might be able to scan that, but that's really not what you want. Right, and even just to add on, as long as there's money at stake, I mean, we have synthetic biology, it could also be just sort of synthetic eyeball farms to hack the system. Okay, hold on, there's like a couple different things going through each other here. Static eyeballs don't work, because with biometrics it's critical that you verify that it's actually part of a real live human body. If you get possession of an orb and you abuse it, while the signup process between the user is absolutely anonymous and privacy preserving, the orbs themselves are not. Their behavior is public, and there are methods you can use to actually revoke the fake accounts that might be... But there are no fake accounts. Okay, that's the second part. If they are not fake accounts, then the system works as intended, in a way. And you make a very good point that proof of personhood is just a primitive, and you need a richer ecosystem of things around it. The example I like, a very simple one, is let's say you have an on-chain DAO. You want to implement one person, one vote. If you just do this and allow all of the World ID verified users to vote on this and your social community is only 5,000 people, you can easily get swamped. You don't have a very strong governance system that way. So you do need to incorporate this in more complex systems. Lasha? I 100% agree. Lasha? Can I add something? I kind of like... I get all the points, but I think, like, forgetting the orb or the eyes and stuff. So I agree that overall identities, and I don't believe in universal identities, because identities have to be built for governance purposes. So we should come up with frameworks that increase the coordination quality among the humans and then decide the depths of the identity, how it should be built. When we talk about the proof of personhood, it has like two dimensions, right? One is the proof of uniqueness. 
And I agree for the real world, and especially in multiple cases, just a uniqueness proof and badge does not guarantee the quality of decision making or coordination. And then we have to rely on passports or like citizenship or the age or like all the other stuff. And the thing is that if we, like for example for voting, refer to the passports, the dependency on these documents is so big that we can generate the uniqueness based on data, and we might not need any type of biometrics additional to it. And I think, in a way, that's the purpose of identity. So I don't believe in universal identities. I believe in defining whether we are fighting AI or fighting the misinformation stuff. The only systems that are dynamically evolving and improving are the ones that have these kinds of fluid identity aspects and not just the universal ones. Like on the example of Twitter and the community notes. So just labeling is just creating these very low barriers in terms of defining the permissions. What you can do or you can't do as a human or not. And like you said, yeah, there's a kind of North Korea scenario. There's kind of a dystopian scenario where we might be like creating a market, right, for the eyeballs and stuff. And like you got two kidneys, you got two eyes, and then what happens next, right? Well, what you raised is an interesting point about governance, right? Because in some sense, the point Martin was making is highlighting an enforcement problem, right? And this gets back to the information problem. Yeah. Right? And so even if you do succeed in authenticating unique individuals in North Korea, how do you actually enforce that? And what you're saying is, well, you need richer governance mechanisms. We need to express identity in different ways, right, as a complement. And if you just have that one identity system, ultimately enforcement would require surveillance. And, of course, surveillance results in everyone becoming informationally the same. So I want to shift gears, Martin, and poke a few holes at Circles. I think it's a great experiment. But one of the things I noticed on your slides is, you know, you're talking about currency and money. I think of them almost as community currencies and like nested networks of cooperation building into nation states and, like, you know, even the globe. But, you know, not everybody in a community should receive the same amount: blood, sweat, and tears, and contribution vary. It shouldn't just sort of be like an hourly rate. That's another kind of UBI that has its own pitfalls, because it's not tied to incentives. And you mentioned a 7% interest rate. These things seem arbitrary, and they don't seem to leverage incentives or people's voice, choice, and stake in a community. So how do you think about that? Yeah, I mean, should they receive the same amount? I would say if a bunch of people comes together and decides to use something as money, then just that coordination effort to some extent creates value for whatever they decide is money. And I would argue that the fairest form of distributing this value that has been created by just this coordination effort would be to distribute it equally among those that join this coordination effort. Super important to understand that that doesn't mean that everyone needs to work for the same hourly rate or doesn't need, I mean, that's absolutely not the case. So it's just that the coordination effort is in a way, or the benefit of that is, equally distributed. 
Sorry, there was a second question that I... Just, you know, non-equal distributions within, you know, acknowledging that there's different levels of contribution. I believe it's the 7%. Yeah, 7%, right. So that is probably the only parameter in Circles that has been set, and that's therefore debatable. And yes, you could say every community should be able to set that themselves. That would make coordination much, much, much harder. Why? Well, because then you would need to... In Circles, it is meant to be so simple that I issue circles, and then I know I can kind of trust that other account. I can exchange my money to their money one-to-one because we are issuing under the exact same rules. If everyone could define their own rules, it would be much harder to negotiate how much my money should be worth compared to yours. So I would say, for a stable equilibrium, the goal of Circles is to have the stable equilibrium where the exchange rates between circles are just one-to-one. The goal would be to say one circle is one circle, and it kind of abstracts away the fact that under the hood, we have all those different... Just like in the dollar system right now, technically speaking, if you have a dollar at one bank and a dollar at another bank, those are not the same dollars. But the system is built in such a way that usually it feels like one-to-one because if everything goes well, you can exchange them one-to-one. But if it breaks, you will realize, oh, the dollar at the bank that just went bankrupt is different from the other one. But the US dollar and the euro have floating exchange rates. Why can't you... Say again? The US dollar and the euro have floating exchange rates. Oh, no, no, for sure, for sure. So, in Circles, yes. I mean, the expectation is that, within a connected cluster, the exchange rate is one-to-one. But, yeah, there can be different clusters, where within the clusters the exchange rate is one-to-one, but between the clusters there are floating exchange rates. Why not just rely on supply and demand? Well, it will be. I mean, the price between those clusters will be defined by supply and demand. And what we are actually proposing in our upcoming relaunch, I'd say, or restart, or reactivation, it's a continuation of the system, is that people or groups can back their very own circles with the reserve assets they want, Bitcoin, dollar, whatever they want, and then there will be a free floating exchange rate for those circles, so defined by supply and demand. But as long as people trust each other, then they are essentially creating additional one-to-one demand between those currencies. Maybe we need to go back to the identity topic. No, but you're expressing identity in a different way, and so I just want to dig in on that. But I want to turn to Lasha now. So how do you see yourself and Rarimo in this constellation of identity protocols? What differentiation do you see that you're bringing? I think we try to use both sides of the world, such as passports as an instance first, but then extend it with a graph and capture more depth of the interactions. What I would say is how we try to solve the identity problem or enable the buildup is very use case centric and, like I said, governance centric. The fact that we understand this voting stuff has to be done that way, it gave rise to solving the uniqueness problem in a certain way. 
But this doesn't mean that the same uniqueness should be reused universally across other use cases. And so going at a primitive level of these kinds of registries and the building blocks and having more like this kind of flexible framework, I think that's the way forward. And as well, as we've seen, mostly under the regulatory pressure or this kind of collusion risk, I think the permissionlessness and empowering of the users going forward should be like this kind of core principle that no one should kind of violate or cross the line for. So we only have three minutes left. I want to quickly do one minute each on where, you know, AI as a part of identity is helping us leverage AI to communicate with each other, and also to govern AI. So starting with you, Remco, where do you see the most immediate use case with WorldCoin in the near future? And World, or World ID, interfacing with AI: will, you know, OpenAI, for example, grant free access if you have a World ID? What are the sort of immediate near-term goals on that interaction? I mean, I can't speak for OpenAI, but we do share some money in common, so we'll see what happens there. Regarding AI, I think we need to embrace the fact that it's going to be some sort of human-AI hybrid operator. I'm a big believer that AI is just making humans better versions of themselves. So this is why I think it's so important that you distinguish between a CAPTCHA and a proof of personhood, which are different primitives with different use cases. And we might not even be that interested in the CAPTCHA use case. What is going to be a very important problem is distinguishing real world sensory data from faked data and authenticity. I don't think proof of presence alone is a solution there, but I do think it can be an important part of a functioning solution that allows us to authenticate content and information. Lasha, any immediate applications? Yeah. In my world, I don't have this, like, us humans and AIs in kind of like separate camps. I more see and question, like, okay, what is the first thing? Like, I own multiple AIs. So the first dimension is how do I control it, so how do we build this my-me-my-AI and identify these relations and control. And then, zooming out, like, all the AIs of individuals, or like the systems, how do they coordinate, and what are the mechanisms for us to, I don't know, lower down the computational capacity or shut certain things down and abstract them? This is the next level of governance problems that we should be figuring out. That's where the identity aspects will be born uniquely. Martin, I know you're thinking a lot about AI? Yeah, I think it's certainly good to have a robust identity framework in place that also is relational, because, yeah, for the reason mentioned in the very beginning. So I like to see a version of something like Circles that's completely permissionless as a base layer built on this trust graph, but then projects like ZK Passport or WorldID that can essentially provide additional attestations, what I would call it, to those open base layer identities can bring us a very robust system overall where humans can coordinate, if necessary, maybe against AI. Will each circle have its own AI agent? That's a good question, whether AI agents will also use circles or not. I don't know yet. They could. 
Alright, let's give them a round of applause everybody. Thank you.", - "eventId": "devcon-7", - "slot_start": 1731559800000, - "slot_end": 1731561000000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1jVtcSZgrBxcYG4lFAatpVuooVRxzUpgPKggpcsgETVM", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "lasha", - "martin-k", - "puja", - "remco-bloeman", - "vitalik-buterin" - ] + "danceratopz" + ], + "eventId": "devcon-7", + "slot_start": 1731483000000, + "slot_end": 1731484800000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1H_C3_bcxmpSTe9V9Z7CXA4jdQBIVdf6U0HYmPOFadS0", + "resources_slides": "https://drive.google.com/file/d/10jgTJmeBRAW0McIiqqpsKrZ8DAAd9dBP/view" }, "vector": [ - 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -599531,7 +597831,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -599683,7 +597982,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -599756,6 +598054,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -599859,7 +598158,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -599871,8 +598169,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -600115,6 +598411,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -600339,6 +598636,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -600419,6 +598717,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -600472,6 +598771,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -600656,6 +598956,7 @@ 2, 0, 0, + 0, 2, 0, 0, @@ -600674,46 +598975,49 @@ }, { "session": { - "id": "protec-and-attac-programmatic-execution-layer-consensus-tests", - "sourceId": "GZBP8A", - "title": "Protec and Attac: Programmatic Execution Layer Consensus Tests", - "description": "We'll give an overview of Ethereum Execution Spec Tests (EEST), the new Python framework used since Shanghai to generate test vectors for Ethereum Virtual Machine (EVM) implementations. By generating tests programmatically this modular framework allows test cases to be readily parametrized and dynamically executed against clients on live networks. It tightly integrates with the Ethereum Execution Layer Specification (EELS) and could potentially be used across the L2 EVM ecosystem.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "protocol-alignment-governing-like-a-protocol", + "sourceId": "JDKAJD", + "title": "Protocol Alignment: Governing like a Protocol", + "description": "We define a protocol as aligned when all stakeholders in its network agree:\r\n1. The protocol’s objectives\r\n2. How to measure progress toward objectives\r\n3. 
How to achieve the objectives\r\n\r\nIn this talk, we'll explore both new and old decentralized mechanisms that governance leads and protocol designers can leverage to address misalignment in governance.", + "track": "Coordination", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Research", "featured": false, "doNotRecord": false, - "keywords": [ - "Python", - "pytest" - ], "tags": [ - "Core Protocol", - "EVM-equivalent", - "Testing", - "pytest", - "Core Protocol", - "EVM-equivalent", - "Testing" + "Governance", + "Futarchy", + "Mechanism design", + "Futarchy", + "Governance", + "Mechanism design" ], - "language": "en", - "speakers": [ - "danceratopz" + "keywords": [ + "n/a" ], + "duration": 548, + "language": "en", + "sources_swarmHash": "cb195897f2e256e070d9110b111d2d7e584f889225a323b751e7cc280d7e8864", + "sources_youtubeId": "I_lMVnDgxvk", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6734816a9dbb7a90e1cffacb", "eventId": "devcon-7", - "slot_start": 1731483000000, - "slot_end": 1731484800000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1H_C3_bcxmpSTe9V9Z7CXA4jdQBIVdf6U0HYmPOFadS0" + "slot_start": 1731490200000, + "slot_end": 1731490800000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1n1_ahUlOLb7iuUb9uaTE_CyPbh0s7FZKpQGTyQ4xxps", + "resources_slides": "https://drive.google.com/file/d/1g4W7rGdWR-rMu4mKY_KsdIgc3j_c-HiK/view", + "speakers": [ + "noturhandle" + ] }, "vector": [ 0, 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -600721,6 +599025,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -601119,7 +599424,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -601244,6 +599548,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -601464,6 +599769,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -601478,7 +599784,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -601490,6 +599795,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -601552,6 +599858,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -601703,7 +600010,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -601785,7 +600091,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -601839,8 +600144,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -602025,7 +600328,6 @@ 2, 0, 0, - 0, 2, 0, 0, @@ -602038,58 +600340,52 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "protocol-alignment-governing-like-a-protocol", - "sourceId": "JDKAJD", - "title": "Protocol Alignment: Governing like a Protocol", - "description": "We define a protocol as aligned when all stakeholders in its network agree:\r\n1. The protocol’s objectives\r\n2. How to measure progress toward objectives\r\n3. 
How to achieve the objectives\r\n\r\nIn this talk, we'll explore both new and old decentralized mechanisms that governance leads and protocol designers can leverage to address misalignment in governance.", - "track": "Coordination", - "type": "Lightning Talk", + "id": "protocol-guild-funding-the-ethereum-commons", + "sourceId": "EJVT7E", + "title": "Protocol Guild: funding the Ethereum commons", + "description": "Ethereum produces shared resources within the commons frame.\r\n\r\nProtocol Guild is a way collectively fund the people maintaining the crucial underlying software, while rebalancing the incentives to do this work relative to the broader industry context.\r\n\r\nThe entire ecosystem benefits when there is consistent incentives to recognize this work.", + "track": "Core Protocol", + "type": "Talk", "expertise": "Beginner", - "audience": "Research", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Governance", - "Futarchy", - "Mechanism design", - "Futarchy", - "Governance", - "Mechanism design" + "Gaming", + "theory" ], "keywords": [ - "n/a" + "ACD", + "Core Protocol", + "DAO", + "Onchain Organization", + "Game Theory" ], - "duration": 548, + "duration": 1534, "language": "en", - "sources_swarmHash": "cb195897f2e256e070d9110b111d2d7e584f889225a323b751e7cc280d7e8864", - "sources_youtubeId": "I_lMVnDgxvk", + "sources_swarmHash": "0f113065ce2a6b6c41f92bfdfadafdcb1c4e2b703debb37ffe0e52577bedd617", + "sources_youtubeId": "4Hc664qQkV0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734816a9dbb7a90e1cffacb", + "sources_streamethId": "6736ea1d1b0f83434d40f7be", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731490200000, - "slot_end": 1731490800000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1n1_ahUlOLb7iuUb9uaTE_CyPbh0s7FZKpQGTyQ4xxps", - "resources_slides": null, + "slot_start": 1731646800000, + "slot_end": 1731648600000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1X-IkjzbaZoye8kj19czZe1suKsBA9C7jL4gsmxYI5ko", + "resources_slides": "https://drive.google.com/file/d/1CL3uBIiEScBDHDe3k-Nlz6Cy3qoLzB_b/view", "speakers": [ - "noturhandle" + "trent-van-epps" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -602618,9 +600914,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -602628,6 +600921,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -602841,7 +601135,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -602867,7 +601160,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -602930,7 +601222,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -602954,6 +601245,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -603286,6 +601578,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -603399,7 +601695,6 @@ 0, 0, 0, - 2, 0, 0, 2, @@ -603409,6 +601704,9 @@ 0, 0, 0, + 2, + 0, + 0, 0, 0, 0, @@ -603419,52 +601717,50 @@ }, { "session": { - "id": "protocol-guild-funding-the-ethereum-commons", - "sourceId": "EJVT7E", - "title": "Protocol Guild: funding the Ethereum commons", - "description": "Ethereum produces shared resources within the commons frame.\r\n\r\nProtocol Guild is a way collectively fund the people maintaining the crucial underlying software, while rebalancing the incentives to do this work relative to the broader industry context.\r\n\r\nThe entire ecosystem benefits when there is consistent incentives to recognize this work.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Beginner", - "audience": "Community", + 
"id": "proving-liquidity-of-an-amm", + "sourceId": "AD3X38", + "title": "Proving liquidity of an AMM", + "description": "Liquidity providers in an AMM expect that they can always withdraw their tokens, even in case of a bank run. Taking the concrete implementation of Uniswap v4, we formally proved that the funds owned by the contract always cover the provided liquidity. This talk describes the methodology for proving this critical property, which can be applied to other protocols holding the liquidity for their users.", + "track": "Security", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Developer", "featured": false, "doNotRecord": false, - "tags": [ - "Gaming", - "theory" - ], "keywords": [ - "ACD", - "Core Protocol", - "DAO", - "Onchain Organization", - "Game Theory" + "Invariants" + ], + "tags": [ + "Formal Verification", + "Reentrancy", + "invariants", + "Formal Verification", + "Reentrancy" ], - "duration": 1534, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "4d4e75970d8eed994781f33c0552f03df8afc1086d5c55d330347bbaea91d763", + "sources_youtubeId": "CnrZyDeGwKI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736ea1d1b0f83434d40f7be", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731646800000, - "slot_end": 1731648600000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1X-IkjzbaZoye8kj19czZe1suKsBA9C7jL4gsmxYI5ko", - "resources_slides": null, "speakers": [ - "trent-van-epps" - ] + "jochen-hoenicke" + ], + "eventId": "devcon-7", + "slot_start": 1731471000000, + "slot_end": 1731471600000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1QlA6rBFr3f12d9BFrh9CBVqTCO60FFqlit1W076MzQ8", + "resources_slides": "https://drive.google.com/file/d/1yVT_piGcfSE6Y-oFggao2522m6sJ4N6H/view" }, "vector": [ + 6, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -604322,10 +602618,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -604409,6 +602701,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -604657,6 +602950,7 @@ 0, 0, 2, + 2, 0, 0, 0, @@ -604771,6 +603065,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -604783,10 +603078,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -604796,42 +603087,44 @@ }, { "session": { - "id": "proving-liquidity-of-an-amm", - "sourceId": "AD3X38", - "title": "Proving liquidity of an AMM", - "description": "Liquidity providers in an AMM expect that they can always withdraw their tokens, even in case of a bank run. Taking the concrete implementation of Uniswap v4, we formally proved that the funds owned by the contract always cover the provided liquidity. This talk describes the methodology for proving this critical property, which can be applied to other protocols holding the liquidity for their users.", - "track": "Security", + "id": "public-epistemics-and-futarchy", + "sourceId": "3UX8GZ", + "title": "Public epistemics and futarchy", + "description": "35 years ago I began outlining a vision of how betting markets could offer informed credibly-neutral estimates on far more disputed topics. I elaborated 25 years ago on how decision markets could support neutral governance, and 21 years ago on how combinatorial markets allow estimates on all possible combinations for existing topics. Now in the last year, we are seeing substantial crypto-based trials, especially re governance. 
In this talk, I’ll paint a picture of where all this could go.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Developer", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, - "keywords": [ - "Invariants" - ], "tags": [ - "Formal Verification", - "Reentrancy", - "invariants", - "Formal Verification", - "Reentrancy" + "Economics", + "Free Speech", + "Futarchy" ], + "keywords": [], + "duration": 934, "language": "en", - "speakers": [ - "jochen-hoenicke" - ], + "sources_swarmHash": "24f7e97b848f1e0a67b50759d3d1eae014903a3d32ad8ddcc76d255c18ebeddb", + "sources_youtubeId": "Kvl0LrwtE8k", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67381fca1b0f83434d0dfd4b", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731471000000, - "slot_end": 1731471600000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1QlA6rBFr3f12d9BFrh9CBVqTCO60FFqlit1W076MzQ8" + "slot_start": 1731562200000, + "slot_end": 1731563100000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1P1IH_O2NLxK_MXtmkfR8Yb6EoLR6gV1arcuKrkGimqE", + "resources_slides": "https://drive.google.com/file/d/1_-6w2mR0yjyEa0K_e7PqyfES6Fffbq_O/view", + "speakers": [ + "robin-hanson" + ] }, "vector": [ - 6, - 0, - 0, - 0, 0, + 6, 0, 0, 0, @@ -605043,6 +603336,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -605364,7 +603658,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -605605,12 +603898,15 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 2, 0, + 2, 0, 0, 0, @@ -605775,7 +604071,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -606024,8 +604319,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -606147,10 +604440,8 @@ 0, 0, 0, - 2, - 0, - 0, 0, + 2, 0, 0, 0, @@ -606163,47 +604454,48 @@ }, { "session": { - "id": "public-epistemics-and-futarchy", - "sourceId": "3UX8GZ", - "title": "Public epistemics and futarchy", - "description": "35 years ago I began outlining a vision of how betting markets could offer informed credibly-neutral estimates on far more disputed topics. I elaborated 25 years ago on how decision markets could support neutral governance, and 21 years ago on how combinatorial markets allow estimates on all possible combinations for existing topics. Now in the last year, we are seeing substantial crypto-based trials, especially re governance. In this talk, I’ll paint a picture of where all this could go.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Community", + "id": "public-private-hybrid-rollups", + "sourceId": "YUFEJK", + "title": "Public-Private Hybrid Rollups", + "description": "We posit that it is a best practice that rollups have privacy capabilities. We'll focus on zero-knowledge and its role in enhancing privacy and how to deal with the need for public state for shared use cases. 
We'll delve into the interaction between public and private execution environments, detailing how such disparate execution environments can be combined.", + "track": "Layer 2", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Economics", - "Free Speech", - "Futarchy" + "Zk Rollups", + "Token bridging", + "Privacy", + "best", + "practice", + "Privacy", + "Token bridging", + "Zk Rollups" ], - "keywords": [], - "duration": 934, + "keywords": [ + "hybrid rollups", + "privacy as a best practice" + ], + "duration": 1396, "language": "en", - "sources_swarmHash": "24f7e97b848f1e0a67b50759d3d1eae014903a3d32ad8ddcc76d255c18ebeddb", - "sources_youtubeId": "Kvl0LrwtE8k", + "sources_swarmHash": "2e6b811ad2567c4e1aca22ebd687a47279b5f1ce00313a27a958d3092402370e", + "sources_youtubeId": "0mDlVkzde_M", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67381fca1b0f83434d0dfd4b", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731562200000, - "slot_end": 1731563100000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1P1IH_O2NLxK_MXtmkfR8Yb6EoLR6gV1arcuKrkGimqE", - "resources_slides": null, + "slot_start": 1731400200000, + "slot_end": 1731402000000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/11nsntpn_PkweY9PIGZYHntFGei0Pk5LLe9J12awK9K4", + "resources_slides": "https://drive.google.com/file/d/1Zx3-C0WpnelZq1FoCgIgLQi0SY0UCyqG/view", "speakers": [ - "robin-hanson" + "adam-domurad" ] }, "vector": [ - 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -606211,6 +604503,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -606412,8 +604705,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -606742,6 +605033,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -606977,15 +605269,12 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, - 2, 0, - 2, 0, 0, 0, @@ -607024,6 +605313,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -607070,6 +605360,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -607156,6 +605447,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -607399,6 +605691,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -607512,6 +605805,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -607522,8 +605816,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -607535,57 +605827,52 @@ }, { "session": { - "id": "public-private-hybrid-rollups", - "sourceId": "YUFEJK", - "title": "Public-Private Hybrid Rollups", - "description": "We posit that it is a best practice that rollups have privacy capabilities. We'll focus on zero-knowledge and its role in enhancing privacy and how to deal with the need for public state for shared use cases. We'll delve into the interaction between public and private execution environments, detailing how such disparate execution environments can be combined.", - "track": "Layer 2", - "type": "Talk", + "id": "putting-identities-on-chain-passport-zkp", + "sourceId": "HBH3Y7", + "title": "Putting Identities On-Chain (Passport ZKP)", + "description": "Discussing the creation of an on-chain registry system for storing zero-knowledge (zk) identities. 
This system will enable individuals to self-issue and control their data without a central authority, showcasing the ZK passport use case.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Zk Rollups", - "Token bridging", - "Privacy", - "best", - "practice", - "Privacy", - "Token bridging", - "Zk Rollups" - ], "keywords": [ - "hybrid rollups", - "privacy as a best practice" + "ZK passport", + "social graph" + ], + "tags": [ + "Digital Sovereignty", + "Identity", + "Permissionless" ], - "duration": 1396, "language": "en", - "sources_swarmHash": "2e6b811ad2567c4e1aca22ebd687a47279b5f1ce00313a27a958d3092402370e", - "sources_youtubeId": "0mDlVkzde_M", + "sources_swarmHash": "711c0f65607fb40239c8612823886e2a83225b79b042bf0b6ab91efe797f7c92", + "sources_youtubeId": "acy5cANoAuQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731400200000, - "slot_end": 1731402000000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/11nsntpn_PkweY9PIGZYHntFGei0Pk5LLe9J12awK9K4", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "adam-domurad" - ] + "lasha" + ], + "eventId": "devcon-7", + "slot_start": 1731559320000, + "slot_end": 1731559800000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1zX7rgpH4mVoH4btzraVJBbogPvAVttN57Twcs55Kb2I", + "resources_slides": "https://drive.google.com/file/d/1BKKWrg9BM1H-ZORuZec2Q7DLppx_2i4j/view" }, "vector": [ 0, + 6, 0, 0, 0, 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -608110,13 +606397,12 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -608376,6 +606662,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -608397,7 +606685,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -608444,7 +606731,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -608531,9 +606817,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -608597,6 +606880,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -608776,7 +607060,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -608913,39 +607196,50 @@ }, { "session": { - "id": "putting-identities-on-chain-passport-zkp", - "sourceId": "HBH3Y7", - "title": "Putting Identities On-Chain (Passport ZKP)", - "description": "Discussing the creation of an on-chain registry system for storing zero-knowledge (zk) identities. This system will enable individuals to self-issue and control their data without a central authority, showcasing the ZK passport use case.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "id": "putting-intents-and-users-together", + "sourceId": "YUPJGZ", + "title": "Putting Intents and Users Together", + "description": "Intents represent a new approach to Web3 interactions. However, the transition from the existing structure to an intent-centric space remains uncertain unless we maintain user familiarity. We conducted experiments on user experience for intents and tested them with a focus group. 
This talk will explore how we can approach intents in a way that users will adopt more readily by leveraging the latest standards and EIPs, including EIP-7702, ERC-4337, ERC-7579, and ERC-7715.", + "track": "Usability", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "ZK passport", - "social graph" - ], "tags": [ - "Digital Sovereignty", - "Identity", - "Permissionless" + "Rollups", + "Account Abstraction", + "Intents", + "chain", + "abstraction", + "Account Abstraction", + "Intents", + "Rollups" ], - "language": "en", - "speakers": [ - "lasha" + "keywords": [ + "Chain", + "Abstraction" ], + "duration": 520, + "language": "en", + "sources_swarmHash": "289be16f743d082567c4698ba4f6e9e23627809b187c2df7857d8836eef1a707", + "sources_youtubeId": "0FpMhUJTwA4", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67357c2b9dbb7a90e1e347be", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731559320000, - "slot_end": 1731559800000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1zX7rgpH4mVoH4btzraVJBbogPvAVttN57Twcs55Kb2I" + "slot_start": 1731557400000, + "slot_end": 1731558000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1oa41JFQPp-vuRePzM4jYH0K22uvY02iOso74y9q_Ryc", + "resources_slides": "https://drive.google.com/file/d/1ozLbEySYeXshZwmeZlAuE0_WVV_kCYfo/view", + "speakers": [ + "abhimanyu-shekhawat" + ] }, "vector": [ - 0, - 6, - 0, 0, 0, 0, @@ -608954,6 +607248,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -609476,7 +607771,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -609484,6 +607778,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -609735,6 +608030,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -609744,15 +608040,10 @@ 0, 0, 2, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, + 2, 0, 0, 0, @@ -609898,6 +608189,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -609962,7 +608255,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -610279,47 +608571,50 @@ }, { "session": { - "id": "putting-intents-and-users-together", - "sourceId": "YUPJGZ", - "title": "Putting Intents and Users Together", - "description": "Intents represent a new approach to Web3 interactions. However, the transition from the existing structure to an intent-centric space remains uncertain unless we maintain user familiarity. We conducted experiments on user experience for intents and tested them with a focus group. This talk will explore how we can approach intents in a way that users will adopt more readily by leveraging the latest standards and EIPs, including EIP-7702, ERC-4337, ERC-7579, and ERC-7715.", - "track": "Usability", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "quarkid-bringing-south-america-on-chain-with-ssi-and-account-abstraction", + "sourceId": "QXCTMB", + "title": "QuarkID: Bringing South America on-chain with SSI and account Abstraction", + "description": "QuarkID is a Self-Sovereign Identity protocol bringing millions of South American citizens on-chain. Citizens in Buenos Aires, Argentina, Monterrey, and Nuevo Leon, Mexico, are using government SSI deployed on ZKsync Era through the QuarkID protocol. 
Driver's licenses, birth certificates, and over 50 different credentials are secured by Ethereum technology in the world’s first case of governments using Ethereum’s permissionless blockchain to meet their identity needs.", "track": "Real World Ethereum", "type": "Talk", "expertise": "Beginner", "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ "2FA", "Account Abstraction", "Identity", "Open Source Software", "Political systems", "Politics", "Public good", "Use Cases", "Validiums", "Zero-Knowledge", "ZK-EVMs", "ZKP" ], "keywords": [ "Sovereign" ], "duration": 1183, "language": "en", "sources_swarmHash": "b778c903f53cea71812ed118675883effd57f913af3ef91ebe387eecd6c274a6", "sources_youtubeId": "5c43HjCcZeg", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736bbe99dbb7a90e1323fe9", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736bbe99dbb7a90e1323fe9.vtt", "transcript_text": " Thanks so much for being here. Let me share with you the... My name is Diego Fernandez, I'm the co-creator of QuarkID. And let me start by showing some hard facts about what QuarkID is. First, it's a self-sovereign identity protocol. It's open source under Apache 2.0 license. It's a digital public good enrolled in the Digital Public Goods Alliance. It was launched on October the 21st in Buenos Aires City, with 3 million inhabitants, and we onboarded almost 165K users in less than one month. And we have 80 million users in the confirmed pipeline, and that comes from three cities in Argentina and in Mexico, four subnational states, both in Argentina and Mexico, and two pilot projects with national governments in El Salvador and in Argentina. And we have onboarded also two major banks, which is Banco Macro, the second biggest retail bank in Argentina, and Banco Santander, as well as one of the biggest telco companies in America, which is Tomex, with more than 100 million users. Why did Buenos Aires City start the QuarkID journey? Our vision was, first, governments should pay the friction to create highways of trust, because it will bring enormous value to society. And those highways of trust should be open, non-permissioned, and available without any government oversight to everyone. And that should give users the ability to decide how, when, with whom, and what information to share. And we did this in collaboration between governments and startups. And we found and we discovered that that creates a thriving Web3 ecosystem that we believe will bring prosperity to society. What were, in my perspective, the three key decisions that we needed to make? Well, the first, decentralized versus centralized. The second, open and non-permissioned versus private and permissioned. And the third, open source versus proprietary. And this is pretty important because governments, unfortunately, usually choose the downside of these options. Centralized, private and permissioned, and proprietary. And we believe that those sorts of waves are starting to change. 
And of course, we chose decentralized, open non-permissioned, and open source. Very briefly, what's the QuarkID tech stack? QuarkID again is a protocol which runs on top of the ZKsync stack, which uses Ethereum as a security layer. But when you are planning to deploy a system to millions of people using decentralized non-permissioned technology, you face an evil dilemma, which is users today use centralized Web2 applications, which have an amazing UX. And when we migrate to the Web3 world, we're very familiar with that, UX is so terrible. Just claiming an NFT requires you to go back and forth in wallets and signing transactions. It's so hard, so hard. UX rules the game. If we don't have a simple approach to UX, there will be no adoption. What is the evil dilemma solution that we found? Well, we give users the choice. You can download the open source wallet from GitHub even and compile it yourself, or you can download it from the App Store. It's an open source, non-permissioned self-custodial wallet. But we implemented QuarkID technology within the government application, which is running on decentralized railways, utilizing ZKsync Era and Ethereum, but it's a self-custodial wallet for users who are not so used to dealing with this type of technology. Our way of thinking is, hey, what application will my 85-year-old mother use? When she wants to download a new application on her iPhone, she calls me and says, hey, son, is this application safe? That's the level of user that you need to understand will be a substantial part of your user base. So we need to give users the choice of having a very simple custodial solution that runs, again, on decentralized railways. Or you can go full decentralized, encrypted. That's your choosing, not ours. And it's always a use case. And I think that we as a community need to understand that people don't buy technology. Decision makers in governments and institutions, they don't buy technology. They mostly don't care about technology. They buy use cases that solve real world problems. We need to think, and that's my perspective, of technology as a tool, as a means to an end, not an end in itself. For real world decision makers, if we have the use case, which is pretty simple, a policeman, and that is happening in Buenos Aires today, a policeman, instead of asking you for a piece of plastic, shows you a QR code, you scan it with your wallet, and that gives the policeman the proof that you're able to drive. For decision makers, for a mayor, a governor, the president of a bank or whatever, 99% of the decision relies on use case. Of course, we use Ethereum as a security layer, ZKsync to provide ZK proofs. We have native account abstraction. I will follow up on that because it's pretty important. And users control their information, not institutions. But decision makers will see use case. We love technology. Decision makers make decisions based on use case. And the question is, how do we get billions of people on chain? Today, according to statistics, we have almost 600 million people. There are 8 billion people in humanity. How do we solve this problem? 
I like to think that the QuarkID mission is adoption, adoption, adoption. This is a very famous quote that Steve Ballmer gave once, you know, this champion all over the place, we probably all saw that. And of course he was saying developers, developers, developers, and many of us are developers here, and of course developers are crucial. But when Steve Ballmer was saying developers, developers, developers, his company, Microsoft, had 95% of the user base using Windows. So then it's easy. He already had gained adoption. We as a community haven't yet, and we need to get there. And in my perspective, and this is kind of radical, we need to try and Trojan horse the system. And I don't mean this in a bad way. We need to find a way to gain adoption through the system, not against the system. And I am positive that that Trojan horse, that positive Trojan horse, is self-sovereign identity using account abstraction. And why is that? Because, let me go back there. Account abstraction is crucial because each user, when he gets an identity minted in QuarkID, they are using account abstraction and they have an account on ZKsync Era. It's as straightforward as that. And that enables a lot of use cases. And I was discussing the other day, next year, Buenos Aires City will be piloting the chance of giving subsidies. I mean, the city gives out subsidies to several different things, something like $35 million a year to different types of beneficiaries. Buenos Aires is evaluating the chance of giving subsidies using their identity wallets, using a token. And there is a great advantage when you're dealing with subsidies. And that is being done because of account abstraction. That's a great opportunity. And why are governments so, so important? Because they provide us, or they may provide us, with a reverse adoption life cycle. If governments can't help to create the necessary railways of trust, we as an industry should help governments to achieve this goal. And for me that is crucial. We need to focus on that. We need to understand that there are champions, there are government champions, there are institution champions out there that share the same ethos that we share, that believe in a future that is self-custodial, decentralized, non-permissioned, censorship resistant, and they need help from our side, and we should provide that help. I firmly believe that a decentralized, non-permissioned, open-source future is possible. And I'm convinced that we need to do that with governments and with institutions and not against them. And that's our goal. Thanks so much. That was quick, man. I got 10 minutes left. We can definitely do a lot of Q&A. Well, feel free to send all your questions here, but let me go to the first one as well. How does the open source nature of the project impact business model and perhaps monetization? No, there's no business model for QuarkID. This is an open source protocol, which is being used by several governments. It's not a business. I see. Going through the top questions, is it possible to interoperate with other protocols? Yeah, definitely. I mean, two things here. Of course, we... Over here. Great, thanks. Sorry. So, of course, we have two things there. First is the W3C standards, which define DIDs, which is Decentralized Identifiers, and Verifiable Credentials. And that gives you a level of interoperability. But, and I'm happy to say this, this is the first time in any DEVCON that we have a specific workshop on self-sovereign identity. 
And all of us in the industry are meeting today this afternoon in order to work precisely on interoperability with the folks from Privado and so on, and we think that there's a bright future in doing that. All right, next question: why did Buenos Aires start there? And I can recall that you are planning to expand in Argentina, Mexico and Colombia. Perhaps you can talk about the country expansion plans and the different regulation challenges in each jurisdiction as well. I have a very particular view on regulations. First, Buenos Aires is an extremely crypto-friendly city. You have a great pool of talent. Many of the most important projects in the Ethereum ecosystem and in the crypto ecosystem came out of Buenos Aires, just to mention a few: OpenZeppelin, the Nomic Foundation, Crecimiento and Aleph, and so on. We have very, very important projects coming out of Buenos Aires. Decentraland, one of the founders of Sandbox. I could keep on mentioning a lot of projects. So Buenos Aires has this amazing talent pool. We are so focused on the crypto industry. And when I, in my former role as Secretary of Innovation of the city, made a call out to the startups and to the innovators in Buenos Aires, everyone started collaborating, and we came up with this solution which we're really proud of. Okay. Crypto-friendly city. Where else besides Buenos Aires? Well, I mean, Buenos Aires is a crypto-friendly city, of course, and we are working as a community very, very strongly, and now Milagros from Crecimiento will share that with you. We think that Buenos Aires and Argentina could become one of the first, if not the first, crypto country. We're striving to do that, we're sort of working to do that, and of course we'll need your help. All right. Can you describe your level of partnership with other protocols, maybe some strategic planning, maybe other companies that you're looking to collaborate with? Well, I mean, of course, and we shared that yesterday in a panel that we were on with Evan and Anthony from Privado ID. Something like three to four years ago, there was basically nobody speaking about identity but Privado and Disco XYZ at that time. And our vision is there are 8 billion people in the world. There's so, so, so much space that we should work together. Okay. There's a question that we'd like to ask. Sure. The question there is, where are the credentials stored? On-chain or off-chain? If on-chain, how do you handle privacy risk? No, not on-chain. Not on-chain. Never, never, ever can you put personal information on chain. The technology behind the standards of decentralized identifiers and verifiable credentials works this way. What you have anchored on chain is a decentralized identifier. That's the only thing which is anchored on chain. Credentials are minted and stored off chain. So just to give an example, you have the DID of the City of Buenos Aires or any other government or institution, and you have your own DID. Those are anchored, in our case, on ZKsync Era. And when you need to mint a new credential, you ask the issuer to give you that credential. That happens on a peer-to-peer basis using a protocol which is named DIDComm. And what you get is a JSON digitally signed by this DID in favor of this DID. And that is stored in the personal device that the users have in their hand. 
So when you need to verify that credential, what you do is you check that the credential is valid, that it is not new, that it is not void, and that it was signed by the issuer in favor of the holder. So you rely on on-chain security, but the data is stored off-chain. How does QuarkID plan to maintain its technological advantage in the long term? Well, basically, as with any other open source protocol, expanding our user base generates interest. The amazing thing is that when you do this, again, reverse adoption lifecycle, and you start to have millions and millions and millions of users, developers start to build on top of it, and we have several startups and several companies building on top of the protocol and even expanding it. Just to give an idea, when we were involved in, this is a very small thing but it's quite nice, when we did the Aleph pop-up city in August, the passport for entering Aleph was a credential minted in QuarkID, and one amazing guy, his name is Tule I guess, did this extension of the protocol, connecting it with an Arduino device in order to open a door. So when you accessed the hub, you scanned that, showed your credential, and the door got open, which was so nice. And that is, I mean, the community building and developing. Can we squeeze in a question about competition? How is this project different from Privado ID or Polygon ID? I don't know in such detail the stack of Privado ID and how they are working now with Disco XYZ because they recently merged. But again, we of course believe in competition and we respect each other very much. And as I said before, I don't think... You need to have the technological basics that you need to implement. But this thing is not about technology. This thing uses technology to gain adoption. So what's our focus? Adoption, adoption, adoption. And perhaps one final question on adoption, since we have a few more minutes remaining as well. Can Argentina's adoption by both its government and citizens serve as a model for other Latin American countries? I wouldn't say serve as a model, but when we're dealing with such frontier innovations, and the other day I was thinking, isn't there a more obvious case where innovation is needed than identity? We are sending or trying to send rockets to Mars. Elon Musk is catching rockets falling out of space in mid-air. We're discussing artificial general intelligence models, whether they're going to rule the world or not. And when we need to identify ourselves, we pick up a piece of plastic and start doing like this on camera. And my God, that's so stupid, so stupid. We need to change that. Now, of course, it's identity. Institutions can feel fear. And finding some governments and big institutions such as Banco Santander and Banco Macro that were capable of taking the first steps, of course, makes everyone else in the space go, hey, come on, take a look at those guys. They did it. Why don't we? All righty. So that concludes our session. Can you do a quick wrap-up about what you do, a key takeaway, maybe a high-level description, for the audience before we conclude the session? Yeah, of course. 
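For readers following the DID and verifiable-credential flow described in the talk above, here is a minimal, self-contained sketch of the issue-and-verify pattern. It is illustrative only: the DIDs, field names, and helpers are hypothetical, not QuarkID's actual schema, DIDComm messaging, or on-chain anchoring. It simply shows an issuer signing a credential JSON "in favor of" a holder, and a verifier checking that signature.

```typescript
// Minimal sketch of the issue/verify flow described in the talk (illustrative
// only; the schema and helpers here are hypothetical, not QuarkID's API).
import { createSign, createVerify, generateKeyPairSync } from "crypto";

interface Credential {
  issuer: string;                 // DID of the issuing institution
  holder: string;                 // DID of the citizen
  claims: Record<string, string>; // e.g. a driver's license attribute
  signature: string;              // issuer's signature over the payload
}

// In the real protocol the issuer's key would be resolved from a DID
// anchored on-chain; here we just generate a local EC key pair.
const { privateKey, publicKey } = generateKeyPairSync("ec", { namedCurve: "P-256" });

const payloadOf = (c: Omit<Credential, "signature">) => JSON.stringify(c);

// Issuance: the issuer DID signs a JSON "in favor of" the holder DID.
// The signed credential is then stored on the holder's device, off-chain.
function issue(issuer: string, holder: string, claims: Record<string, string>): Credential {
  const body = { issuer, holder, claims };
  const signer = createSign("SHA256");
  signer.update(payloadOf(body));
  return { ...body, signature: signer.sign(privateKey, "base64") };
}

// Verification: check the credential was signed by the issuer in favor of
// the holder. Only DIDs are anchored on-chain; the claims never are.
function verify(cred: Credential): boolean {
  const { signature, ...body } = cred;
  const verifier = createVerify("SHA256");
  verifier.update(payloadOf(body));
  return verifier.verify(publicKey, signature, "base64");
}

const license = issue("did:example:buenos-aires", "did:example:citizen-123", {
  credentialType: "drivers-license",
});
console.log(verify(license)); // true
```

The production flow adds revocation checks and resolves issuer keys through on-chain DIDs, but the signature relationship being verified is the same one the talk describes.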
Governments and institutions are the adoption path for going from 600 million to 7, 5, 6, whatever billion people in the next 3, 4 to 5 years. And QuarkID and my team is trying to achieve that or help to achieve that with all of the other players in the industry. Thank you so, so much. Thank you. Please give a round of applause to Diego. That is QuarkID bringing South America...", "eventId": "devcon-7", - "slot_start": 1731557400000, - "slot_end": 1731558000000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1oa41JFQPp-vuRePzM4jYH0K22uvY02iOso74y9q_Ryc", - "resources_slides": null, + "slot_start": 1731556800000, + "slot_end": 1731558600000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1nZf4Y4ZKlAYK_rEfdGkjjq6S4WGbMxpwSUXYgi9pq-M", + "resources_slides": "https://drive.google.com/file/d/1u5k2F3znVW_oBeyBhxBascv83UEbEwQb/view", "speakers": [ - "abhimanyu-shekhawat" + "diego-fernandez" ] }, "vector": [ @@ -610329,8 +608624,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -610862,12 +609155,9 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -611088,6 +609378,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -611116,7 +609407,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -611127,8 +609417,6 @@ 0, 2, 0, - 0, - 0, 2, 0, 0, @@ -611153,9 +609441,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -611174,6 +609464,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -611181,8 +609472,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -611240,6 +609533,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -611275,11 +609569,6 @@ 0, 0, 0, - 2, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -611413,6 +609702,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -611424,6 +609714,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -611523,6 +609814,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -611641,11 +609933,9 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, + 2, 0, 0, 0, @@ -611659,60 +609949,43 @@ }, { "session": { - "id": "quarkid-bringing-south-america-on-chain-with-ssi-and-account-abstraction", - "sourceId": "QXCTMB", - "title": "QuarkID: Bringing South America on-chain with SSI and account Abstraction", - "description": "QuarkID is a Self-Sovereign Identity protocol bringing millions of South American citizens on-chain. Citizens in Buenos Aires, Argentina, Monterrey, and Nuevo Leon, Mexico, are using government SSI deployed on ZKsync Era through the QuarkID protocol. 
Driver's licenses, birth certificates, and over 50 different credentials are secured by Ethereum technology in the world’s first case of governments using Ethereum’s permissionless blockchain to meet their identity needs.", - "track": "Real World Ethereum", - "type": "Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "reading-before-writing-an-approach-to-brain-interfaces", + "sourceId": "AECBRW", + "title": "Reading Before Writing: An Approach to Brain Interfaces", + "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "2FA", - "Account Abstraction", - "Identity", - "Open Source Software", - "Political systems", - "Politics", - "Public good", - "Use Cases", - "Validiums", - "Zero-Knowledge", - "ZK-EVMs", - "ZKP" - ], - "keywords": [ - "Sovereign" - ], - "duration": 1183, + "tags": [], + "keywords": [], + "duration": 512, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "9a8cb184078eae668de170bf56770a6a31c5bcf2c71b1a6a9fa068ba7a9ae576", + "sources_youtubeId": "LYfOHxvgApA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736bbe99dbb7a90e1323fe9", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736bbe99dbb7a90e1323fe9.vtt", - "transcript_text": " Thanks so much for being here. Let me share with you the... My name is Diego Fernandez, I'm the co-creator of QuarkID. And let me start by showing some hard facts about what QuarkID is. First, it's a self-sovereign identity protocol. It's open source under Apache 2.0 license. It's a digital public good enrolled in the Digital Public Good Alliance. It was launched in October the 21st in Buenos Aires City with 3 million inhabitants, and we onboarded almost 165K users in less than one month. And we have 80 million users in the confirmed pipeline, and that comes from three cities in Argentina and in Mexico, four subnational states, both in Argentina and Mexico, who are doing two pilot projects with national governments in El Salvador and in Argentina. And we have onboarded also two major banks, which is Banco Macro, the second biggest retail bank in Argentina, and Banco Santander, as well as one of the biggest telco companies in America, which is Tomex, with more than 100 million users. Why did Buenos Aires City start the QuarkID journey? Our vision was first governments should pay the friction to create highways of trust because it will bring enormous value to society. And that highways of trust should be open, non-permissioned and available without any government oversight to everyone. And that should give users the ability to decide how, when, with whom and what information to share. And we did this in collaboration between governments and startups. And we found and we discovered that that creates a thriving web-free ecosystem that we believe will bring prosperity to society. What were, in my perspective, the three key decisions that we needed to make? Well, the first, decentralized versus centralized. The second, open non-permission versus private permission. 
And the third, open source versus proprietary. And this is pretty important because governments, unfortunately, usually choose the downside of these options. Centralized, private and permissioned, and proprietary. And we believe that that sort of waves are starting to change. And of course, we chose decentralized, open permission, and open source. Very briefly, what's QuarkID tech stack? QuarkID again is a protocol which runs on top of the CKSync stack which uses Ethereum as a security layer. But when you are planning to deploy a system to millions of people using decentralized non-permission technology, you face an evil dilemma, which is users use today centralized Web2 applications, which have an amazing UX. And when we migrate to Web3 world, we're very familiar with that, UX is so terrible. Just claiming an NFT requires you to go back and forth in wallets and signing transactions. It's so hard, so hard. UX rules the game. If we don't have a simple approach to UX, so hard, so hard. UX rules the game. If we don't have a simple approach to UX, there will be no adoption. What is the evil dilemma solution that we found? Well, we give users the choice. You can download the open source wallet from GitHub even and compile it yourself, or you can download it from the App Store. It's an open source, non-permission self-custodial wallet. But we implemented QuarkKD technology within the government application, which is running on decentralized railways, utilizing CK-SYNC era and Ethereum, but it's a self-custodial wallet for users which are not so used to deal with this type of technologies. Our way of thinking is, hey, what application will use my 85-year-old mother when she wants to download a new application in her iPhone, she calls me and says, hey, son, this application is safe. That's the level of users that you need to understand that we will be a substantial part of your user base. So we need to give users the choice of having a very simple custodial solution that runs, again, on decentralized railways. Or you can go full decentralized encrypted. That's your choosing, not ours. And it's always a use case. And I think that we as a community need to understand that people don't buy technology. Decision makers in governments and institutions, they don't buy technology. They mostly don't care about technology. They buy use cases that solve real world problems. We need to think, and that's my perspective, as technology, we need to think in technology as a tool, as a means to an end, not an end in itself. From real world decision makers, if we have the use case, which is pretty simple, a policeman, and that is happening in Buenos Aires today, a policeman, instead of asking you for a piece of plastic, shows you a QR code, you scan it with your wallet, and that gives the policeman the proof that you're able to thrive in. For decision makers, for a major, a governor, the president of a bank or whatever, 99% of the decision relies on use case. Of course, we use Ethereum as a security layer, CK-SYNC to provide CK proofs. We have native account of fraction. I will follow up on that because it's pretty important. And users control their information, not institutions. But decision makers will see use case. We love technology. Decision makers make decisions based on use case. And the question is, how do we get billions of people on chain? Today, according to statistics, we have almost 600 millions of people. There are 8 billion people in humanity. How do we solve this problem? 
I like to think that the Quark ID mission is adoption, adoption, adoption. This is a very famous quote that Steve Ballmer gave once, you know, this champion all over the place, we probably all saw that. And of course he was saying developers, developers, developers, and many of us are developers here and of course developers are crucial. But when Steve Ballmer was saying developers, developers, developers, his company, Microsoft, had 95% of the user base using Windows. So then it's easy. He already had gained adoption. We as a community haven't yet and we need to get there. And in my perspective, and this is kind of radical, we need to try and horse the system. And I don't mean this in a bad way. We need to find a way to gain adoption through the system, not against the system. And I am positive that that Trojan horse, that positive Trojan horse, is self-sovereign identity using account abstraction. And why is that? Because, let me go back there. Account abstraction is crucial because each user, when he gets an identity minted in QuarkID, they are using account abstraction and they have an account in CK-Sync era. It's as straightforward as that. And that enables a lot of use cases. And I was discussing the other day, next year, Buenos Aires City will be piloting the chance of giving subsidies. I mean, the city gives out subsidies to several different things, something like $35 million a year to different type of beneficiaries. Buenos Aires is evaluating the chance of giving subsidies using their identity wallets, using a token. And there is a great advantage when you're dealing with subsidies. And that is being done because of account abstractions. That's a great opportunity. And why are governments so, so important? Because they provide us, or they may provide us, with a reverse adoption life cycle. If governments can't help to create the necessary railways of trust, we as an industry should help governments to achieve this goal. And for me that is crucial. We need to focus on that. We need to understand that there are champions, there are government champions, there are institution champions out there that have shared the same ethos that we share, that believe in a future with self-custodial basis, decentralized, non-permissioned, censorship resistant and they need help from our side and we should provide their help. I firmly believe that a decentralized, non-permission, open-source future is possible. And I'm convinced that we need to do that with governments and with institutions and not against them. And that's our goal. Thanks so much. That was quick, man. I got 10 minutes left. We can do a lot of Q&A. We can definitely do a lot of Q&A. We can definitely do a lot of Q&A. Well, feel free to send all your questions here, but let me go to the first one as well. How does the open source nature of the project impact business model and perhaps monetization? No, there's no business model for Croquet. This is an open source protocol, which is being used by several governments. It's not a business. I see. Going through the top questions, is it possible to interoperate with other protocols? Yeah, definitely. I mean, two things here. Of course, we... Over here. Great, thanks. Sorry. So, of course, we have two things there. First is the W4C standards, which define DIDs, which is Decentralized Identifiers and Verified Credentials. And that gives you a level of interoperability. 
But, and I'm happy to say this, this is the first time in any DEVCON that we have a specific workshop of self-sovereign identity. And all of us in the industry are meeting today this afternoon in order to precisely work on interoperability with the cash from privado and so on and we think that there's a bright future in doing that. All right next question why Buenos Aires West start there and I can recall that you are planning to expand to Argentina, Mexico and Colombia. Perhaps you talk about the country expansion plans and the different regulation challenges in each jurisdiction as well. I have a very particular view on regulations. First, Buenos Aires is an extremely crypto-friendly city. You have a great pool of talent. Many of the most important projects in the Ethereum ecosystem and in the crypto ecosystem came out from Buenos Aires, just to mention a few. Open Zeppelin, NAMIC Foundation, I don't know, Crecimiento and Aleph and so on. We have very, very important projects coming out from Buenos Aires. Decentraland, one of the founders of Sandbox. I could keep on mentioning a lot of projects. So Buenos Aires has this amazing talent pool. We are so focused in the crypto industry. And when I, as my former role of Secretary of Innovation of the city, made a call out to the startups and to the innovators in Buenos Aires, everyone started collaborating and we came up with this solution which we're really proud of. Okay. Crypto-friendly city. Where else besides Buenos Aires? Well, I mean, Buenos Aires is a crypto-friendly city, of course, and we are working as a community very, very strongly and now Milagros from Crescimiento will share that with you. We think that Buenos Aires and Argentina could become one of the first, if not the first, crypto country. We're struggling to do that, we're sort of working to do that, and of course we'll need your help. All right. Can you describe your level of partnership with other protocols, maybe some strategic planning, maybe other companies that you're looking to collaborate with? Well, I mean, of course, and we shared that yesterday in a panel that we were with Evan and Anthony from Privado ID. Something like three to four years ago, there was basically nobody speaking about identity but Privado and Disco XYZ by that time. And our vision is there are 8 billion people in the world. There's so, so, so much space that we should work together. Okay. There's a question that we'd like to ask. Sure. The answer there is, where are the credentials stored? On-chain or off-chain? If off-chain, if on-chain, how do you handle? Privacy risk. No, not on-chain. Not on-chain. Never, never, ever, never, ever, ever you can put personal information on chain. The technology behind the standards of digital identifiers and verified credentials work this way. What you have anchored on chain is a digital identifier. That's the only thing which is anchored on chain. Credentials are minted and stored off chain. So just to give an example, you have the DID of the City of Buenos Aires or any other government or institution, and you have your own DID. So when you want to mint a new credential, and those are anchored in our case in CK Sinqueira, and when you need to mint a new credential, you ask the issuer to give you that credential. That happens on a peer-to-peer basis using a protocol which is named Ditcom. And what you are doing is you're signing a JSON digitally signed by this DID in favor of this DID. 
And that is stored in the personal device that the users have in their hand. So when you need to verify that credential, what you do is you check that the credential is valid, that is not new, that is not void and that it was signed by the issuer in favor of the holder. So you rely on on-chain security but the data is store-of-chain. How does QuarkID plan to maintain its technological advantage in the long term? Well, basically, as any other open source protocol, expanding our user base generated interest. The amazing thing is that when you do this again reverse adoption lifecycle and you start to have millions and millions and millions of users, developers start to build on top of it and we have several startups and several companies building on top of the protocol and even expanding it. Just to give an idea, when we were involved in the, this is a very small thing but it's quite nice when we did the the left pop-up city in august the the passport for entering lf was a credential minted in qwerkyd and one amazing guy his name tule he's not, I guess, did this extension of the protocol, connecting it with an Arduino device in order to open a door. So when you access the hub, you scan that, show your credential, and the door got open, which was so nice. And that is, I mean, the community building and developing. Can we squeeze in a question about competition? How is this project different from ProvidoID or PolygonID? I don't know in such a detail the stack of ProvidoID and how they are working now with this Coxyz because they recently merged. But again, we of course believe in competition and we respect each other very much. And as I said before, I don't think... You need to have the technological basics that you need to implement. But this thing is not about technology. This thing uses technology to gain adoption. So what's our focus? Adoption, adoption, adoption. And perhaps on the final question on adoption, since we have a few more minutes remaining as well. Can Argentina's adoption by both its government and citizens serve as a model for other Latin American countries? I wouldn't say serve as a model, but when we're dealing with such border innovations, and the other day I was thinking, isn't there a more obvious case where innovation is needed than identity? We are sending or trying to send rockets to Mars. Elon Musk is catching rockets falling out of space in mid-air. We're discussing artificial general intelligence models, if they're going to rule or not the world. And when we need to identify ourselves, we pick a piece of plastic and start doing like this on camera. And my God, that's so stupid, so stupid. We need to change that. Now, of course, it's identity. Institutions can feel fear. And finding some governments and big institutions as Banco Santander and MACRO that were able of taking the first steps, of course, makes every other one in the place, hey, come on, take a look at those guys. They did it. Why don't we? Alrighty. So that concludes our session. Can you do a quick wrap-up about what you do? Come on, take a look at those guys. They did it. I don't win. All righty. So that concludes our session. Can you do a quick wrap-up about what you do, a key takeaway, maybe a high-level description, and for the audience before we conclude the session? Yeah, of course. 
I mean, for me, the most important part is if we are going to achieve what we think is the ethos and we feel the ethos of this community, of having decentralized non-permission trust and value being exchanged over the Ethereum network, identity is the way to go. Governments and institutions are the adoption path for going from 600 million to 7, 5, 6, whatever billion people in the next 3, 4 to 5 years. And QuarkID and my team is trying to achieve that or help to achieve that with all of the other players in the industry. Thank you so, so much. Thank you. Please give a round of applause to Diego. That is QuarkID bringing South America...", + "sources_streamethId": "67357a999dbb7a90e1d8dcbc", "eventId": "devcon-7", - "slot_start": 1731556800000, - "slot_end": 1731558600000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1nZf4Y4ZKlAYK_rEfdGkjjq6S4WGbMxpwSUXYgi9pq-M", - "resources_slides": null, + "slot_start": 1731557160000, + "slot_end": 1731557580000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1yeFg5w90FisDwxUH5GvlEr2tT53GBDkWeBVcOZI2p7c", + "resources_slides": "https://drive.google.com/file/d/1JspEHBhgNXpk5SWCNFeXr4DLW63ZWIXF/view", "speakers": [ - "diego-fernandez" + "mackenzie-dion" ] }, "vector": [ 0, + 6, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -612469,7 +610742,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -612506,9 +610778,7 @@ 0, 0, 0, - 2, 0, - 2, 0, 0, 0, @@ -612532,11 +610802,9 @@ 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, @@ -612555,7 +610823,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -612563,10 +610830,8 @@ 0, 0, 0, - 2, 0, 0, - 2, 0, 0, 0, @@ -612624,7 +610889,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -612794,7 +611058,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -612806,7 +611069,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -612906,27 +611168,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -613022,14 +611263,41 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, 2, 0, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -613042,43 +611310,58 @@ }, { "session": { - "id": "reading-before-writing-an-approach-to-brain-interfaces", - "sourceId": "AECBRW", - "title": "Reading Before Writing: An Approach to Brain Interfaces", - "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "", - "audience": "Engineering", + "id": "reading-ethereums-tea-leaves-with-xatu-data", + "sourceId": "LGXA3Q", + "title": "Reading Ethereum's Tea Leaves with Xatu data", + "description": "Demonstrate how we collect data from the Ethereum network and how it's used for upgrades, research, and analytics. 
We'll then run through some examples of how to use the tools and public datasets yourself.", "track": "Core Protocol", "type": "Workshop", "expertise": "Intermediate", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ "Layer 1", "Consensus", "Testing", "observability", "Consensus", "Layer 1", "Testing" ], "keywords": [ "Data", "Analysis", "Observability" ], "duration": 3344, "language": "en", "sources_swarmHash": "223c4c92e673157087235386107c1c6f5cfca28a376877be1a72c3d4a7f311a9", "sources_youtubeId": "MKZ7tFBMrsk", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67358b949dbb7a90e19ee9d9", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67358b949dbb7a90e19ee9d9.vtt", "transcript_text": " Hey, good morning everyone. Thank you for coming to our workshop today. Before we start, if you have any questions just throw your hand up. It'll be pretty relaxed, so yeah. Just look into the workshop overview. Introduction, that's us right now. We're going to go into Xatu Genesis, see sort of how it happened, why it happened, and then we'll look into the data sets that have emerged since. We'll then progress into how we're using the data, how others are using the data. Leo over here will then present. He's from the MigaLabs team. He'll present the big blocks test that happened last year, and then we'll have a live tutorial from Tony. He'll go through how he uses Paisa to run his analysis. Sweet. So a little intro on who we are. I'm Sam. That's Andrew. He's hiding. We've both been doing DevOps on the PandaOps team for the last few years. And we both have a deep appreciation for things like Ethereum and observability. As for our team, ethPandaOps, we started out in 2021, being thrown straight into the deep end with the merge. We're embedded in the EF and do DevOps for the protocol. And we post blogs semi-frequently on our website. We try to keep them really high signal. They're the best way to keep up to date with us. Scan the QR code and it'll jump you straight there. Our team has a pretty wide range of projects cooking at all times. You've probably come across a couple of them. For example, if you've ever run a node, you've probably checkpoint synced from an endpoint that was running Checkpointz. You may have also seen Pari and Barnabas whipping core devs in the core dev calls. Yeah, there's a lot of stuff going on. So let's move on to the workshop. To set the scene, it's late 2022 and the merge has just happened. We've switched from proof of work to proof of stake, but in doing so, our consensus mechanism has become a lot more sensitive to time. Suddenly the when of things happening has become a lot more important. And this data at a global and network level doesn't really exist. It's easy to check that a block was seen, but it's much harder to see when that block was actually seen in Sydney compared to Berlin, for example. And now that we're clear from the merge, researchers start hacking away. They want to upgrade the beacon chain. But yeah, they need this timing data to validate their ideas. And they start capturing it themselves. Varying scales, different implementations. 
It's hard to expose, it's hard to validate. There's a bit of potential error in there. So we started to brainstorm ideas on how to solve our problems. We needed to somehow integrate with existing beacon node implementations, as neither of us were really too keen to implement a full beacon node. As DevOps engineers, we'd usually just implement a few Prometheus metrics, put the feet up and call it a day. But millisecond-level precision is pretty important, so that rules out Prometheus metrics. Log aggregation was also another option, but it's definitely a moving target. These things change all the time. It's not really something that the client devs really pay too much attention to. And we'd also just have to turn on debug-level logging. We'd be throwing the kitchen sink at our log aggregation pipeline, and it would potentially be an unreliable result anyway. So that rules out logs. So we started to look at other options. Turns out that the Beacon API has this thing called the event stream. You can subscribe to it, and when the beacon node sees things or does things, it will emit an event. So blocks, attestations, voluntary exits, everything. It was all there. The beautiful thing is that the beacon node implementations all supported this endpoint in a standardized fashion. So what we landed on was Xatu. We used Go, gRPC, and we thought that it would be responsible for just collecting Ethereum timing data. It definitely wasn't going to be trying to store or query that data, but the plan was to derive events and ship them somewhere else. To do this, we initially created two modules. Xatu server would collect events from other modules and send them somewhere else, and Xatu Sentry was our first module. It would run as a sidecar next to every beacon node, subscribe to events, and send them off to Xatu server. We wanted to make sure that all of the events followed the same structure so that it was much easier to add new events in the future. And also, really importantly, since this is like a distributed system, we wanted to make it clear how much you could trust the data. So data coming from a client is not necessarily trusted, but if it's been derived by Xatu server, something that we control, maybe you can trust it a bit more. This example event is for one of our Xatu Sentry nodes running on mainnet, subscribing to a beacon node, and a new block has just come in. I've redacted a couple of the fields, but yeah, that's the general idea. That's all great, but we still hadn't really solved where to send the data. 
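As a concrete illustration of the mechanism just described, the sketch below subscribes to a beacon node's standard Beacon API event stream (`GET /eth/v1/events`) and records a millisecond arrival timestamp for each block event, roughly what a Sentry-style sidecar does. It is a minimal sketch, not Xatu's actual implementation (which is a Go service shipping events over gRPC); the endpoint and `topics` parameter are part of the standard Beacon API, while the processing here is illustrative.

```typescript
// Minimal sketch: subscribe to the standard Beacon API event stream and
// record a millisecond timestamp for each block event as it arrives.
// (Illustrative only; Xatu itself forwards these events over gRPC.)
async function watchBlockEvents(beaconUrl = "http://localhost:5052"): Promise<void> {
  const res = await fetch(`${beaconUrl}/eth/v1/events?topics=block`, {
    headers: { Accept: "text/event-stream" },
  });
  if (!res.ok || !res.body) throw new Error(`event stream failed: ${res.status}`);

  const reader = res.body.pipeThrough(new TextDecoderStream()).getReader();
  let buffer = "";
  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += value;
    // Server-sent events are separated by a blank line.
    const messages = buffer.split("\n\n");
    buffer = messages.pop() ?? "";
    for (const message of messages) {
      const dataLine = message.split("\n").find((l) => l.startsWith("data:"));
      if (!dataLine) continue;
      const event = JSON.parse(dataLine.slice("data:".length));
      // The wall-clock arrival time is the interesting part: when *this*
      // node saw the block, not merely that the block exists.
      console.log({ seenAtMs: Date.now(), slot: event.slot, block: event.block });
    }
  }
}

watchBlockEvents().catch(console.error);
```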
And it turns out it was a lot of data.", "eventId": "devcon-7", - "slot_start": 1731557160000, - "slot_end": 1731557580000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1yeFg5w90FisDwxUH5GvlEr2tT53GBDkWeBVcOZI2p7c", - "resources_slides": null, + "slot_start": 1731555000000, + "slot_end": 1731560400000, + "slot_roomId": "classroom-d", + "resources_presentation": "https://docs.google.com/presentation/d/1Ii_t0zNEsYz1aRQml-w9fPgG3GbBAXs49o3KIFZpdCM", + "resources_slides": "https://drive.google.com/file/d/1eCQVPBN0ksItKaKScucOWbSIZTH7q2qs/view", "speakers": [ - "mackenzie-dion" + "toni-wahrstatter", + "andrew-davis", + "sam-calder-mason", + "leo-bautista-gomez" ] }, "vector": [ - 0, - 6, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -613130,6 +611413,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -613535,6 +611819,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -613614,6 +611899,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -613824,6 +612110,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -613833,6 +612120,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -614061,6 +612349,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -614265,6 +612554,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -614376,20 +612666,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, 2, 0, 0, @@ -614402,68 +612683,57 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "reading-ethereums-tea-leaves-with-xatu-data", - "sourceId": "LGXA3Q", - "title": "Reading Ethereum's Tea Leaves with Xatu data", - "description": "Demonstrate how we collect data from the Ethereum network and how it's used for upgrades, research, and analytics. We'll then run through some examples of how to use the tools and public datasets yourself.", - "track": "Core Protocol", - "type": "Workshop", + "id": "realigning-with-ethereum-from-l1-to-l2", + "sourceId": "PSSQCK", + "title": "(Re)aligning with Ethereum: From L1 to L2", + "description": "In this round table, Justin Drake and Marek Olszewski will explore the rational and tangible pros and cons of (re) launching an Ethereum L2. They will explore the why and how of launching an Ethereum L2 from a technical and ecosystem perspective.", + "track": "Layer 2", + "type": "Panel", "expertise": "Intermediate", - "audience": "Research", + "audience": "Product", "featured": false, - "doNotRecord": false, + "doNotRecord": true, + "keywords": [ + "Transition", + "Ethereum Allignment", + "EVM" + ], "tags": [ "Layer 1", - "Consensus", - "Testing", - "observability", - "Consensus", + "Layer 2s", + "Values", + "EVM", "Layer 1", - "Testing" - ], - "keywords": [ - "Data", - "Analysis", - "Observability" + "Layer 2s", + "Values" ], - "duration": 3344, "language": "en", - "sources_swarmHash": "223c4c92e673157087235386107c1c6f5cfca28a376877be1a72c3d4a7f311a9", - "sources_youtubeId": "MKZ7tFBMrsk", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "67358b949dbb7a90e19ee9d9", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67358b949dbb7a90e19ee9d9.vtt", - "transcript_text": " Hey, good morning everyone. Thank you for coming to our workshop today. Before we start, if you have any questions just throw your hand up. It'll be pretty relaxed, so yeah. Just look into the workshop overview. Introduction, that's us right now. We're going to go into Xar2 Genesis, see sort of how it happened, why it happened, and then we'll look into the data sets that have emerged since. We'll then progress into how we're using the data, how others are using the data. Leo over here will then present. He's from the Megalabs team. 
He'll present the big blocks test that happened last year, and then we'll have a live tutorial from Tony. He'll go through how he uses Paisa to run his analysis. Sweet. So a little intro on who we are. I'm Sam. That's Andrew. He's hiding. We've both been doing DevOps on the PandaOps team for the last few years. And we both have a deep appreciation for things like Ethereum and observability. As for our team, ETH Panda Ops, we started out in 2021, being thrown straight into the deep end with the merge. We're embedded in the F and do DevOps for the protocol. And we post blogs semi-frequently on our website. We try to keep them really high signal. They're the best way to keep up to date with us. Scan the QR code and it'll jump you straight there. Our team has a pretty wide range of projects cooking at all times. You've probably come across a couple of them. For example, if you've ever run a node, you've probably checkpoint synced from an endpoint that was running checkpoint Z. You may have also seen Parry and Barnabas whipping core devs in the core dev calls. Yeah, there's a lot of stuff going on. So let's move on to the workshop. To set the scene, it's late 2022 to the workshop. To set the scene, it's late 2022 and the merger's just happened. We've switched from proof of work to proof of stake, but in doing so, our consensus mechanism has become a lot more sensitive to time. Suddenly the when of things happening has become a lot more important. And this data at a global and network level doesn't really exist. It's easy to check that a block was seen, but it's much harder to see when that block was actually seen in Sydney compared to Berlin, for example. And now that we're clear from the merge, researchers start hacking away. They want to upgrade the beacon chain. But yeah, they need this timing data to validate their ideas. And they start capturing it themselves. Varying scales, different implementations. It's hard to expose, it's hard to validate. There's a bit of potential errors in there. So we started to brainstorm ideas on how to solve our problems. We needed to somehow integrate with existing beacon node implementations as neither of us were really too keen to implement a full beacon node. As DevOps engineers, we'd usually just implement a few Prometheus metrics, put the feet up and call it a day. But millisecond-level precision is pretty important, so that rules out Prometheus metrics. Log aggregation was also another option, but it's definitely a moving target. These things change all the time. It's not really something that the client devs really pay too much attention to. And we'd also just have to turn on debug-level logging. We'd be throwing the kitchen sink at our log aggregation pipeline, and it would potentially be an unreliable result anyway. So that rules out logs. So we started to look at other options. Turns out that the beacon API has this thing called the event stream. You can subscribe to it, and when the beacon node sees things or does things, it will emit an event. So blocks, attestations, voluntary exits, everything. It was all there. The beautiful thing is that the beacon node implementation all supported this endpoint in a standardized fashion. So what we landed on was Zatu. We used Go, gRPC, and we thought that it would be responsible for just collecting Ethereum timing data. It definitely wasn't going to be trying to store or query that data, but the plan was to derive events and ship them somewhere else. To do this, we initially created two modules. 
Zatu's server would create events from other modules, would collect events from other modules and send them somewhere else, and Zatu's Sentry was our first module. It would run as a sidecar next to every beacon node, modules and send them somewhere else. And Zartu Sentry was our first module. It would run as a sidecar next to every beacon node, subscribe to events, and send them off to Zartu server. We wanted to make sure that all of the events followed the same structure so that it was much easier to add new events into the future. And also, really importantly, since this is like a distributed system, we wanted to make it clear how much you could trust the data. So data coming from a client is not necessarily trusted, but if it's been derived by Xar2's server, something that we control, maybe you can trust it a bit more. This example event is for one of our Xaru sentry nodes running on mainnet, subscribing to a beacon node, and a new block has just come in. I've redacted a couple of the fields, but yeah, that's the general idea. That's all great, but we still hadn't really solved where to send the data. And it turns out it was a lot of data.", - "eventId": "devcon-7", - "slot_start": 1731555000000, - "slot_end": 1731560400000, - "slot_roomId": "classroom-d", - "resources_presentation": "https://docs.google.com/presentation/d/1Ii_t0zNEsYz1aRQml-w9fPgG3GbBAXs49o3KIFZpdCM", - "resources_slides": null, "speakers": [ - "toni-wahrstatter", - "andrew-davis", - "sam-calder-mason", - "leo-bautista-gomez" - ] + "justin-drake", + "marek-olszewski", + "david-hoffman" + ], + "eventId": "devcon-7", + "slot_start": 1731488400000, + "slot_end": 1731492000000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1JF1fLnBMiSF5FSuifcPd7xXZqFJpC793NAwW7MxdqhM", + "resources_slides": "" }, "vector": [ 0, 0, 0, 0, - 6, - 0, 0, 0, 0, + 6, 0, 0, 0, @@ -614511,7 +612781,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -614890,6 +613159,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -614918,7 +613188,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -614997,11 +613266,11 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -615211,7 +613480,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -615272,6 +613540,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -615329,6 +613598,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -615411,6 +613681,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -615450,10 +613721,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -615656,7 +613923,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -615774,10 +614040,10 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, 0, 0, 0, @@ -615791,41 +614057,54 @@ }, { "session": { - "id": "realigning-with-ethereum-from-l1-to-l2", - "sourceId": "PSSQCK", - "title": "(Re)aligning with Ethereum: From L1 to L2", - "description": "In this round table, Justin Drake and Marek Olszewski will explore the rational and tangible pros and cons of (re) launching an Ethereum L2. They will explore the why and how of launching an Ethereum L2 from a technical and ecosystem perspective.", + "id": "realizing-the-rollup-centric-roadmap-with-rollup-boost", + "sourceId": "YRTHKH", + "title": "Realizing the Rollup Centric Roadmap with Rollup-Boost", + "description": "L2s are the future, but they're also the past. At this point it's clear that your phone is most likely an L6. 
Let's examine the feedback loops between L1, L2, and beyond and form community standards around multiprovers, distributed block building, inclusion guarantees and more that feed back into L1.", "track": "Layer 2", "type": "Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "Architecture", "Protocol Design", "Scalability", "Appchains", "Decentralization", "User Experience", "MEV", "pre-confirmations", "Appchains", "Architecture", "Decentralization", "MEV", "Protocol Design", "Scalability", "User Experience" ], "keywords": [ "Preconfirmations" ], "duration": 1514, "language": "en", "sources_swarmHash": "1c7523d53da2a837574c9984cb490286cc347f860d0cd7fa94d06b7d23bd592a", "sources_youtubeId": "IYZiYFzIzKc", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673980981b0f83434d1be72e", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673980981b0f83434d1be72e.vtt", "transcript_text": " Hello, hello. Awesome, wow. Cool to see so many people turn out to discuss the rollup-centric roadmap. It's a pretty full house, so I'm going to have to ask everyone to really just squish in. I think there's a lot of people standing in the back, for anyone watching the live stream. Cool. Well, let's get into it. I'm Dan, also DMARS online, and I'll be talking about how we can turbocharge the rollup-centric roadmap with Rollup-Boost. I'll be using a lot of triple R alliteration. If you see any of those, talk to me afterwards and I'll give you a special prize. Cool. So this will be structured in three parts. I'll go over a little reflection on the rollup-centric roadmap. I'll then talk about what a Layer 2 technology tree looks like, and then I'll discuss how we can use Rollup-Boost to accelerate innovation on the Layer 2 technology tree. Cool. So let's get into the first section. So, yeah, we're four years into the rollup-centric roadmap. If you're unfamiliar with what that is, the rollup-centric roadmap was Ethereum's strategic shift towards scaling the network by embracing rollups as a primary solution. If you're unfamiliar with rollups as well, they are a way in which we can process transactions off-chain and submit compressed proofs to Ethereum mainnet, which allows us to increase throughput of these off-chain components without necessarily sacrificing the decentralization of the underlying layer. In this vision, Ethereum becomes a more robust and secure settlement layer while most activity happens on these Layer 2 rollups, and we've already seen that a lot of activity has shifted to Layer 2 rollups. Yeah, and there are a lot of benefits to this, and we'll be going into a lot of details. But one of the biggest benefits, in my opinion, is that we can innovate on a separate layer from the layer one, which doesn't need to go through the entire core dev process or have tons and tons of research and formal analysis before we ever actually start to create a proof of concept or ship something. 
Because a lot of the time when you are trying to build innovative features, there are a lot of unknown unknowns you don't discover until you start building. And we've seen many such cases of that with MEV on Ethereum. Yeah, cool. So let's go into it. So we're four years in. How are we doing? Well, we had roughly three goals. One, increase the transactions per second of Ethereum. Two, reduce transaction costs. And three, outsource innovation to the Layer 2 ecosystem. So let's go through each of those. First, how are we doing on TPS? Well, if you look at L2BEAT, we have scaled Ethereum 26x in transactions per second, which is no small feat. That's more than double per year since we announced the rollup-centric roadmap. So that's pretty awesome. We can check that off. Oh, and on the way to scaling transactions per second, we also discovered maybe that's not the best metric, as there's a lot of activity on chains which can't just be categorized via a transaction. A lot of chains simply just have a transfer, and that's very simple, it only touches the part of the chain associated with your balance and someone else's balance. But when you look at more complex activity, it could be touching hundreds or thousands of accounts, doing very complicated computation. And so we also started to look at GPS as a metric, which is gas per second. And this is a bit small, but as you can see on Conduit's rollup.wtf, we have increased the gas per second by almost 55x compared to the Ethereum layer one. So even more great progress. Cool. So we've checked that off. What about transaction costs? They should be lower just because we're also scaling. And yeah, the slide basically speaks for itself. Most of the top rollups are less than a cent sometimes to transact on. Cool. And so the last one, which is much more complicated and will be the segue into the rest of this talk, is how are we doing on outsourcing of innovation to the Layer 2 ecosystem? And I think this is really important because with the rollup-centric roadmap, we essentially said, hey, everyone focusing on Layer 1, you keep doing that, but we are going to push the boundaries, and the next version of Ethereum will be built on layer twos. And so it's really important that that actually succeeds. And we as a community lent all of our credibility to these layer twos. And so, you know, we should be checking in on how they're doing. So it's a little bit complicated to look at how innovation is transferring between these layers, but there's roughly three ways we can look at it. It's innovations ported from the layer one to the layer two, innovations from the layer two back to the layer one, and then innovation between the layer twos. And so we'll go through each of them. So yeah, what's been ported from layer one to layer two? Well, basically everything. Most of the popular Layer 2 blockchain clients, with the exception of some, actually use a version of the Ethereum layer one blockchain clients that has a very minimal diff to it. And so this means anytime there's an upgrade to the layer one client, layer twos can easily pull it in. You can see this with chains like Optimism where after EIP-1559 went live, they had it very soon after, and all other upgrades. But this is not true for all chains. Layer two to layer one. Basically nothing. 
If you know of any improvements to the layer 1 that have been made as a result of research from layer 2s, please let me know. But there really haven't been any EIPs driven through the layer 2 process that have gone into the layer 1. There is one large notable exception: ZK proving and ZK technology have been massively accelerated by the fact that layer twos have poured tons of research in, usually through the support of a token. But again, none of that has actually gone live on the layer one yet. And then layer two to layer two? There's almost nothing. We are now starting to see some collaboration. I think I just saw the first cross-L2 EIP slash roll-up improvement proposal like yesterday, which is pretty cool, but it was only two roll-ups. So we still have a lot of room to go. Yeah. And I think that just brings me to the sort of main point of this: we have a lot of flourishing gardens on layer 2 ecosystems. But because of the lack of incentives for these layer 2s to collaborate with each other, we've essentially had coordination failure, where we haven't been transferring innovation between each other. And this is my ChatGPT rendition of the scenario. We have a lot of awesome spaces to do cool things, but none of them are really helping each other out. Yeah, so that's like a pretty serious concern, because this is the future of the ecosystem, and how can this be the future of the ecosystem if none of these participants are talking to each other? So I think we can start debugging a few things. One is peripheral tooling lock-in. And so a lot of layer twos have complained that they can't do anything very customizable or unique because they need to satisfy the existing wallet API standards, the existing indexers. If you spin up a new type of block, now integrating with BlockScout or Etherscan is suddenly much harder. All of the tooling, like the SDKs that front-end developers use to integrate with these chains, also becomes much harder. And on top of that, you know, it's one thing for a layer two to modify the tooling to make it work with, like, say, a popular SDK. But then now the maintainers of that SDK need to also maintain this modified version of the SDK. And so the incentives also aren't great there. And what I think is potentially another reason for this is what I call an innovation death loop because of tokens and tribalism. I think it roughly goes like: people launch a token; tokens tie the incentives of groups and individuals together. These shared incentives create the appearance of in-groups and out-groups, which you can see on Twitter when we have rollup teams fighting against each other about very small details and then going separate ways and not creating standards to address these differences. That then leads to all of our tooling being for a specific L2. Yeah, and I think because of this tribal rivalry, innovations are gatekept. And at the extreme, it manifests itself in restrictive licenses. Some of the most popular L2s today, you're not even allowed to fork, or you are allowed to use them but you need to give a certain percentage of sequencer fees back to basically the creators of the chain. And I get that incentives are very hard in open source ecosystems, but I think we should hold ourselves to a higher standard because we gave our credibility to these layer twos. Yeah. So I think for the rest of this talk, I'll identify four key areas to help improve Ethereum's innovation on layer twos.
And I hope that next year, or in two years, someone will give a talk addressing one of these areas, as I think they're critical. Cool. So let's go into a layer two tech tree. If you're unfamiliar with what a tech tree is, you can see them in strategy games: a tech tree or a research tree is a hierarchical visual representation of the possible sequences or upgrades a player can unlock. You can think of this in civilization games, where if you want to reach the iron age, you need to be able to mine iron, you need to be able to smelt it, you need to be able to do that at scale. And there's an institute called the Foresight Institute which is solely focused on basically creating these tech trees and then allocating funding to areas of the tech tree that are underdeveloped. And here's an example of one which is very fun, a longevity tech tree, where one of the top things is body replacement, and in order to do that, we need head transplants, we need brain tissue replacement, we need organ replacement. And so we set out to do this for Ethereum layer 2 to try and help us get to the golden age of layer 2s. So this is the tech tree. I'm going to go through every single leaf. No, just kidding. There's way too much. If you want to follow along, there is a URL and a QR code you can scan to reach it. If this is very interesting to people, we may end up turning this into a website that you could easily modify and add to. Cool. So let's get into it. So this is the high-level tech tree. There's four subtrees in here. There's funds are safe. This deals with the safety properties of the roll-up in that, you know, basically if the roll-up were to go away, if it was to die, if the software was to go down, we want to make sure that the funds inherit the liveness of the layer one. There's Web2 scalability. And so the idea here is that we want to approximate the holy grail of everything on one computer. But obviously, we can't put everything on one computer. Don't let anyone make you think that. And yeah, it should be as easy as you going to a cloud provider and hitting the auto scale button. And now you have compute for all of your use cases; as long as you keep putting money in, or as long as there's someone funding this software, then it should be able to grow and have abundant compute space. A big requirement of that is seamless interoperability. That means you don't need to think about the fact that your assets are on one chain but you want to do compute on another. And then another one is also like plentiful compute environments. So we don't want to just have the EVM. You know, by developers, the largest ecosystem in the world is JavaScript. There's like hundreds of millions of JavaScript developers. There's like only a couple million Solidity developers. So if we really wanna reach the next billion, we need to expand. Cool, so let's get into it. I'm gonna go into the left side of this funds are safe subtree first. And then I'm gonna focus on the censorship resistance portion of liveness. And so under that, under liveness, there is censorship resistance and ledger progression.
And so some stacks actually already have a very basic version of inclusion guarantees where you can force include your transaction on the L2 from the L1. The only problem with that is it's actually at a granularity greater than the L2 or even L1 block time. So you only get the guarantee that your transaction will be included in, like, you know, an epoch of L2 blocks, which could be, you know, tens, hundreds of blocks. Yeah, and so there's a lot of areas here. As well, a very popular topic is multiple concurrent proposers on Ethereum L1 right now, as well as pre-confirmations, encrypted mempools. These are all things we need to improve these inclusion and execution guarantees, but we actually have no one working on them on L2s. And specifically, no one is creating shared standards across L2s to enable these. So I think, yeah, I think there's a heroic opportunity here where you can just skip the politics. Just go build multiple concurrent proposers on a layer two. Yeah, you can also experiment with FOCIL and other censorship resistance research proposals, as well as encrypted mempools. Cool, the next part of the tree, the right side of this funds are safe subtree, is under validity. And this one is also very exciting. Yeah, cool. So under validity, we developed a multi-prover. The term multi-prover has been a known term. But the idea is no rollup will likely ever trust a single proving system. And so the idea here is rollups want a proving system so that they can have faster finality. Right now most rollups are optimistic rollups, meaning they get pushed to the L1, and it takes some amount of time, which is very annoying to users, for someone to say, like, there's no fraud here. This state transition was good. You didn't have your funds stolen. But validity proofs allow us to be confident in that state transition. And so the thing is, a lot of people have developed validity proofs, but none of these teams are talking to each other. And on top of that, it's very unlikely that we will ever trust just a single proving system because of how new this crypto and technology is. Some of these proving systems are 80K lines of circuit code, and good luck auditing that. So I think probably for the next two to four years, we will not trust a single proving system, so we need multiple. And under that family, there are ZK proofs, and then also very recently, there's been a lot of teams developing TEE proofs. One specific example is we've been collaborating with Automata to develop TEE validity proofs, and the performance is looking pretty good, actually. We've been able to prove specific blocks in TDX in 0.2 milliseconds, and so we think this should be an additional layer of proving. But yeah, I think the main thing here is there's a lot of teams, and none of them are talking to each other. So we need a standardized interface to align proof system development teams under one multi-prover. And also we need this to enable prover markets so that anyone is able to provide these proofs for these layer twos. Cool. We'll get into the next part of this tech tree, which is Web2 scalability. As I said, the idea here is you want auto-scaling on these servers. You don't want to think about it. I don't want to, you know, get woken up by DevOps at 2 a.m. and like, oh, we need more capacity.
But in order to get there in this world where we could have multiple chains for a single application, we need seamless interoperability and this is just a massive mess. There are so many things here. This tree is broken up into out of cluster and in cluster. The idea here is that stacks like Optimism are creating a cluster, meaning they have multiple roll-ups that have some type of shared settlement and shared infrastructure and governance that allow them to make assumptions which make it easier for them to communicate with each other. And this doesn't mean you can't communicate with the outside world, but what it does mean is if I'm developing an application and I have a neighbor that I need to interact with frequently, I can put the walls down a little bit and transact with them more easily. And then if I need to call out to some more advanced or some other specialized use case, then I can just go through the regular Internet. Yeah. So that's out of cluster and in cluster. I think honestly the biggest thing here is literally just getting the layer to use to talk to each other. This doesn't even need to require, like you don't need to develop anything for this opportunity or spec anything. Just go talk to all of them, figure out what they want, write that down somewhere. That would already be a huge improvement. And then lastly I'll go through plentiful compute environments. And so this one's pretty fun I think because recently the Ethereum Foundation has launched a Manhattan project to formally verify a RISC-V VM. And so I think that's something you could easily do on like top of the OP stack or any other stack is start experimenting with different VM types. On top of that we could be experimenting with other privacy-enhanced computation as well, such as like FHE VMs, ZK VMs. There are some experimenting, but there's no open standards. They're all either partially closed source or in a very obfuscated code base that you need to be like an engineer on the team to understand. Cool. And so that's opportunity number four, experimenting with virtual machines on open source and open license. Don't be fooled by open source being the only requirement. Some are open source, not all of them are open license. And two popular ones here are like RETH and RollupGeth. Cool, so those are the four areas of improvement I think we can make today is experiment with inclusion and execution guarantees, build a multi-prover interface, help coordinate interop standards and build open source and open license code bases with different VMs. And so I'll get into the last section on rollup boost. So this is something this is a product we've been developing at Flashbots. It's open source, open license. And so I'll start with the basics. So in Ethereum, there is something called the engine API. It's how the consensus layer and the execution layer talk to each other. In order to be like maximally Ethereum compatible, you actually also ideally are using this engine API in your layer two architecture. And so, yeah, the ideal scenario is that we also use this in the CL and EL, and there are some stacks that do. And so we at Flashbot developed the software to just sit in between the CL and the EL. It uses the additional API that these two clients use to talk to each other, and we're able to send these requests between each other. We're able to proxy them to other components in the ecosystem. 
The example we've built today is a block builder, so you can add customizations to your chain, like revert protection or faster pre-confirmations, all through this block builder. But in the future, we also see this component as being critical in outsourcing proof production to, like, prover markets, and even multiple different types of prover markets. So yeah, I think the really cool thing about this is you don't need to fork the OP stack or the other stacks you're dealing with. If they use the engine API, this works out of the box and you can start adding customizations to the chain. And it's permissionless to innovate. And I think if we're able to innovate without these, like, you know, if I go to the repo and I don't need to sign a contract to advance the code base, and the code base is also maximally compatible with the layer one code bases, then this is like how we recreate this innovation loop. Yeah, also on top of this, Rollup Boost is powering the latest Unichain launch. And then on top of that, WorldCoin also recently announced that they're using it to enable a new type of block building algorithm that prioritizes humans. So there's already a ton of innovation happening in the space on Rollup Boost. And I think it's going to be one of the new innovation hubs for Ethereum Layer 2. So if you see me later, chat with me about how we could build CR committees, multi-chain block builders, and multi-provers on Rollup Boost today. Thanks. All right. Thank you so much, Dan, for a very thorough presentation. We do have a few minutes to answer maybe a couple of questions. Oh, yeah. So let's start with, has it ever happened that innovation at a rollup or Layer 2 was later on adopted on Layer 1? I don't know of any scenarios where this has happened. So yeah, I think that's the big problem right now. Great. All right. Next is, what do you think about the fractional liquidity from L1 to L2? Would that be the main issue? Interesting. Fractional liquidity. Yeah, I guess it's like, yeah, the idea of fragmentation. So if my liquidity is on one roll-up, it doesn't work on the other roll-up. I think if we solve the seamless interoperability subtree, then you won't even notice this. All right. We do have still a few more minutes. We'd like to answer, does Arbitrum need a boost? Does Arbitrum need a boost? I think they're boosting time, is what I've heard, which is a joke. I think they have an algorithm called time boost. I don't totally know what this question means. Okay. Let's see. What is next? Is it this? What about new innovations on L2s, not transferred? Yeah, there are some cool innovations on Layer 2s. I think, as I mentioned, like ZK, ZK tech is like by far the coolest. Yeah, there are some people deploying like Solana, like the SVM and their architecture, as a layer two, which I also think is cool. I think we should keep pushing the window on different blockchain clients and architectures on layer two. All right, last few seconds. What do you think of Solana? What do I think of Solana? Solana's pretty cool. I think Solana is what it looks like if you try to optimize a blockchain client for performance to the max. But I don't think it's what a blockchain client that optimizes for innovation looks like. Interesting take. And one last is, what is Rollup Boost?
Okay, what is Rollup Boost? It is a sidecar you can use on Layer 2 blockchain clients to enable features without having to fork the underlying blockchain stack. Amazing. Thank you so much, Dan. And please give him a round of applause.", "eventId": "devcon-7", - "slot_start": 1731488400000, - "slot_end": 1731492000000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1JF1fLnBMiSF5FSuifcPd7xXZqFJpC793NAwW7MxdqhM" + "slot_start": 1731479400000, + "slot_end": 1731481200000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1B_rCk0bkXtF-tfbBfcDeRBqZxjx4AKThyOjuNnKCVhw", + "resources_slides": "https://drive.google.com/file/d/1i_7sRiZ93vorEEhDkZSPbBS4rflcWKoz/view", + "speakers": [ + "daniel-marzec" + ] }, "vector": [ 0, @@ -616262,8 +614541,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -616372,11 +614649,10 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -616581,6 +614857,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -616624,12 +614901,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -616685,6 +614957,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -616695,6 +614968,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -616703,7 +614977,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -616722,6 +614995,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -616786,7 +615060,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -616912,6 +615185,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -617146,11 +615420,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -617164,53 +615438,45 @@ }, { "session": { - "id": "realizing-the-rollup-centric-roadmap-with-rollup-boost", - "sourceId": "YRTHKH", - "title": "Realizing the Rollup Centric Roadmap with Rollup-Boost", - "description": "L2s are the future, but they're also the past. At this point it's clear that your phone is most likely an L6. Let's examine the feedback loops between L1, L2, and beyond and form community standards around multiprovers, distributed block building, inclusion guarantees and more that feed back into L1.", - "track": "Layer 2", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "reclaiming-our-dollar8-billion-funding-public-goods-with-stablecoin-profits", + "sourceId": "UCFEEN", + "title": "Reclaiming our $8 billion: funding public goods with stablecoin profits", + "description": "Ethereum is stuck in a dark deal with two companies. They control ~all stablecoins; facilitate 49% of DEX swaps; and can overrule all future hardforks:\r\n\r\nCircle & Tether.\r\n\r\nIn return, they reap $7.4B in stablecoin earnings (2023).\r\n\r\nBut wait—that’s the interest on OUR money! 
We should be in control.\r\n\r\nGiving to holders is illegal, but funding public goods isn’t.\r\n\r\nIf we coordinate, we can switch to nonprofit stablecoins and reclaim billions for eg Protocol Guild, R&D, DeFi infra, OSS—or other causes.", + "track": "Coordination", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Architecture", - "Protocol Design", - "Scalability", - "Appchains", - "Decentralization", - "User Experience", - "MEV", - "pre-confirmations", - "Appchains", - "Architecture", - "Decentralization", - "MEV", - "Protocol Design", - "Scalability", - "User Experience" + "Decentralization Improvements", + "Censorship Resistance", + "Open Source Software", + "stablecoin", + "Censorship Resistance", + "Decentralization Improvements", + "Open Source Software" ], "keywords": [ - "Preconfirmations" + "Stablecoins" ], - "duration": 1514, + "duration": 520, "language": "en", - "sources_swarmHash": "1c7523d53da2a837574c9984cb490286cc347f860d0cd7fa94d06b7d23bd592a", - "sources_youtubeId": "IYZiYFzIzKc", + "sources_swarmHash": "4760ed7b4ddcd4285ecd45c32a20bb206c281acfb98eb7a3d1b45e15e7e3f847", + "sources_youtubeId": "J2aw52g_OJI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673980981b0f83434d1be72e", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673980981b0f83434d1be72e.vtt", - "transcript_text": " Tanya Cushman Reviewer Reviewer Hello, hello. Awesome, wow. Cool to see so many people turn out to discuss the Rollup-centric roadmap. It's a pretty full house, so I'm going to have to ask everyone to really just squish in. I think there's a lot of people standing in the back for anyone watching the live stream. Cool. Well, let's get into it. I'm Dan, also DMARS online, and I'll be talking about how we can turbocharge the rollup centric roadmap with Rollup Boost. I'll be using a lot of triple R alliteration. If you see any of those, talk to me afterwards and I'll give you a special prize. Cool. So this will be structured in three parts. I'll go over a little reflection on the rollup centric roadmap. I'll then talk about what a layer two technology tree looks like and then I'll discuss talk about what a Layer 2 technology tree looks like, and then I'll discuss how we can use Rollup Boost to accelerate innovation on the Layer 2 technology tree. Cool. So let's get into the first section. So, yeah, we're four years into the Rollup-centric roadmap. If you're unfamiliar with what that is, the Rollup-centric roadmap was Ethereum's strategic shift towards scaling the network by embracing rollups as a primary solution. If you're unfamiliar with rollups as well, they are a way which we can process transactions off-chain and submit compressed proofs to Ethereum mainnet, which allows us to increase throughput of these off-chain components without necessarily sacrificing the decentralization of the underlying layer. In this vision, Ethereum becomes a more robust and secure settlement layer while most activity happens on these layer two roll-ups and we've already seen that a lot of activity has shifted to a lot of layer two rollups. Yeah, and there's a lot of benefits of this, and we'll be going into a lot of details. 
But one of the biggest benefits in my opinion, is that we can innovate on a separate layer than the layer one, which doesn't need to go through the entire core dev process or have tons and tons of research and formal analysis before we ever actually start to create a proof of concept or ship something. Because a lot of the times when you are trying to build innovative features, you typically don't discover... There's a lot of unknowns, unknowns you don't discover until you start building. And we've seen many such cases of that with MEV on Ethereum. Yeah, cool. So let's go into it. So we're four years in. How are we doing? Well, we had roughly three goals. One, increase the transaction per second of Ethereum. Two, reduce transaction costs. And three, outsource innovation to the layer two ecosystem. So let's go through each of those. First, how are we doing on TPS? Well, if you look at L2B, we have scaled Ethereum 26x in transactions per second, which is no small feat. That's more than double per year since we've announced the rollup centric roadmap. So that's pretty awesome. We can check that off. Oh, and on the way to scaling transactions per second, we also discovered maybe that's not the best metric, as there's a lot of activity on chains, which aren't just, which can't just be basically categorized via a transaction so a lot of chains simply just have a transfer and that's like very simple it only touches the part of the chain associated with your balance and someone else's balance but when you look at more complex activity it could be touching hundreds or thousands of accounts doing very complicated computation and so we also started to look at GPS as a metric, which is gas per second. And this is a bit small, but if you can see on conduits, rollup.wtf, we have increased the gas per second by almost 55x compared to the Ethereum layer one. So even more great progress. Cool. So we've checked that off. What about transaction costs? They should be lower just because we're also scaling. And yeah, the slide basically speaks for itself. Most of the top roll-ups are less than a cent sometimes to transact on. Cool. And so the last one, which is much more complicated and will be the segue into the rest of this talk, is how are we doing on outsourcing of innovation to the Layer 2 ecosystem? And I think this is really important because with the roll-up-centric roadmap, we essentially said, hey, everyone focusing on Layer 1, you keep doing that, but we are going to push the boundaries and the next version of Ethereum will be built on layer twos. And so it's really important that that actually succeeds. And we as a community lended all of our credibility to these layer twos. And so, you know, we should be checking in on how they're doing. So it's a little bit complicated to look at how innovation is transferring between these layers, but there's roughly three ways we can look at it. It's innovations ported from the layer one to the layer two, innovations from the layer two back to the layer one, and then innovation between the layer twos. And so we'll go through each of them. So yeah, what's been ported from layer one to layer two? Well, basically everything. Most of the popular blockchain clients, layer two blockchain clients, with the exception of some, actually use a minimized, a version of the Ethereum one blockchain clients that has like a very minimum diff to it. And so this means anytime there's an upgrade to the layer one client, layer twos can easily pull it in. 
You can see this with chains like Optimism where after EIP 1559 went live, they had it very soon after and all other upgrades. But this is not true for all chains. Layer two to layer one. Basically nothing. If you know of any improvements to the layer 1 that have been made, because as a result of research from layer 2s, please let me know. But there really haven't been any EIPs driven through the layer 2 process that have gone into the layer 1. There is one large notable exception that ZK proving and zk technology has like massive massively been accelerated by the fact that layer twos have poured tons of research in usually through the support of a token but again none of that has actually gone live on the layer one yet and then layer two to layer two there's almost nothing we are now starting to see some collaboration. I think I just saw the first cross L2 EIP slash roll-up improvement proposal like yesterday, which is pretty cool, but it was only two roll-ups. So we still have a lot of room to go. Yeah. And I think that just brings me to the sort of the main point of this is we have a lot of flourishing gardens on layer 2 ecosystems. But because of coordination failure and the lack of incentives for these layer 2s to collaborate with each other, we've essentially had coordination failure where we haven't been transferring innovation between each other. And this is my chat GPT rendition of the scenario. We have a lot of awesome spaces to do cool things, but none of them are really helping each other out. Yeah, so that's like a pretty serious concern because this is the future of the ecosystem and the future how can this be the future of the ecosystem if none of these participants are talking to each other? So I think we can start debugging a few things. One is peripheral tooling lock in. And so a lot of layer twos have complained that they can't do anything very customizable or unique because they need to satisfy the existing wallet API standards, the existing indexers. If you spin up a new type of block, now integrating with BlockScout or Etherscan is suddenly much harder. All of the tooling, like the SDKs that front-end developers use to integrate with these chains also become much harder. And on top of that, you know, it's one thing for a layer to modify the tooling to make it work with, like, say, a popular SDK. But then now the maintainers of that SDK need to also maintain this modified version of the SDK. And so the incentives also aren't great there. And what I think is potentially another reason for this is what I call an innovation death loop because of tokens and tribalisms. I think it roughly goes like people launch a token, tokens tie the incentives of groups and individuals together. These shared incentives create the appearance of in-groups and out groups and out groups which you can see on Twitter when we have a roll up teams fighting against each other about very small details and then going separate ways and not creating standards to address these differences. That then leads to all of our tooling being for a specific L2. Yeah, and I think because of this tribal rivalry, innovations are gatekept. And even the extreme of this is it manifests itself in restrictive licenses. Some of the most popular L2s today, you're not even allowed to fork or you are allowed to use them, but you need to give a certain percentage of sequencer fees back to basically the creators of the chain. 
And I get that incentives are very hard in open source ecosystems, but I think we should hold ourselves to a higher standard because we gave our credibility to these layer twos. Yeah. So I think for the rest of this talk, I'll identify four key areas to help improve Ethereum's innovation on layer twos. And I hope that next year someone or in two years, someone will give a talk addressing one of these areas as I think they're critical. Cool. So let's go into a layer two tech tree. If you're unfamiliar with what a tech tree is, it's, you know, you can see them in strategy games, a tech tree or a research tree is a hierarchical visual representation of the possible sequences or upgrades a player can unlock. You can think of this in civilization games where if you want to reach the iron age, you need to be able to mine iron, you need to be able to melt it, you need to be able to do that at scale. And there's another, so there's an institute called the foresight institute which solely is focused on basically creating these tech trees and then allocating funding to areas of the tech tree that are underdeveloped. And here's an example of one which is very fun of a longevity tech tree where one of the top things is body replacement and in order to to do that, we need head transplants, we need brain tissue replacement, we need organ replacement. And so we set out to do this for Ethereum layer 2 to try and help us get to the golden age of layer 2s. So this is the tech tree. I'm going to go through every single leaf. No, just kidding. There's way too much. If you want to follow along, there is a URL and a QR code you can scan to reach it. If this is very interesting to people, we may end up turning this into a website that you could easily modify and add to. Cool. So let's get into it. So this is the high-level tech tree. There's four subtrees in here. There's funds are safe. This deals with the safety properties of the roll-up in that, you know, basically if the roll-up were to go away, if it was to die, if the software was to go down, we want to make sure that the funds inherit the liveness of the layer one. There's web to scalability. And so the idea here is that we want to approximate the holy grail of everything on one computer. But obviously, we can't put everything on one computer. Don't let anyone let you think that. And yeah, it should be as easy as you going to a cloud provider and hitting the auto scale button. And now you have compute for all of your use cases as long as you keep putting money in or as long as there's someone funding this software, then it should be able to grow and have abundant compute space. A big requirement of that is seamless interoperability. That means you don't need to think about the fact that you're on one chain and your assets are on one chain, but you want to do compute on another. And then another one is also like plentiful compute environment. So we don't want to just have the EVM. There's, you know, by developers, the largest ecosystem in the world is JavaScript. There's like hundreds of millions of JavaScript developers. There's like only a couple million Solidity developers. So if we really want wanna reach the next billion, we need to expand. Cool, so let's get into it. I'm gonna go into the left side of this funds is safe subtree first. And then I'm gonna focus on the censorship resistance portion of liveness. And so under that, under liveness, there is censorship resistance and ledger progression. 
Ledger progression is just the idea that the blocks keep coming. They keep going. The system doesn't go down. And censorship resistance is really about getting either inclusion guarantees or execution guarantees. And so some stacks actually already have a very basic version of inclusion guarantees where you can force include your transaction on the L2 from the L1. The only problem with that is it's actually at a greater than the L2 or even L1 block time granularity. So you only get the guarantee that your transaction will be included in the, like, you know, like in the epoch of L2 blocks, which could be, you know, tens, hundreds of blocks. Yeah, and so there's a lot of areas here. As well, a very popular topic is multiple concurrent proposers on Ethereum L1 right now, as well as pre-conformations, encrypted mempools. These are all things we need to improve these inclusion and execution guarantees, but we actually have no one working on them on L2s. And specifically, no one is creating shared standards across L2s to enable these. So I think, yeah, I think there's a heroic opportunity here where you can just skip the politics. Just go build multiple concurrent proposers on a layer two. Yeah, you can also experiment with fossil other censorship resistance research proposals, as well as encrypted mempools. Cool, the next part of the tree, the right side of this funds is safe, is under validity. And this one is also very exciting. Yeah, cool. So under validity, we developed a multi-prover. We developed this term, multi-prover. It's been a known term. But the idea is no rollup will likely ever trust a single proving system. And so the idea here is rollups want a proving system so that they can have faster finality. Right now most rollups are optimistic rollups, meaning they get pushed to the L1, and it takes some amount of time, which is very annoying to users, for someone to say, like, there's no fraud here. This state transition was good. You didn't have your funds stolen. But validity proofs allow us to be confident in that state transition. And so the thing is, a lot of people have developed validity proofs, but none of these teams are talking to each other. And on top of that, it's very unlikely that we will ever trust just a single proving system because of how new this crypto and technology is. Some of these proving systems are 80K lines of circuit code, and good luck auditing that. So I think probably for the next two to four years, we will not trust a single proving system, so we need multiple. And under that family, there are ZK proofs, and then also very recently, there's been a lot of teams developing TE proofs. One specifically is we've been collaborating with Automata to develop TE validity proofs, and the performance is looking pretty good, actually. We've been able to prove in TDX in 0.2 milliseconds specific blocks, and so we think this should be an additional layer of proving. But yeah, I think the main thing here is there's a lot of teams, and none of them are talking to each other. So we need a standardized interface to align proof system development teams under one multi-prover. And also we need this to enable prover markets so that anyone is able to provide these proofs for these layer twos. Cool. We'll get into the next part of this tech tree, which is Web2 scalability. As I said, the idea here is you want auto-sc scaling on these servers. You don't want to think about it. I don't want to, you know, get woken up by DevOps at 2 a.m. and like, oh, we need more capacity. 
And I need to go like manually deploy script to launch another chain. And, you know, all of the configurations associated with that. But in order to get there in this world where we could have multiple chains for a single application, we need seamless interoperability and this is just a massive mess. There are so many things here. This tree is broken up into out of cluster and in cluster. The idea here is that stacks like Optimism are creating a cluster, meaning they have multiple roll-ups that have some type of shared settlement and shared infrastructure and governance that allow them to make assumptions which make it easier for them to communicate with each other. And this doesn't mean you can't communicate with the outside world, but what it does mean is if I'm developing an application and I have a neighbor that I need to interact with frequently, I can put the walls down a little bit and transact with them more easily. And then if I need to call out to some more advanced or some other specialized use case, then I can just go through the regular Internet. Yeah. So that's out of cluster and in cluster. I think honestly the biggest thing here is literally just getting the layer to use to talk to each other. This doesn't even need to require, like you don't need to develop anything for this opportunity or spec anything. Just go talk to all of them, figure out what they want, write that down somewhere. That would already be a huge improvement. And then lastly I'll go through plentiful compute environments. And so this one's pretty fun I think because recently the Ethereum Foundation has launched a Manhattan project to formally verify a RISC-V VM. And so I think that's something you could easily do on like top of the OP stack or any other stack is start experimenting with different VM types. On top of that we could be experimenting with other privacy-enhanced computation as well, such as like FHE VMs, ZK VMs. There are some experimenting, but there's no open standards. They're all either partially closed source or in a very obfuscated code base that you need to be like an engineer on the team to understand. Cool. And so that's opportunity number four, experimenting with virtual machines on open source and open license. Don't be fooled by open source being the only requirement. Some are open source, not all of them are open license. And two popular ones here are like RETH and RollupGeth. Cool, so those are the four areas of improvement I think we can make today is experiment with inclusion and execution guarantees, build a multi-prover interface, help coordinate interop standards and build open source and open license code bases with different VMs. And so I'll get into the last section on rollup boost. So this is something this is a product we've been developing at Flashbots. It's open source, open license. And so I'll start with the basics. So in Ethereum, there is something called the engine API. It's how the consensus layer and the execution layer talk to each other. In order to be like maximally Ethereum compatible, you actually also ideally are using this engine API in your layer two architecture. And so, yeah, the ideal scenario is that we also use this in the CL and EL, and there are some stacks that do. And so we at Flashbot developed the software to just sit in between the CL and the EL. It uses the additional API that these two clients use to talk to each other, and we're able to send these requests between each other. We're able to proxy them to other components in the ecosystem. 
The example we've built today is a block builder, so you can add customizations to your chain, like revert protection or faster pre-confirmations, all through this block builder so you can add customizations to your chain like revert protection or faster pre-conformations all through this block builder. But in the future, we also see this component as being critical in outsourcing proof production to like prover markets and even multiple different types of prover markets. So yeah, I think the really cool thing about this is you don't need to fork the OP stack or the other stacks you're dealing with. If they use the engine API, this works out of the box and you can start adding customizations to the chain. And it's permissionless to innovate. And I think if we're able to innovate without these, like, you know, if I go to the repo and I don't need to sign a contract to advance the code base, and the code base is also maximally compatible with the layer one code bases, then this is like how we recreate this innovation loop. Yeah, also on top of this, Rollup Boost is powering the latest Unichain launch. And then on top of that, WorldCoin also recently announced that they're using it to enable a new type of block building algorithm that prioritizes humans. So there's already a ton of innovation happening in the space on Rollup Boost. And I think it's going to be one of the new innovation hubs for Ethereum Layer 2. So if you see me later, chat with me how we could build CR committees, multi-chain block builders, and multi-provers on Rollup Boost today. Thanks. All right. Thank you so much, Dan, for a very thorough presentation. We do have a few minutes to answer maybe a couple of questions. Oh, yeah. So let's start with, has it ever happened that innovation at Rollup or Layer 2 was later on adopted on Layer 1? I don't know of any scenarios this has happened. happened at innovation at roll-up or layer two was later on adopted on layer one? I don't know of any scenarios this has happened. So yeah, I think that's the big problem right now. Great. All right. Next is, what do you think about the fractional liquidity from L1 to L2? Would that be the main issue? Interesting. Fractional liquidity. Yeah, I guess it's like, yeah, the idea of fragmentation. So if my liquidity is on one roll-up, it doesn't work on the other roll-up. I think if we solve the seamless interoperability subtree, then you won't even notice this. All right. We do have still a few more minutes. We'd like to answer, does Arbitrum need a boost? Does Arbitrum need a boost? I think they're boosting time is what I've heard, which is a joke. I think they have an algorithm called time boost. I don't totally know what this question means. Okay. Let's see. What is next? Is it this? What about new innovations on L2s? Not transferred. Yeah, there are some cool innovations on Layer 2s. I think, as I mentioned, like ZK, ZK tech is like by far the coolest. Yeah, there are some people deploying like Solana, like the SVM and their architecture as a layer too, which I also think is cool. I think we should keep pushing the window on different blockchain clients and architectures on layer two. All right, last few seconds. What do you think of Solana? What do I think of Solana? Solana's pretty cool. I think Solana is what it looks like if you try to optimize a blockchain client for performance to the max. But I don't think it's what a blockchain client that optimizes for innovation looks like. Interesting take. And one last is, what is Rollup Boost? 
Okay, what is Rollup Boost? It is a sidecar you can use on Layer 2 blockchain clients to enable features without having to fork the underlying blockchain stack. Amazing. Thank you so much, Dan. And please give him a round of applause.", + "sources_streamethId": "6735dbd39dbb7a90e16545d3", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731479400000, - "slot_end": 1731481200000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1B_rCk0bkXtF-tfbBfcDeRBqZxjx4AKThyOjuNnKCVhw", - "resources_slides": null, + "slot_start": 1731582000000, + "slot_end": 1731582600000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1AC1UEYubPRYIH9AzVy-E905hMuR67GeAMdfpHpaGm0g", + "resources_slides": "https://drive.google.com/file/d/1vrDVm4VXC4hVQJYZmvsFQ7ZsPWsvo7XU/view", "speakers": [ - "daniel-marzec" + "garm" ] }, "vector": [ @@ -617221,11 +615487,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -617967,6 +616233,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -617981,7 +616248,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -618011,7 +616277,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -618027,7 +616292,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -618062,12 +616326,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -618078,7 +616342,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -618105,7 +616368,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -618114,6 +616376,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -618296,7 +616559,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -618416,6 +616678,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -618531,7 +616794,6 @@ 2, 0, 0, - 0, 2, 0, 0, @@ -618544,51 +616806,50 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "reclaiming-our-dollar8-billion-funding-public-goods-with-stablecoin-profits", - "sourceId": "UCFEEN", - "title": "Reclaiming our $8 billion: funding public goods with stablecoin profits", - "description": "Ethereum is stuck in a dark deal with two companies. They control ~all stablecoins; facilitate 49% of DEX swaps; and can overrule all future hardforks:\r\n\r\nCircle & Tether.\r\n\r\nIn return, they reap $7.4B in stablecoin earnings (2023).\r\n\r\nBut wait—that’s the interest on OUR money! We should be in control.\r\n\r\nGiving to holders is illegal, but funding public goods isn’t.\r\n\r\nIf we coordinate, we can switch to nonprofit stablecoins and reclaim billions for eg Protocol Guild, R&D, DeFi infra, OSS—or other causes.", - "track": "Coordination", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Research", + "id": "redefined-interactions-transforming-user-experience-with-intents", + "sourceId": "Q3SF8Q", + "title": "Redefined Interactions: Transforming User Experience with Intents", + "description": "Intents are on their way to improving users' interactions with DeFi. This panel of experts from leading protocols will discuss the impact of Intents on user experience, focusing on streamlining processes, enhancing security, increasing decentralization, and making DeFi more accessible. 
Explore the future of user interactions in DeFi and the collaborative efforts driving these advancements.", + "track": "Usability", + "type": "Panel", + "expertise": "Intermediate", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Decentralization Improvements", - "Censorship Resistance", - "Open Source Software", - "stablecoin", - "Censorship Resistance", - "Decentralization Improvements", - "Open Source Software" + "User Experience", + "Intents", + "defi", + "Intents", + "User Experience" ], "keywords": [ - "Stablecoins" + "DeFi" ], - "duration": 520, + "duration": 2896, "language": "en", - "sources_swarmHash": "4760ed7b4ddcd4285ecd45c32a20bb206c281acfb98eb7a3d1b45e15e7e3f847", - "sources_youtubeId": "J2aw52g_OJI", + "sources_swarmHash": "b62c90d88cd31739d845fd3c69549705e18eb151c919421eddbcaa08ad72ab94", + "sources_youtubeId": "hId-FQUOpJ0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735dbd39dbb7a90e16545d3", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731582000000, - "slot_end": 1731582600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1AC1UEYubPRYIH9AzVy-E905hMuR67GeAMdfpHpaGm0g", - "resources_slides": null, + "slot_start": 1731406200000, + "slot_end": 1731409800000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1pQP77cQCgded-4Om05CtsNholtmf6N8hdDeVEVTDvKU", + "resources_slides": "https://drive.google.com/file/d/1Dhpszofn8VleUqzXfNDiIh0QAnU7aaMC/view", "speakers": [ - "garm" + "agustin-grosso", + "juli-corti", + "ran-hammer", + "dominik-hell", + "shawn-odonaghue" ] }, "vector": [ @@ -618600,9 +616861,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -619139,12 +617397,14 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, + 6, + 6, + 6, + 6, + 6, 0, 0, 0, @@ -619349,7 +617609,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -619358,6 +617617,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -619396,6 +617656,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -619441,7 +617702,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -619491,7 +617751,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -619533,6 +617792,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -619794,7 +618054,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -619903,12 +618162,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 2, @@ -619920,54 +618179,35 @@ 0, 0, 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "redefined-interactions-transforming-user-experience-with-intents", - "sourceId": "Q3SF8Q", - "title": "Redefined Interactions: Transforming User Experience with Intents", - "description": "Intents are on their way to improving users' interactions with DeFi. This panel of experts from leading protocols will discuss the impact of Intents on user experience, focusing on streamlining processes, enhancing security, increasing decentralization, and making DeFi more accessible. 
Explore the future of user interactions in DeFi and the collaborative efforts driving these advancements.", - "track": "Usability", - "type": "Panel", - "expertise": "Intermediate", - "audience": "Product", + "id": "redefining-boundaries-in-the-infinite-garden", + "sourceId": "FUZDNX", + "title": "Redefining boundaries in the Infinite Garden", + "description": "Don’t miss the Devcon Opening Ceremony, where we’ll set the stage for an incredible event ahead, with talks from Vitalik Buterin (Founder of Ethereum), Aya Miyaguchi (Executive Director of the Ethereum Foundation), Josh Stark (Ethereum Foundation Leadership), Skylar Weaver (Devcon Team Lead), and more surprise guests.", + "track": "Real World Ethereum", + "type": "Talk", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "User Experience", - "Intents", - "defi", - "Intents", - "User Experience" - ], - "keywords": [ - "DeFi" - ], - "duration": 2896, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "b62c90d88cd31739d845fd3c69549705e18eb151c919421eddbcaa08ad72ab94", - "sources_youtubeId": "hId-FQUOpJ0", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731406200000, - "slot_end": 1731409800000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1pQP77cQCgded-4Om05CtsNholtmf6N8hdDeVEVTDvKU", - "resources_slides": null, "speakers": [ - "agustin-grosso", - "juli-corti", - "ran-hammer", - "dominik-hell", - "shawn-odonaghue" - ] + "aya-miyaguchi" + ], + "eventId": "devcon-7", + "slot_start": 1731382800000, + "slot_end": 1731384000000, + "slot_roomId": "main-stage", + "sources_youtubeId": "SE15rsPVHz0", + "sources_swarmHash": "ad356189d2834782997d461c0a3e4b34ad700af2998a1d88369a28e185b406d0", + "resources_presentation": "https://docs.google.com/presentation/d/1K5z-RKHIToQZFNAHUKMDOmuOsAKkNs4VPNUiIqAAe9M", + "resources_slides": "" }, "vector": [ 0, @@ -619976,9 +618216,9 @@ 0, 0, 0, + 6, 0, 0, - 6, 0, 0, 0, @@ -620151,6 +618391,37 @@ 0, 0, 0, + 6, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -620518,11 +618789,6 @@ 0, 0, 0, - 6, - 6, - 6, - 6, - 6, 0, 0, 0, @@ -620737,7 +619003,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -620776,7 +619041,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -620912,34 +619176,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -621287,13 +619523,12 @@ 2, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -621306,44 +619541,51 @@ }, { "session": { - "id": "redefining-boundaries-in-the-infinite-garden", - "sourceId": "FUZDNX", - "title": "Redefining boundaries in the Infinite Garden", - "description": "Don’t miss the Devcon Opening Ceremony, where we’ll set the stage for an incredible event ahead, with talks from Vitalik Buterin (Founder of Ethereum), Aya Miyaguchi (Executive Director of the Ethereum Foundation), Josh Stark (Ethereum Foundation Leadership), Skylar Weaver (Devcon Team Lead), and more surprise guests.", - "track": "Real World Ethereum", - "type": "Talk", - "expertise": "", - "audience": "Engineering", + "id": "redefining-daos-state-of-daos-in-asia", + "sourceId": "PUMYRH", + "title": "Redefining DAOs: State of DAOs in Asia", + "description": "We are a team from Metagov and DAOstar, advancing the global DAO 
movement through standards like ERC-4824 and exploring diverse DAO narratives worldwide. We've commissioned multiple reports on the “State of DAOs” in Asia, covering Japan, South Korea, Taiwan, Singapore, Greater China, and SEA. Our panel will discuss these findings, focusing on DAO narratives, regulations, opportunities, and differences between Eastern and Western DAOs, aiming to bridge the gap in the global DAO discourse.", + "track": "Coordination", + "type": "Panel", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], - "language": "en", - "speakers": [ - "aya-miyaguchi" + "tags": [ + "Coordination", + "DAO", + "Governance", + "asia", + "Coordination", + "DAO", + "Governance" + ], + "keywords": [ + "Standards", + "Asia" ], + "duration": 3320, + "language": "en", + "sources_swarmHash": "b35d788dd5f9fe24db1d7b79ac09e50c80bc87072fc3500fd92bf56c3d47ded0", + "sources_youtubeId": "8zy3C3pYh48", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67343a1e9dbb7a90e18a29e8", "eventId": "devcon-7", - "slot_start": 1731382800000, - "slot_end": 1731384000000, - "slot_roomId": "main-stage", - "sources_youtubeId": "SE15rsPVHz0", - "sources_swarmHash": "ad356189d2834782997d461c0a3e4b34ad700af2998a1d88369a28e185b406d0", - "resources_presentation": "https://docs.google.com/presentation/d/1K5z-RKHIToQZFNAHUKMDOmuOsAKkNs4VPNUiIqAAe9M" + "slot_start": 1731472200000, + "slot_end": 1731475800000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1ieI7X9rFpOPzhR32w8gT6d_tE2y-xDKaSS2cr_K6lgE", + "resources_slides": "https://drive.google.com/file/d/1Hk2UfE5WN2V92GOwzmJjNvuY4AgA96eX/view", + "speakers": [ + "joseph-low", + "dev-lewis", + "hazel-devjani", + "gen", + "yvonne" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -621355,6 +619597,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -621511,7 +619754,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -621896,6 +620138,11 @@ 0, 0, 0, + 6, + 6, + 6, + 6, + 6, 0, 0, 0, @@ -622183,10 +620430,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -622240,6 +620489,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -622535,6 +620785,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -622649,12 +620900,11 @@ 2, 0, 0, - 2, - 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -622667,48 +620917,46 @@ }, { "session": { - "id": "redefining-daos-state-of-daos-in-asia", - "sourceId": "PUMYRH", - "title": "Redefining DAOs: State of DAOs in Asia", - "description": "We are a team from Metagov and DAOstar, advancing the global DAO movement through standards like ERC-4824 and exploring diverse DAO narratives worldwide. We've commissioned multiple reports on the “State of DAOs” in Asia, covering Japan, South Korea, Taiwan, Singapore, Greater China, and SEA. Our panel will discuss these findings, focusing on DAO narratives, regulations, opportunities, and differences between Eastern and Western DAOs, aiming to bridge the gap in the global DAO discourse.", - "track": "Coordination", - "type": "Panel", - "expertise": "Beginner", - "audience": "Community", + "id": "redis-evm-supercharging-ethereum-calls-with-in-memory-execution", + "sourceId": "FKVE9X", + "title": "Redis EVM: Supercharging Ethereum calls with in-memory execution", + "description": "Redis EVM is a research project that embeds an Ethereum Virtual Machine interpreter within Redis using Lua-based Functions. 
By enabling Redis to directly interpret EVM opcodes, this innovation aims to drastically reduce SLOAD latency for eth_call operations. We'll explore the architecture, implementation challenges, and potential performance gains of this novel approach. Come discover how Redis EVM could reshape Ethereum execution environments, enhancing scalability and efficiency for dApps.", + "track": "Core Protocol", + "type": "Lightning Talk", + "expertise": "Expert", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Coordination", - "DAO", - "Governance", - "asia", - "Coordination", - "DAO", - "Governance" + "Scalability", + "EVM-equivalent", + "Tooling", + "execution", + "EVM-equivalent", + "Scalability", + "Tooling" ], "keywords": [ - "Standards", - "Asia" + "RPC", + "Execution" ], - "duration": 3320, + "duration": 582, "language": "en", - "sources_swarmHash": "b35d788dd5f9fe24db1d7b79ac09e50c80bc87072fc3500fd92bf56c3d47ded0", - "sources_youtubeId": "8zy3C3pYh48", + "sources_swarmHash": "8fe541e016c7b1993021578ad41a232562bea38e64e3e5058a25a84e327a5385", + "sources_youtubeId": "8EexwGNrxYQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67343a1e9dbb7a90e18a29e8", + "sources_streamethId": "6735a10d9dbb7a90e1ad95c0", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735a10d9dbb7a90e1ad95c0.vtt", + "transcript_text": " Thank you all for coming. Can everybody hear me, had the honor and privilege to do so. Now, I help customers exclusively from the Web3 space to deploy and scale on AWS, which gives me visibility to some of the problems that they face, right? Okay, big one, challenges on RPC scaling. That's super fun. So to begin with, whenever you have a consumer-facing application, let's think about a Wi-Fi router, right? You have to serve all those RPC requests to millions of people. The traffic can be unpredictable, and you might have some huge surges in traffic. So some strategies that folks typically do is horizontal scalability of nodes. Super fair. But it comes with some challenges, right? So first of all is reaction time. The time it takes for you to deploy a new instance. So take the snapshot, load it, copy files, and load the chain data, and also synchronize the difference from that time. Second, that's not a trivial challenge though, by the way. Second, forecasting to understand your user patterns and see how on weekends I have lower usage, and then on Tuesdays I might have more access, and then you react accordingly, proactively. But that is a very specialized task, right? So the caveat is of over-provisioning, which happens a lot. Basically, people paying for more servers with idle capacity. That's not good for any business. Final boss, consistency, right? So when you scale horizontally, well, you suffer from that. You have potentially nodes that are behind the chain. You have, you know, different states across, and people use typically the latest data, right? So you do not scale horizontally to serve archive data. Some businesses, they operate only on current data. So, consistency, I would say, that is the final boss that typically makes people delegate the task of managing these RPC nodes management to infrastructure providers, some of the ones that you know, right? But they suffer from the same problem. So they need to get really good at forecasting reaction time, and that's all the perpetuating and putting lots of pressure on those players. 
Well, a typical strategy to deal with a lot of read access is caching. Some of those RPC methods are pretty easy. Chain ID would never change. Block by hash, one parameter. Get logs. You can offload the data to some database index, but the list goes on and it becomes even harder. ETH call is the main villain because it is typically used for over 70% compared. So if you want to address this issue, we should first and foremost tackle the ETH call problem. Word of the day, externalization. So can we, is there any opportunity to externalize the processing of EVM opcodes to a very fast engine? So I'm introducing today the EVM Lua project. It is technical validation mode. It is a micro EVM interpreted, implemented in Lua, and it executes inside Redis with minimal storage read latency. And it is able to process, ultimately, EVM operations. So ETH calls, estimate gas in the future, of course, and others. How it works, you can actually select what are the contracts you have there, and then you load the attributes from that contract. So code balance, no storage, all the storage keys, there are scripts for you to do that. And we have some phases. So R&D stage, this is where we're at so loading the entire storage from selected contracts keeping up with the state if processing EVM code strips next up EVM compliance so implementing all the opcodes including transient storage and adding EVM metering, so gas metering. And further steps include deployment, so benchmarks, optimization, feedback loop, and building for catering to user-specific features. So those are baseline numbers from Amazon ElastiCache benchmark using, of course, the simplest data structures. This is not our project yet. But 1.2 million requests per second for a single instance. That is a lot, right? And in a cluster, you can have over 5 million requests per second if you scale those Redis nodes horizontally. So here's the project. Have fun. Please start it. And, well, thank you very much. Thank you so much, Everton. So now we'll move on to a quick Q&A. So we have one question thus far. What are some potential security risks of embedding an EVM interpreter within Redis? All right. Interesting question. So you can have several guardrails for that. But because you have control of the opcodes that are being executed, that would be only for reads and not to write. So you can separate who writes and who reads at a user level and have several folks only reading from that in-memory database. Does that answer the question? Whoever asked? I think so. Okay, just on time, we have another question. How large is the RAM size needed? That's another great question. So to begin with, only, you know, just a couple megabytes, and it is already enough to start playing. And it depends on the contract storage you want to load up there, right? So on AWS, the instances go up to 2 terabytes of memory within a single instance. So the sky can be really the limit here. It could fit the entire state. All right, and I accidentally pressed answer for one of the questions, so it's not showing up, but I'll read it out. Why don't we access state DB remotely? Why don't we access DB state remotely? Great question. I thought about that too. So you can create a compute unit that is external to the node and scale that out. However, they will all be executing get storage at sequentially as you execute the EVM instructions, and you will get the penalty of latency, right? So the thesis here is to put together both storage and the execution in an scalable way. 
All right, now we have a lot of questions. Next, what happens if the EVM hits an unimplemented opcode? It breaks really hard. Okay, next one. I'm assuming, can this run on Redis community? Absolutely, yes. All right. Yeah, so there are even novel engines there. One of them is called Valky. That is a drop-in replacement for Redis. Should be the same thing, right? And there's a developing space. Okay, and our last question, does it need archive notes to feed the data? Is Redis sorting all archive data? Great question. So the project is currently focused on current state. You don't need to have an archive node to feed that in, but your RPC source needs to have some specific debug methods available. So the way to export the entire state of a contract is one of them. All right, we just have one last question. When can we use this for full block execution? Oh, wow. I don't question. When can we use this for full block execution? Oh wow. I don't know. Soon. Please help me. Alright, I think unfortunately that's all the time we have. I know we have one more question, but I believe Everton will be hanging around here. So yeah, you can catch up with him after this session. Thank you so much, Everton. Let's give him another round of applause. Thank you.", "eventId": "devcon-7", - "slot_start": 1731472200000, - "slot_end": 1731475800000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1ieI7X9rFpOPzhR32w8gT6d_tE2y-xDKaSS2cr_K6lgE", - "resources_slides": null, + "slot_start": 1731565200000, + "slot_end": 1731565800000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1fF69WpIZk0d5kqOiGISG9maJgrmsuKxAcyzfYSedRsw", + "resources_slides": "https://drive.google.com/file/d/1_lASvqz9yYvG8zbCS8H8MnI5HrA3K4y-/view", "speakers": [ - "joseph-low", - "dev-lewis", - "hazel-devjani", - "gen", - "yvonne" + "everton-fraga" ] }, "vector": [ @@ -622716,6 +620964,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -622723,7 +620972,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -623265,15 +621513,11 @@ 0, 0, 0, - 6, - 6, - 6, - 6, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -623485,6 +621729,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -623559,12 +621804,10 @@ 0, 0, 0, - 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -623605,6 +621848,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -623618,7 +621862,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -623660,6 +621903,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -623789,6 +622033,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -623915,7 +622160,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -624029,14 +622273,13 @@ 0, 0, 2, + 2, 0, 0, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -624048,46 +622291,47 @@ }, { "session": { - "id": "redis-evm-supercharging-ethereum-calls-with-in-memory-execution", - "sourceId": "FKVE9X", - "title": "Redis EVM: Supercharging Ethereum calls with in-memory execution", - "description": "Redis EVM is a research project that embeds an Ethereum Virtual Machine interpreter within Redis using Lua-based Functions. By enabling Redis to directly interpret EVM opcodes, this innovation aims to drastically reduce SLOAD latency for eth_call operations. We'll explore the architecture, implementation challenges, and potential performance gains of this novel approach. 
Come discover how Redis EVM could reshape Ethereum execution environments, enhancing scalability and efficiency for dApps.", - "track": "Core Protocol", - "type": "Lightning Talk", - "expertise": "Expert", - "audience": "Engineering", + "id": "reimagining-layer-0-new-worlds-and-ancient-philosophies", + "sourceId": "JPHQYQ", + "title": "Reimagining Layer 0: New Worlds and Ancient Philosophies", + "description": "Where the early internet was an expression of freedom, liberty, and democratising virtual spaces, etc. Today, our digital spaces are breaking and have not met that promise. The Web3 space also faces scams, degen behaviour, and capture by centralised actors. How do we guide Ethereum to stay aligned with human values as we build a new world? Revisiting ancient Asian philosophies can help us reimagine a new world from Layer0.", + "track": "Real World Ethereum", + "type": "Talk", + "expertise": "Beginner", + "audience": "Academic", "featured": false, "doNotRecord": false, "tags": [ - "Scalability", - "EVM-equivalent", - "Tooling", - "execution", - "EVM-equivalent", - "Scalability", - "Tooling" + "Coordination", + "Political systems", + "Solarpunk", + "Regenative Ethereum", + "value", + "asian", + "Coordination", + "Political systems", + "Regenative Ethereum", + "Solarpunk" ], "keywords": [ - "RPC", - "Execution" + "asian", + "values" ], - "duration": 582, + "duration": 1568, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "3d225064900625c44f6ace62cf5e21ef0505517583e3365f6e57b9cebb8ddb67", + "sources_youtubeId": "rhDemdcnVVE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735a10d9dbb7a90e1ad95c0", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735a10d9dbb7a90e1ad95c0.vtt", - "transcript_text": " Thank you all for coming. Can everybody hear me, had the honor and privilege to do so. Now, I help customers exclusively from the Web3 space to deploy and scale on AWS, which gives me visibility to some of the problems that they face, right? Okay, big one, challenges on RPC scaling. That's super fun. So to begin with, whenever you have a consumer-facing application, let's think about a Wi-Fi router, right? You have to serve all those RPC requests to millions of people. The traffic can be unpredictable, and you might have some huge surges in traffic. So some strategies that folks typically do is horizontal scalability of nodes. Super fair. But it comes with some challenges, right? So first of all is reaction time. The time it takes for you to deploy a new instance. So take the snapshot, load it, copy files, and load the chain data, and also synchronize the difference from that time. Second, that's not a trivial challenge though, by the way. Second, forecasting to understand your user patterns and see how on weekends I have lower usage, and then on Tuesdays I might have more access, and then you react accordingly, proactively. But that is a very specialized task, right? So the caveat is of over-provisioning, which happens a lot. Basically, people paying for more servers with idle capacity. That's not good for any business. Final boss, consistency, right? So when you scale horizontally, well, you suffer from that. You have potentially nodes that are behind the chain. You have, you know, different states across, and people use typically the latest data, right? So you do not scale horizontally to serve archive data. Some businesses, they operate only on current data. 
So, consistency, I would say, that is the final boss that typically makes people delegate the task of managing these RPC nodes management to infrastructure providers, some of the ones that you know, right? But they suffer from the same problem. So they need to get really good at forecasting reaction time, and that's all the perpetuating and putting lots of pressure on those players. Well, a typical strategy to deal with a lot of read access is caching. Some of those RPC methods are pretty easy. Chain ID would never change. Block by hash, one parameter. Get logs. You can offload the data to some database index, but the list goes on and it becomes even harder. ETH call is the main villain because it is typically used for over 70% compared. So if you want to address this issue, we should first and foremost tackle the ETH call problem. Word of the day, externalization. So can we, is there any opportunity to externalize the processing of EVM opcodes to a very fast engine? So I'm introducing today the EVM Lua project. It is technical validation mode. It is a micro EVM interpreted, implemented in Lua, and it executes inside Redis with minimal storage read latency. And it is able to process, ultimately, EVM operations. So ETH calls, estimate gas in the future, of course, and others. How it works, you can actually select what are the contracts you have there, and then you load the attributes from that contract. So code balance, no storage, all the storage keys, there are scripts for you to do that. And we have some phases. So R&D stage, this is where we're at so loading the entire storage from selected contracts keeping up with the state if processing EVM code strips next up EVM compliance so implementing all the opcodes including transient storage and adding EVM metering, so gas metering. And further steps include deployment, so benchmarks, optimization, feedback loop, and building for catering to user-specific features. So those are baseline numbers from Amazon ElastiCache benchmark using, of course, the simplest data structures. This is not our project yet. But 1.2 million requests per second for a single instance. That is a lot, right? And in a cluster, you can have over 5 million requests per second if you scale those Redis nodes horizontally. So here's the project. Have fun. Please start it. And, well, thank you very much. Thank you so much, Everton. So now we'll move on to a quick Q&A. So we have one question thus far. What are some potential security risks of embedding an EVM interpreter within Redis? All right. Interesting question. So you can have several guardrails for that. But because you have control of the opcodes that are being executed, that would be only for reads and not to write. So you can separate who writes and who reads at a user level and have several folks only reading from that in-memory database. Does that answer the question? Whoever asked? I think so. Okay, just on time, we have another question. How large is the RAM size needed? That's another great question. So to begin with, only, you know, just a couple megabytes, and it is already enough to start playing. And it depends on the contract storage you want to load up there, right? So on AWS, the instances go up to 2 terabytes of memory within a single instance. So the sky can be really the limit here. It could fit the entire state. All right, and I accidentally pressed answer for one of the questions, so it's not showing up, but I'll read it out. Why don't we access state DB remotely? 
Why don't we access DB state remotely? Great question. I thought about that too. So you can create a compute unit that is external to the node and scale that out. However, they will all be executing get storage at sequentially as you execute the EVM instructions, and you will get the penalty of latency, right? So the thesis here is to put together both storage and the execution in an scalable way. All right, now we have a lot of questions. Next, what happens if the EVM hits an unimplemented opcode? It breaks really hard. Okay, next one. I'm assuming, can this run on Redis community? Absolutely, yes. All right. Yeah, so there are even novel engines there. One of them is called Valky. That is a drop-in replacement for Redis. Should be the same thing, right? And there's a developing space. Okay, and our last question, does it need archive notes to feed the data? Is Redis sorting all archive data? Great question. So the project is currently focused on current state. You don't need to have an archive node to feed that in, but your RPC source needs to have some specific debug methods available. So the way to export the entire state of a contract is one of them. All right, we just have one last question. When can we use this for full block execution? Oh, wow. I don't question. When can we use this for full block execution? Oh wow. I don't know. Soon. Please help me. Alright, I think unfortunately that's all the time we have. I know we have one more question, but I believe Everton will be hanging around here. So yeah, you can catch up with him after this session. Thank you so much, Everton. Let's give him another round of applause. Thank you.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731565200000, - "slot_end": 1731565800000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1fF69WpIZk0d5kqOiGISG9maJgrmsuKxAcyzfYSedRsw", - "resources_slides": null, + "slot_start": 1731390600000, + "slot_end": 1731392400000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1hKiZ-7BNfUDp8MUrH21ufSaRDdB7UK0-A4X85CDWHvg", + "resources_slides": "https://drive.google.com/file/d/1A5UDE-8kpzerV4h2aQHSwIF0K4avv00j/view", "speakers": [ - "everton-fraga" + "dev-lewis" ] }, "vector": [ @@ -624095,6 +622339,8 @@ 0, 0, 0, + 0, + 0, 6, 0, 0, @@ -624642,6 +622888,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -624649,7 +622896,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -624863,7 +623109,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -624944,6 +623189,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -624962,6 +623208,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -624982,7 +623229,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -624992,6 +623238,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -625037,7 +623284,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -625075,6 +623321,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -625168,7 +623415,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -625210,6 +623456,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -625288,6 +623535,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -625398,6 +623646,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -625409,16 +623658,6 @@ 0, 0, 2, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -625427,48 +623666,26 @@ }, { "session": { - "id": "reimagining-layer-0-new-worlds-and-ancient-philosophies", - "sourceId": "JPHQYQ", - "title": "Reimagining Layer 0: New Worlds and Ancient Philosophies", - "description": "Where the early internet was an expression of freedom, liberty, and democratising virtual spaces, etc. Today, our digital spaces are breaking and have not met that promise. 
The Web3 space also faces scams, degen behaviour, and capture by centralised actors. How do we guide Ethereum to stay aligned with human values as we build a new world? Revisiting ancient Asian philosophies can help us reimagine a new world from Layer0.", - "track": "Real World Ethereum", - "type": "Talk", - "expertise": "Beginner", - "audience": "Academic", + "id": "remix-jazz-and-blues-jam", + "sourceId": "P8DPWB", + "title": "Remix Jazz and Blues Jam", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Coordination", - "Political systems", - "Solarpunk", - "Regenative Ethereum", - "value", - "asian", - "Coordination", - "Political systems", - "Regenative Ethereum", - "Solarpunk" - ], - "keywords": [ - "asian", - "values" - ], - "duration": 1568, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "3d225064900625c44f6ace62cf5e21ef0505517583e3365f6e57b9cebb8ddb67", - "sources_youtubeId": "rhDemdcnVVE", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731390600000, - "slot_end": 1731392400000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1hKiZ-7BNfUDp8MUrH21ufSaRDdB7UK0-A4X85CDWHvg", - "resources_slides": null, - "speakers": [ - "dev-lewis" - ] + "slot_start": 1731391200000, + "slot_end": 1731398400000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1gubgQAcUzwO-7G0PuYDVVhtDoG62piEJsVzXp4Pfrgw", + "resources_slides": "" }, "vector": [ 0, @@ -625477,10 +623694,11 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -626025,7 +624243,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -626328,7 +624545,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -626347,7 +624563,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -626377,7 +624592,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -626460,7 +624674,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -626596,7 +624809,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -626675,7 +624887,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -626790,6 +625001,8 @@ 2, 0, 0, + 2, + 0, 0, 0, 0, @@ -626798,7 +625011,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -626807,9 +625019,9 @@ }, { "session": { - "id": "remix-jazz-and-blues-jam", - "sourceId": "P8DPWB", - "title": "Remix Jazz and Blues Jam", + "id": "remix-team-jazz-jam", + "sourceId": "DFPGS9", + "title": "Remix Team Jazz Jam", "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", "track": "Entertainment", "type": "Music", @@ -626822,10 +625034,11 @@ "language": "en", "speakers": [], "eventId": "devcon-7", - "slot_start": 1731391200000, - "slot_end": 1731398400000, + "slot_start": 1731556800000, + "slot_end": 1731564000000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1gubgQAcUzwO-7G0PuYDVVhtDoG62piEJsVzXp4Pfrgw" + "resources_presentation": "https://docs.google.com/presentation/d/15rbpsHykfj3g9nCDSuig1Spz-H0RvoW5Qg7cgHOO95M", + "resources_slides": "" }, "vector": [ 0, @@ -628138,11 +626351,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -628164,35 +626372,44 @@ }, { "session": { - "id": "remix-team-jazz-jam", - "sourceId": "DFPGS9", - "title": "Remix Team Jazz Jam", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", + "id": "resilience-to-global-catastrophes", + "sourceId": "PZFHQF", + "title": "Resilience to Global Catastrophes", + "description": "The risk of nuclear war or an extreme pandemic is frighteningly high. Little work has been done on resilience to these catastrophes. I’ll discuss resilient food sources that can be scaled up quickly, methods of maintaining the functioning of critical sectors in an extreme pandemic, and backup methods of meeting basic needs. I’ll discuss cost effectiveness of low-cost preparations, including piloting technologies, such as resilient satellite communications, and a resilience DAO.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", + "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Climate", + "DAO", + "Effective Altruism" + ], + "keywords": [ + "Resilience", + "global catastrophic risk" + ], + "duration": 700, "language": "en", - "speakers": [], + "sources_swarmHash": "d9de68893eeca8426b362d031574c484cd0ee2a4ec8b2054fcb360caf33f5cf8", + "sources_youtubeId": "hWVKnQWXui0", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735da889dbb7a90e132b668", "eventId": "devcon-7", - "slot_start": 1731556800000, - "slot_end": 1731564000000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/15rbpsHykfj3g9nCDSuig1Spz-H0RvoW5Qg7cgHOO95M" + "slot_start": 1731582600000, + "slot_end": 1731583200000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1PtsLokONL6PEJ91cRee5o3KxGN3fLCjZ34KzGRksS0U", + "resources_slides": "https://drive.google.com/file/d/1sVDimjDp8VsnyAULnG0rK6dJU63YXevs/view", + "speakers": [ + "david-denkenberger", + "yesh" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 6, 0, @@ -628751,6 +626968,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -629040,6 +627259,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -629073,6 +627293,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -629186,7 +627407,7 @@ 0, 0, 0, - 0, + 2, 0, 0, 0, @@ -629502,7 +627723,6 @@ 0, 2, 0, - 0, 2, 0, 0, @@ -629521,100 +627741,51 @@ }, { "session": { - "id": "resilience-to-global-catastrophes", - "sourceId": "PZFHQF", - "title": "Resilience to 
Global Catastrophes", - "description": "The risk of nuclear war or an extreme pandemic is frighteningly high. Little work has been done on resilience to these catastrophes. I’ll discuss resilient food sources that can be scaled up quickly, methods of maintaining the functioning of critical sectors in an extreme pandemic, and backup methods of meeting basic needs. I’ll discuss cost effectiveness of low-cost preparations, including piloting technologies, such as resilient satellite communications, and a resilience DAO.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "Beginner", + "id": "reth-10-how-did-we-get-here-and-what-is-next", + "sourceId": "UTDCDM", + "title": "Reth 1.0: How did we get here and what is next?", + "description": "Reth is an Ethereum Execution Layer in development since 2022, focused on contributor-friendliness, modularity and performance. \r\n\r\nIn 2024, after rigorous testing and security review, Reth had its first 1.0 prod-ready release. \r\n\r\nIn this talk, we review the process of shipping a state of the art & novel Ethereum node, and lay out Reth's plans for the next years.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Climate", - "DAO", - "Effective Altruism" + "Core Protocol", + "Developer Infrastructure", + "Tooling", + "rust", + "Core Protocol", + "Developer Infrastructure", + "Tooling" ], "keywords": [ - "Resilience", - "global catastrophic risk" + "rust" ], - "duration": 700, + "duration": 1535, "language": "en", - "sources_swarmHash": "d9de68893eeca8426b362d031574c484cd0ee2a4ec8b2054fcb360caf33f5cf8", - "sources_youtubeId": "hWVKnQWXui0", + "sources_swarmHash": "1eb58b04417a1a528d7cc630bdf462cbe06aebd7de0276370fa8c28db3227a32", + "sources_youtubeId": "10xaWE28WCM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735da889dbb7a90e132b668", + "sources_streamethId": "67346d4b9dbb7a90e1ceb852", "eventId": "devcon-7", - "slot_start": 1731582600000, - "slot_end": 1731583200000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1PtsLokONL6PEJ91cRee5o3KxGN3fLCjZ34KzGRksS0U", - "resources_slides": null, + "slot_start": 1731486600000, + "slot_end": 1731488400000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1UdyIubnyXa-jfQkQkNDBDIP68YwdvTL9o61nG4a3fFU", + "resources_slides": "https://drive.google.com/file/d/1d3v0ieahxoCTV3twrrXpAaqDPnX_LTMM/view", "speakers": [ - "david-denkenberger", - "yesh" + "georgios-konstantopoulos" ] }, "vector": [ - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -630118,8 +628289,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -630172,6 +628341,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -630378,7 +628548,9 @@ 0, 0, 0, + 2, 0, + 2, 0, 0, 0, @@ -630445,7 +628617,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -630559,7 +628730,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -630812,6 +628982,51 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -630877,6 
+629092,8 @@ 0, 2, 0, + 0, + 0, 2, 0, 0, @@ -630895,43 +629112,45 @@ }, { "session": { - "id": "reth-10-how-did-we-get-here-and-what-is-next", - "sourceId": "UTDCDM", - "title": "Reth 1.0: How did we get here and what is next?", - "description": "Reth is an Ethereum Execution Layer in development since 2022, focused on contributor-friendliness, modularity and performance. \r\n\r\nIn 2024, after rigorous testing and security review, Reth had its first 1.0 prod-ready release. \r\n\r\nIn this talk, we review the process of shipping a state of the art & novel Ethereum node, and lay out Reth's plans for the next years.", + "id": "rethinking-ethereums-account-model", + "sourceId": "GEEQXS", + "title": "Rethinking Ethereum’s account model", + "description": "Account centric models are inherently faster.\r\n\r\nEthereum operates on a global account based model. This means a global lock occurs any time someone needs to touch a piece of global state, such as an ERC20.\r\n\r\nAn account centric model, instead, creates a new deterministic address or state for each account. This means calls into transfers on ERC20s and dexes can be made in parallel, accelerating speed drastically. It also is more secure.\r\n\r\nIt’s a forgotten mechanism to scale ETH.", "track": "Core Protocol", - "type": "Talk", - "expertise": "Intermediate", + "type": "Lightning Talk", + "expertise": "Expert", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "Core Protocol", - "Developer Infrastructure", - "Tooling", - "rust", + "Layer 1", + "Ethereum Roadmap", + "model", + "account", "Core Protocol", - "Developer Infrastructure", - "Tooling" + "Ethereum Roadmap", + "Layer 1" ], "keywords": [ - "rust" + "Account", + "Models" ], - "duration": 1535, + "duration": 537, "language": "en", - "sources_swarmHash": "1eb58b04417a1a528d7cc630bdf462cbe06aebd7de0276370fa8c28db3227a32", - "sources_youtubeId": "10xaWE28WCM", + "sources_swarmHash": "de96bb25225be90b669409315ef858bc5a173e1424895908951a4f1344789bca", + "sources_youtubeId": "7B-ji-XrMio", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67346d4b9dbb7a90e1ceb852", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731486600000, - "slot_end": 1731488400000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1UdyIubnyXa-jfQkQkNDBDIP68YwdvTL9o61nG4a3fFU", - "resources_slides": null, + "slot_start": 1731465900000, + "slot_end": 1731466500000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1S8CtqAgd4RfP7bFHLKoa51ch_PX1Vkr5bs1-02-C3XE", + "resources_slides": "https://drive.google.com/file/d/1YBTf8ZTmOxhDG9BHwU5HBKAo3GDtjNzF/view", "speakers": [ - "georgios-konstantopoulos" + "will-villanueva" ] }, "vector": [ @@ -631698,16 +629917,12 @@ 0, 0, 0, - 0, - 0, - 0, - 0, + 6, 0, 0, 0, 2, 0, - 2, 0, 0, 0, @@ -631740,7 +629955,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -631880,6 +630094,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -632141,6 +630357,7 @@ 0, 0, 2, + 2, 0, 0, 0, @@ -632250,9 +630467,6 @@ 0, 0, 2, - 0, - 0, - 0, 2, 0, 0, @@ -632271,45 +630485,43 @@ }, { "session": { - "id": "rethinking-ethereums-account-model", - "sourceId": "GEEQXS", - "title": "Rethinking Ethereum’s account model", - "description": "Account centric models are inherently faster.\r\n\r\nEthereum operates on a global account based model. 
This means a global lock occurs any time someone needs to touch a piece of global state, such as an ERC20.\r\n\r\nAn account centric model, instead, creates a new deterministic address or state for each account. This means calls into transfers on ERC20s and dexes can be made in parallel, accelerating speed drastically. It also is more secure.\r\n\r\nIt’s a forgotten mechanism to scale ETH.", - "track": "Core Protocol", - "type": "Lightning Talk", - "expertise": "Expert", - "audience": "Engineering", + "id": "rethinking-usability-in-a-world-of-data-ownership", + "sourceId": "RKNJED", + "title": "Rethinking usability in a world of data ownership", + "description": "What makes something usable in a world where the internet is built on open source cryptography? This talk explores how we might consider choice a key factor in the usability of applications where we are owners of our data which we can port, wield, and disclose at our discretion with other data owners. I will illustrate how we are testing our hypothesis that cryptography can surface meaningful connections through demo applications that embrace choice as a key usability factor.", + "track": "Usability", + "type": "Talk", + "expertise": "Beginner", + "audience": "", "featured": false, "doNotRecord": false, "tags": [ - "Core Protocol", - "Layer 1", - "Ethereum Roadmap", - "model", - "account", - "Core Protocol", - "Ethereum Roadmap", - "Layer 1" + "data", + "ownership", + "Best Practices", + "Design Thinking", + "MPC" ], "keywords": [ - "Account", - "Models" + "applications", + "social graphs", + "data ownership" ], - "duration": 537, + "duration": 1390, "language": "en", - "sources_swarmHash": "de96bb25225be90b669409315ef858bc5a173e1424895908951a4f1344789bca", - "sources_youtubeId": "7B-ji-XrMio", + "sources_swarmHash": "3a0ef287d28a9cee43ea3e7397998aa6c6cd47df2b2d5f0010ba0e9da51827f6", + "sources_youtubeId": "elXvPDai80c", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673424cc9dbb7a90e18ec653", "eventId": "devcon-7", - "slot_start": 1731465900000, - "slot_end": 1731466500000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1S8CtqAgd4RfP7bFHLKoa51ch_PX1Vkr5bs1-02-C3XE", - "resources_slides": null, + "slot_start": 1731468600000, + "slot_end": 1731470400000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1J2Pvcrn11ngEmYIecAN4U40wGXlrktRwNsT9I3TM-YM", + "resources_slides": "https://drive.google.com/file/d/1a4Y4-WWJwbZE_tuu1kGtw_Z6FpEQyA_G/view", "speakers": [ - "will-villanueva" + "rachel" ] }, "vector": [ @@ -632317,11 +630529,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -633079,11 +631291,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 2, 0, 0, 0, @@ -633097,6 +631304,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -633154,6 +631362,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -633208,6 +631417,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -633256,7 +631466,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -633377,6 +631586,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -633520,7 +631730,6 @@ 0, 0, 2, - 2, 0, 0, 0, @@ -633627,10 +631836,6 @@ 0, 0, 0, - 0, - 0, - 0, - 2, 2, 0, 0, @@ -633643,52 +631848,56 @@ 0, 0, 0, + 2, + 0, 0, 0 ] }, { "session": { - "id": "rethinking-usability-in-a-world-of-data-ownership", - "sourceId": "RKNJED", - "title": "Rethinking usability in a world of data ownership", - "description": "What makes something usable in a world where the internet is built on open source cryptography? 
This talk explores how we might consider choice a key factor in the usability of applications where we are owners of our data which we can port, wield, and disclose at our discretion with other data owners. I will illustrate how we are testing our hypothesis that cryptography can surface meaningful connections through demo applications that embrace choice as a key usability factor.", - "track": "Usability", - "type": "Talk", - "expertise": "Beginner", - "audience": "", + "id": "rethinking-user-risks-at-l2beat", + "sourceId": "8YKV8H", + "title": "Rethinking user risks at L2BEAT", + "description": "We want to announce a new L2BEAT feature of viewing protocol risks that individuals are actually exposed to. When we researched risks in the past users didn't find the information relevant, because they weren't aware they were using a specific protocol. Bridges are one example where users forgot about escrow risk as soon as the funds were bridged. In this talk we'll show how rollup risks translate into risks associated with individual assets held by users.", + "track": "Security", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "data", - "ownership", - "Best Practices", - "Design Thinking", - "MPC" + "Layer 2s", + "Token bridging", + "Security", + "trusted", + "Layer 2s", + "Security", + "Token bridging" ], "keywords": [ - "applications", - "social graphs", - "data ownership" + "risk", + "trust" ], - "duration": 1390, + "duration": 523, "language": "en", - "sources_swarmHash": "3a0ef287d28a9cee43ea3e7397998aa6c6cd47df2b2d5f0010ba0e9da51827f6", - "sources_youtubeId": "elXvPDai80c", + "sources_swarmHash": "5426184a342e67741c5784af3fa3bad843721262e251fc34edd8c6527df7d9e9", + "sources_youtubeId": "CM6e_wwieeo", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673424cc9dbb7a90e18ec653", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731468600000, - "slot_end": 1731470400000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1J2Pvcrn11ngEmYIecAN4U40wGXlrktRwNsT9I3TM-YM", - "resources_slides": null, + "slot_start": 1731406800000, + "slot_end": 1731407400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1eDeIVW8yw0TTm6i7x1PFeXMtab7BfMey3gIO056ytDw", + "resources_slides": "https://drive.google.com/file/d/1j1FSVwP2PC5EuMa4fndo7QchramS8SLe/view", "speakers": [ - "rachel" + "piotr-szlachciak" ] }, "vector": [ + 6, 0, 0, 0, @@ -633697,7 +631906,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -634437,6 +632645,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -634471,7 +632680,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -634503,6 +632711,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -634529,7 +632738,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -634584,7 +632792,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -634610,6 +632817,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -634639,6 +632848,27 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -634754,7 +632984,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -634897,26 +633126,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -634997,6 +633206,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -635013,57 +633223,40 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, 0 ] }, { "session": { - "id": "rethinking-user-risks-at-l2beat", - "sourceId": "8YKV8H", - "title": "Rethinking user risks at L2BEAT", - "description": "We 
want to announce a new L2BEAT feature of viewing protocol risks that individuals are actually exposed to. When we researched risks in the past users didn't find the information relevant, because they weren't aware they were using a specific protocol. Bridges are one example where users forgot about escrow risk as soon as the funds were bridged. In this talk we'll show how rollup risks translate into risks associated with individual assets held by users.", + "id": "reverse-engineering-evm-bytecode-with-ghidra", + "sourceId": "GSJ8EC", + "title": "Reverse Engineering EVM Bytecode with Ghidra", + "description": "Ghidra is a popular tool in reverse engineering. We developed Mothra, a Ghidra extension that enables it to work with EVM bytecode. Disassembly, CFG, and decompilation of EVM bytecode are now possible within Ghidra. In this workshop, we will discuss how Mothra is implemented and how to reverse engineer EVM smart contracts using Ghidra.", "track": "Security", - "type": "Lightning Talk", + "type": "Workshop", "expertise": "Intermediate", - "audience": "Community", + "audience": "Engineering", "featured": false, - "doNotRecord": false, + "doNotRecord": true, + "keywords": [ + "Security" + ], "tags": [ - "Layer 2s", - "Token bridging", - "Security", - "trusted", - "Layer 2s", "Security", - "Token bridging" - ], - "keywords": [ - "risk", - "trust" + "Reversing", + "Reversing" ], - "duration": 523, "language": "en", - "sources_swarmHash": "5426184a342e67741c5784af3fa3bad843721262e251fc34edd8c6527df7d9e9", - "sources_youtubeId": "CM6e_wwieeo", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731406800000, - "slot_end": 1731407400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1eDeIVW8yw0TTm6i7x1PFeXMtab7BfMey3gIO056ytDw", - "resources_slides": null, "speakers": [ - "piotr-szlachciak" - ] + "yuejie", + "louis-tsai" + ], + "eventId": "devcon-7", + "slot_start": 1731654000000, + "slot_end": 1731659400000, + "slot_roomId": "classroom-d", + "resources_presentation": "https://docs.google.com/presentation/d/1cpw84aROzg-pzvJ3BWMKjrp6Dqvqw_OF_Xga5Rc8UU0", + "resources_slides": "https://drive.google.com/file/d/141DSXLZVOIIL7gU_8AfFynbdzL6YkEzD/view" }, "vector": [ 6, @@ -635631,10 +633824,7 @@ 0, 0, 6, - 0, - 0, - 0, - 0, + 6, 0, 0, 0, @@ -635883,7 +634073,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -635989,7 +634178,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -636020,7 +634208,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -636277,6 +634464,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -636384,12 +634572,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -636402,46 +634590,54 @@ }, { "session": { - "id": "reverse-engineering-evm-bytecode-with-ghidra", - "sourceId": "GSJ8EC", - "title": "Reverse Engineering EVM Bytecode with Ghidra", - "description": "Ghidra is a popular tool in reverse engineering. We developed Mothra, a Ghidra extension that enables it to work with EVM bytecode. Disassembly, CFG, and decompilation of EVM bytecode are now possible within Ghidra. In this workshop, we will discuss how Mothra is implemented and how to reverse engineer EVM smart contracts using Ghidra.", - "track": "Security", - "type": "Workshop", + "id": "revm-endgame", + "sourceId": "VEEYFZ", + "title": "Revm Endgame", + "description": "Revm is a critical component of the Ethereum ecosystem, used by builders, toolings and clients. 
It is an audited and proven library that is both fast and easy to use.\r\n\r\nAs more projects adopt Revm, I feel the increasing burden of making breaking changes and the need to consolidate its functionality. That’s why I am thinking about Revm Endgame, a solution to support experimentation, Layer 2 features, and EIPs without the need for repository forks.", + "track": "Core Protocol", + "type": "Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, - "doNotRecord": true, - "keywords": [ - "Security" - ], + "doNotRecord": false, "tags": [ - "Security", - "Reversing", - "Reversing" + "Core Protocol", + "Architecture", + "Public good", + "execution", + "client", + "Architecture", + "Core Protocol", + "Public good" ], - "language": "en", - "speakers": [ - "yuejie", - "louis-tsai" + "keywords": [ + "EVM" ], + "duration": 1484, + "language": "en", + "sources_swarmHash": "362ae064054b976732ffdfbf8338aa3a0c5d0b44e11c212fdb8e6ec7389eff92", + "sources_youtubeId": "xRuDWTWuxKA", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735836e9dbb7a90e1c0a9ff", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735836e9dbb7a90e1c0a9ff.vtt", + "transcript_text": " The introduction. Yeah, in this talk I would like to talk about Endgame, the last step of RIVM project. It started a few years ago and I think I found the solution that's going to last for a while. Before that, let's talk about history and basic evolution of the project. It started in 2021. I took a break from the work. I basically went to the seaside, relaxed a little bit, and after I came back, I did basically want to do some project that's related to infrastructure, wanted to build something that uses EVM, and I started researching what's basically on the market. And there was like three things. Open Ethereum was the client that I worked on before and it was GPA license so it was not suitable. EVModim was the new one. It has some, it's built by an ex-colleague, it has some experimental and exotic Rust night features and it was like just interpreter. So in that So it was not enough for me. Sputnik VM, I was most interested in it. And it was older project, older EVM, but it was not that maintained. It was hard to use it. And if you want to do inspection, tracing, you need to basically build it in some different way. And that's very hard to use. So after making PR to the Sputnik to add some generics on the instruction set, basically after two weeks of no response, nothing, and seeing the project not moving anywhere, I closed that PR and made my own REVM. In the month I had some initial initial code that can be run. This PR is fun to look at that. By the end of 2021 I supported the latest forks, state tests for passing, it supported all precompiles, and most importantly, interface was simple. I wanted to have, from my experience with Open Ethereum, I wanted to have EVM where you settle the data and say, transect. And then basically it gives you the output of that execution. The database trait was the abstraction that allows you runtime fetching of account and storage. Environment was the way how you set transaction block, configuration, everything around it. 
And inspector was the good abstraction, very powerful one that allows you runtime inspection of your EVM when you it's run get it this PR it was optimized few times but this PR was most impactful it's basically 4x the performance it was no std from the start the idea was to use it in JavaScript or Wasm. It was MIT license, and by that year, I basically jumped to another project, and REVM became my hobby. First adopters came next year. Foundry was the biggest one, the first one. The way how I met Georgios was he tweeted if there is any EVMs out there and I responded on the tweet, hey, fresh feedback if you're interested and he was in the minutes my DMs, that's Georgios, he's basically amazing in that sense. They started integrated a few months, I think the Oliver did the first integration five months after the start, that was like March 2022. Harhead started to get their RAST backend, they published DDR this year. Builder Surges was one of the big users of the library and they would, the first one they sync from the Genesis to the tip of the mainnet and it was very nice to hear hey we are syncing everything is fine. By the end of 2022 all focus was supported, optimization was done, our REVM-GS was supported but removed by the end of the year, there is no traction. A few more optimizations were done and most importantly RET, the client, started to be made October 2022. At that point of time I joined Paradigm to work on the client. I think I had H on my open Ethereum times. Second year, RET are mostly basically focused on the client implementation. We needed to make something stable and we did it very fastly. Team was amazing. I planned to stay there just for a few months but I'm still there now in Intaka but either way same team. Shankar and Kakum were added and I moved on. This year, RET01 was released, big stability improvement, it was audited, it was the people liked it, people wanted to use it, it is amazing. REVM on its own hit its own milestone, it got audited. It was community driven by the company. And it's basically the guy that finds a lot of EVM bugs. There is blog post if you want to hear more. You have got supported. And start of the year, ZK EVM become a thing. This year, I could claim, I feel that it's okay to claim that, the REVM became the most popular EVM library out there. And it's become the critical component of Ethereum ecosystem. There's a few types of the REVM users that I can see. They're basically clients and chain. RET first, Helios, Delight client, Trin. As the execution clients, there are different chains, Optimus, Crawl, Binance, Polygon. They are all interested to have REVM in their code, or basically, in this sense, RET and in that connection, REVM in their code or basically in the sense RET and in that connection REVM. The tooling both the foundry hardheads are basically using REVM. Builder searches, they're a little bit private but always in my DMs are the big users of it and ZKEVM basically became the standard library that's used in that field. There is even a grant by EF Foundation that basically targets REVM formal verification with the usage of the RISC-V and ZKEVMs. This all affected the future of REV and how I look at it. So, the problems. First problem, how to do AIP testing. How to allow AIP champions to come to the code base, improve their thing, implement it, test it in the foundry if needed, and basically do their own thing and do their basically on their own time. One of the example is transient storage. I want to just move on that path. 
In July 24 I got issue in GitHub. Hey can you implement that? That was one of the AIP champions that wanted this included in Aravian for him to test it in the Foundry. AIP was not included in the any hard fork, there is no even notion for it to be included. So that was mostly it. Few months after, AAP got CFI'd so it potentially can be included in Shanghai, but either way there is not like strong guarantee. Shanghai happened, A AIP was not in sight. Next, Harfork, Cancun. July, PR was made for that AIP. August, it was merged. In March, the AIP landed on Manet. Approximately four months before that, the AIP should be made inside the clients for the testing, for the damn nets and for test nets at the end. Another example was 3074 and I want to say all the requests that champions made were reasonable. It is from their point of view but the question that I had is like should I have started working on the API right away when I first request comes? It's a little bit silly because I have different priorities. I cannot do everything. Should I just merge PR when it was made? The question with that is who is going to do maintenance? Who is going to remove it if the AIP was not included? Do I do it for every IP? That would be a lot of unnecessary code. And in the end who is going to maintain all of that? Second problem with the maintainers is how to add testing dev experimental features. One of those I always try to facilitate and enable big projects that depend on our EVM to give it support, to give a way to make tech functionality happen. Sometimes there's like features, Rust features that basically enable those things as configuration, but sometimes if this is only the EVM project simple EVM project our EVMC R55 integration that require different execution environment would not be possible and problem three is chain support most of the end chains have small difference and they're not like, they don't want to move away from EVM a lot. They're most like new chain spec hardbox, new transaction types, maybe few AIPs, but in general they're like small difference from the mainnet, the mainnet EVM. Only way to do that previously it was like to fork the project and that brings the maintenance issue and burden of maintaining to basically repos. So solution for all that is to make EVM framework. I think that's the the last stage of REVM and its endgame. What do you mean by that? Let's make the code extensible. Chain can use REVM as the library, override the functionality that they need, add new transactions that they need and just use it as a library. That makes the core of the library same across all the chains. Implementing new AIPs that are on the mainnet would become easier and all the chains can just reuse the code. Tooling could create their own custom way to inspect. Maybe they need more performant way. Maybe they need need RVNC, needed new execution environment. New API can be implemented and tested separately. And, for example, this is the first look how basically I'm not coming to the code a little bit. But, yeah, the main, Spectrum main, Optimize, Spectrum Optimize. You will have different types for your different EVM that you need idea came initial idea came few years ago I looked at TV and bomb and they had like array of functions and area functions that was different by fork. So my first idea was, hey, can we introduce custom instruction and make it like allow users to implement or add their own custom instruction? That idea expanded. So it's not just on instruction, but on full logic. 
And that full logic function overriding was the handler from the last year I think it was very great idea but implementation wise it was like box or a pointer of function that was not the greatest and more flexible way I liked the insights that I got while implementing that and the EVM framework introduced two things. It introduced generics on the data, on the section, block, config and it reworked the handler in a more trait-like way. So it's more easier to use few examples of that is basically how are basic of the optimism that was the first few that was first and it was included our EVM in sense of the it was included as the feature. It was very like, intrusive. With the handlers, I tried, I succeeded to extract some functionality to the handlers. But with EWM framework, that became even easier. Currently it stands on Crate. I still have work to do on EWM framework, but it became a possibility. Another chain is called, they did the similar thing on their own repo. Just imagine like every EVM chain doing their own creates and having Foundry, RET or all the tooling support those variants of REVM. And all be tested, all be done, and basically it eases the integration with all the stack. The split that was inside the head with the handler is there is split between the data that is currently context and the handler that is the logic part of REVM. Context contains intersection, block, usual things. It contains journaling state that allows you to revert things when the revert happens in the calls. And it contains database that fetches the runtime data. Transient storage and warming of account is all done in journaling. Handler on the other hand has four parts. Validator that validates the interaction between block, transaction and config. Config can have chain ID that needs to be checked with the transaction chain ID, for example. Prevalidation, do some warming, do some deduct the call balance, do AIP 7702. Yeah, change delegate code to the state. Execution is a little bit complex. It has two main loops. The frame is the loop around the calls and interpreter is loop around the bytecode, basically instructions. And in the end, we have post-execution that does refund of not spent gas. It reimbursed the caller, it basically rewards the beneficiary and creates the outcome of the execution. In code, as you see, context has a lot of generics and it allows you basically to have it generalize. and it allows you basically to have it generalized. There is some to-dos to do. Spec needs to be moved inside configuration, but in general, this is the first view of the new context in EVM framework. Validation bar, one of the four stages in the handler, has basically self-contained. It contains associated types of the context, an error, a few functions that need to be called. Other handler types are in similar fashion made. And you can have Ethereum validation that basically does the Ethereum-specific implementation. Many VMs become just the context and the handler. And to create your own EVR, this is an example of main EVM, it's still pending and it's probably going to be changed in small or maybe medium way. You basically specify everything you need. For example, you have your context with some predefined data. The section and, for example, the structure inside the REVM, but you can add your intersection limitation on it. And you can specify the handler that has their own functionality. Execution, Ethereum Executor has a few fields that are maybe interesting to people. 
It has a precompile provider and an instruction provider, where you can generate your base list of precompiles, and there is a trait you need to implement for this to happen. On the other hand, this is an example of the Inspector. The mainnet EVM is still a work in progress, but the idea is that you create your own Inspector types that implement those traits, and you can just override them. In the end, this inspector main uses the DB and Inspector generics. And this is how you use it. There are probably going to be some helper functions that allow a little more flexibility and utility, but I wanted to show you just an example of how this could look. In the end, I would assume a lot of users are going to come, implement their own EVM or their own extensions, and just use REVM in that form. I think that's it. Thank you very much for having me. Thank you, Dragan. It's really amazing what you have managed to do all these years with REVM. So let's start with our questions. The first question is about testing: do you use any kind of equivalence testing, or basically differential testing, as part of your CI? Yeah, on every PR, state tests are run: state tests made by the Ethereum Foundation testing team. That is a good first line of defense to check that everything is okay, that everything works. Other than that, there are fuzzing projects that run in the background at the Ethereum Foundation, by, I think, Martin from go-ethereum, with goevmlab I think being the project. So there are a few stages of testing. And this is done on every new PR? Yeah. Fuzzing is done in the background, not on every PR, but the state tests, running on WebAssembly, on some specific Rust targets, running on different targets, basically allow you to test all of those things. So are there any early architectural decisions that you regret? Yeah. I think I didn't understand how the calls and everything around them work; I still struggle with that. I haven't found a good fit on top of it. The frame, and how a frame works with other frames, seems nice, but I still need to land on a good abstraction on top of it. Initially sub-calls were made recursively: you call your sub-call, and the stack became the problem, so I needed to move all of that into the loop. So yeah, there are a few of those things. The next question: are there any plans to natively support zkVMs like SP1? Because currently you need some hacky patches to be able to run them. We should talk with them, basically. The EVM framework is an idea that would basically allow and support this. Great. Another question, again related to zkEVMs. REVM is now used on both CPUs and zkVMs, and these two environments have very different performance profiles. How can you optimize for one or the other, or both? In general everything goes through the Rust compiler, so optimization basically depends on the Rust compiler, and the target is different: zkVMs use RISC-V, while CPUs have different instruction sets that are made more performant. In general I haven't done testing on top of it, so that would be a good thing to check. I guess a follow-up here, and probably you haven't checked yourself, and it's more about the zkVMs: do you have any intuitions that some LLVM optimizations might be better for zkVMs, or that it may be better not to use some optimizations? Compiler optimizations, basically. I'm not sure.
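The "implement the trait and override only what you need" pattern for inspectors can be sketched as below. This is a hypothetical shape based on the talk's description, not REVM's exact `Inspector` trait; the hook names and the toy `run_with_inspector` loop are assumptions.

```rust
// Sketch: an inspector trait with default no-op hooks, so a user only
// overrides the callbacks they care about.

trait Inspector {
    fn step(&mut self, _pc: usize, _opcode: u8) {}  // before each instruction
    fn call(&mut self, _target: [u8; 20]) {}        // on sub-call entry
    fn call_end(&mut self, _success: bool) {}       // on sub-call exit
}

/// Custom tracer: counts executed instructions, ignores the other hooks.
#[derive(Default)]
struct OpCounter { count: u64 }

impl Inspector for OpCounter {
    fn step(&mut self, _pc: usize, _opcode: u8) { self.count += 1; }
}

/// Toy driver: hands control to the inspector at each step.
fn run_with_inspector<I: Inspector>(bytecode: &[u8], insp: &mut I) {
    for (pc, op) in bytecode.iter().enumerate() {
        insp.step(pc, *op);
    }
}

fn main() {
    let mut counter = OpCounter::default();
    run_with_inspector(&[0x60, 0x01, 0x60, 0x02, 0x01], &mut counter);
    assert_eq!(counter.count, 5);
}
```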
I didn't look at it. So what is missing in REVM for it to be more performant than evmone? I have some tasks in my backlog that basically require testing. Every change on the performance side needs to be tested, measured, and validated, so that the assumptions I made are correct. So I need to look at tail-call optimization and check it. I need to look at stack verification: if it is extracted from the instructions to before the main loop starts, whether that is going to affect performance. Is it easy to add new precompiles? Yeah, it was one of the use cases that Odyssey used a lot, and it was very easy. It's one of the easiest and biggest ways to extend REVM. What's the most interesting use case that you have seen so far? zkVMs were really, really unexpected, to be honest. It just landed, and it was like, hey, it can be used in that way. I didn't expect that. I made it nice to use in, maybe, a front-end, so that JavaScript could be implemented on top of it; maybe it could be used in Wasm by some project. But the zkVM was an unexpected surprise, and it was amazing in general. Let's thank again our speaker, Dragan, for the excellent presentation. Thank you.", "eventId": "devcon-7", - "slot_start": 1731654000000, - "slot_end": 1731659400000, - "slot_roomId": "classroom-d", - "resources_presentation": "https://docs.google.com/presentation/d/1cpw84aROzg-pzvJ3BWMKjrp6Dqvqw_OF_Xga5Rc8UU0" + "slot_start": 1731558600000, + "slot_end": 1731560400000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1Eqr32OyHNOUkt06oQXAiVNTwZse9uMoY_tw7Ag2SkQs", + "resources_slides": "https://drive.google.com/file/d/1HnfriRiCckDzm4cBDxZ0lpCUrSQhqOfC/view", + "speakers": [ + "dragan-rakita" + ] }, "vector": [ - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -636997,15 +635193,13 @@ 0, 0, 0, - 6, - 6, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -637183,7 +635377,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -637207,6 +635400,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -637249,6 +635443,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -637295,6 +635490,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -637380,6 +635576,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -637641,7 +635838,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -637768,46 +635964,40 @@ }, { "session": { - "id": "revm-endgame", - "sourceId": "VEEYFZ", - "title": "Revm Endgame", - "description": "Revm is a critical component of the Ethereum ecosystem, used by builders, toolings and clients. It is an audited and proven library that is both fast and easy to use.\r\n\r\nAs more projects adopt Revm, I feel the increasing burden of making breaking changes and the need to consolidate its functionality. That’s why I am thinking about Revm Endgame, a solution to support experimentation, Layer 2 features, and EIPs without the need for repository forks.", - "track": "Core Protocol", - "type": "Talk", + "id": "rip-7755-empowering-cross-chain-interactions", + "sourceId": "787TJ7", + "title": "RIP-7755: Empowering Cross-Chain Interactions", + "description": "Cross-chain interactions are becoming essential as Ethereum Layer 2 solutions multiply. RIP-7755 changes the game by trustlessly bridging the gap between L2 chains, allowing new use cases that rely solely on Ethereum and its rollups.
In this workshop, we’ll explore RIP-7755 by building a cross-chain NFT minting app, focusing on nested storage proof implementation details to eliminate trust assumptions.", "track": "Layer 2", "type": "Workshop", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "Cross-L2", "Rollups" ], "keywords": [ "Interop" ], "duration": 5524, "language": "en", "sources_swarmHash": "f335f509aad994029fa3bd29d0c69456d45499bee29aea62b1cd0877fa13e0c3", "sources_youtubeId": "yw-lgjdg7FY", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735836e9dbb7a90e1c0a9ff", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735836e9dbb7a90e1c0a9ff.vtt", "transcript_text": " The introduction. Yeah, in this talk I would like to talk about the Endgame, the last step of the REVM project. It started a few years ago, and I think I found the solution that's going to last for a while. Before that, let's talk about the history and basic evolution of the project. It started in 2021. I took a break from work; I basically went to the seaside, relaxed a little bit, and after I came back I wanted to do some project related to infrastructure, wanted to build something that uses the EVM, and I started researching what was on the market. There were like three things. OpenEthereum was the client that I worked on before, and it was GPL-licensed, so it was not suitable. evmodin was the new one: it was built by an ex-colleague, it had some experimental and exotic Rust nightly features, and it was just an interpreter, so it was not enough for me. SputnikVM I was most interested in. It was an older project, an older EVM, but it was not that maintained and it was hard to use. If you wanted to do inspection or tracing, you needed to build it in some different way, and that was very hard to use. So after making a PR to Sputnik to add some generics on the instruction set, and after two weeks of no response, nothing, and seeing the project not moving anywhere, I closed that PR and made my own REVM. Within a month I had some initial code that could run; that PR is fun to look at. By the end of 2021 it supported the latest forks, state tests were passing, it supported all precompiles, and most importantly the interface was simple. From my experience with OpenEthereum, I wanted to have an EVM where you set the data, say transact, and it gives you the output of that execution. The Database trait was the abstraction that allows runtime fetching of accounts and storage. The Environment was the way you set the transaction, block, configuration, everything around it. And the Inspector was a good abstraction, a very powerful one, that allows runtime inspection of your EVM while it runs. It was optimized a few times, but this PR was the most impactful: it basically 4x'd the performance. It was no_std from the start; the idea was to use it in JavaScript or Wasm. It was MIT-licensed, and by that year I basically jumped to another project, and REVM became my hobby. First adopters came the next year. Foundry was the biggest one, the first one.
The way I met Georgios: he tweeted asking if there were any EVMs out there, and I responded on the tweet, hey, fresh feedback if you're interested, and he was in my DMs within minutes. That's Georgios; he's basically amazing in that sense. They started integrating a few months later; I think Oliver did the first integration five months after the start, around March 2022. Hardhat started to get their Rust backend; they published EDR this year. Builder-searchers were among the big users of the library, and they were the first ones to sync from genesis to the tip of mainnet; it was very nice to hear, hey, we are syncing, everything is fine. By the end of 2022 all forks were supported and optimization was done; revm-js was supported but removed by the end of the year, as there was no traction. A few more optimizations were done, and most importantly Reth, the client, started to be built in October 2022. At that point I joined Paradigm to work on the client; I think I still had an itch left from my OpenEthereum times. The second year was mostly focused on the Reth client implementation. We needed to make something stable, and we did it very fast; the team was amazing. I planned to stay for just a few months, but I'm still there now, at Ithaca, either way the same team. Shanghai and Cancun were added, and I moved on. This year Reth 1.0 was released, a big stability improvement; it was audited, people liked it, people wanted to use it, it is amazing. REVM hit its own milestone: it got audited. It was community-funded, and the audit was done by basically the guy who finds a lot of EVM bugs; there is a blog post if you want to hear more. EOF got supported. And at the start of the year, zkEVMs became a thing. This year, I could claim, I feel it's okay to claim, that REVM became the most popular EVM library out there, and it has become a critical component of the Ethereum ecosystem. There are a few types of REVM users that I can see. There are clients and chains: Reth first, Helios the light client, Trin. As for the chains, there are Optimism, Scroll, Binance, Polygon; they are all interested in having REVM, or in this sense Reth, and through that connection REVM, in their code. On the tooling side, both Foundry and Hardhat are using REVM. Builder-searchers are a little bit private, but they are always in my DMs and are big users of it. And in the zkEVM space it basically became the standard library used in that field; there is even a grant by the Ethereum Foundation that targets REVM formal verification with the use of RISC-V and zkVMs. This all affected the future of REVM and how I look at it. So, the problems. Problem one: how to do EIP testing. How to allow EIP champions to come to the code base, implement their thing, test it in Foundry if needed, and basically do their own thing on their own time. One example is transient storage.",
"sources_streamethId": "673869701b0f83434dee5eaa", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673869701b0f83434dee5eaa.vtt", "transcript_text": " Okay, I think we're good to get started. Hello, welcome to the workshop for RIP-7755. This is a proposed standard for empowering low-level cross-chain calls with minimal trust assumptions. My name is Jack Chuma. I'm a senior software engineer working in R&D on the Base team at Coinbase, and I'm so excited to share with you all a little bit about this project. So, a couple of goals for today. First and foremost, I want to promote a deeper understanding of RIP-7755, what exactly it is and how it works. Second, considerations for adding new chain support in the future, as I foresee that being one of the main opportunities for open source contribution. Number three, one of the main features of the standard is that it minimizes trust assumptions, and that's done via a mechanism we're calling nested storage proofs, so I'd like to do a deep dive there and promote a deeper understanding. And then lastly, integration details: if you're an app developer and you would like to facilitate some kind of cross-chain call between L2s in the Ethereum ecosystem, how would that work if you're integrating with the standard? To give you some context and a high-level purpose on where we're coming from here, I included a screenshot from L2Beat sometime last week. What it's showing is activity in Ethereum L2s, and more specifically how it's surged by over 500% in just the last year alone, and that's being spread out over many different networks. That's why I chose this screenshot specifically: it shows a handful of networks, and this is just a small subset of how many L2s there are already. And that's only going to continue expanding. This has been great for scaling Ethereum, but it has caused fragmentation in the ecosystem: if you're a user and you want to interact with an app that's deployed to a specific chain that you maybe don't have funds on, there are certain hoops that you need to jump through to get funds to the correct location to be able to interact with that application, and that hurts the user experience. It's a critical problem that has to be solved, and so to solve that problem, we believe that there should be a standard for communication between chains that checks the following three boxes.
It is public and decentralized in the spirit of Web3; it relies solely on validation data that is trustlessly available on-chain, so minimal trust assumptions; and it has built-in flexibility to support any arbitrary message. These three bullet points were the three North Stars we kept in mind as we developed the RIP-7755 proposal, and we'll dive in now. Because Ethereum L2s post some sort of state representation to a shared execution environment, they are uniquely positioned to solve this problem with minimal trust assumptions. And this is done via a mechanism called nested storage proofs, as I previously mentioned. This allows us to prove state about one L2 from another L2, even though they don't have a direct line of communication. To understand storage proofs, I think it makes sense to do a quick refresher on Merkle trees. Of course, this is nothing new, but it's a required prereq to understand storage proofs and how they work under the hood. As a quick recap, a Merkle tree is basically a tree data structure where each node is a hash of its direct descendants. So in this diagram here, we have these four data blocks, A, B, C, and D. If you wanted to convert that into a Merkle tree, each one gets hashed, respectively, to create a leaf node for the tree. You group the nodes into groups of two, concatenate them, and hash them together; that creates their parent node. And you do that recursively until you reach the root node of the tree. Effectively what you've done is generate a unique identifier for the entire data set that is just a single hash. This has a couple of interesting properties, one being that if any of the data blocks changes in any way whatsoever, the root hash is going to change completely. This property also allows us to efficiently prove inclusion or exclusion of data within the larger data set. So in this example, if we wanted to prove that data block A was in this larger data set of A, B, C, and D, we first would need a verifier to have trustless access to the root hash represented in this root node up here. If that's in place, all we'd have to supply to the verifier is data block A, the hash-of-B node, and the hash(hashC || hashD) node. That data alone is all the verifier would need to recreate the root hash at the top of the tree: the verifier would hash A to create the hash-A node, combine hash A with hash B and hash them together to create the hash-AB node, and then do that again for the final level to recreate the root node. If that is equivalent to the stored root node that the verifier already had, then that's a successful proof. So how does that apply to Ethereum storage? Basically, all of Ethereum's state is represented as a modified form of that Merkle tree data structure called a Merkle Patricia trie. The exact differences between the two data structures are out of scope for this talk, but it's basically a handful of optimizations for Ethereum-specific use cases. All we really need to know or care about for this application is that the Merkle-proof paradigm applies here. For every block in Ethereum, there's a handful of header fields, one of which is a state root. This is the root hash of a Merkle Patricia trie for all of Ethereum's state, where the values in that trie are Ethereum accounts.
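The A/B/C/D walkthrough above can be captured in a few lines of Rust. This is a minimal sketch of generic Merkle-proof verification using the `sha2` crate; Ethereum itself uses keccak hashing and a Patricia trie, so SHA-256 here is purely to keep the example self-contained.

```rust
// Sketch: rebuild a Merkle root from a leaf plus its sibling hashes.
use sha2::{Digest, Sha256};

fn h(data: &[u8]) -> [u8; 32] {
    Sha256::digest(data).into()
}

fn h2(a: &[u8; 32], b: &[u8; 32]) -> [u8; 32] {
    let mut buf = Vec::with_capacity(64);
    buf.extend_from_slice(a);
    buf.extend_from_slice(b);
    h(&buf)
}

/// `siblings` holds (hash, current_node_is_left) pairs, leaf level first.
fn verify(leaf: &[u8], siblings: &[([u8; 32], bool)], root: [u8; 32]) -> bool {
    let mut acc = h(leaf);
    for (sib, acc_is_left) in siblings {
        acc = if *acc_is_left { h2(&acc, sib) } else { h2(sib, &acc) };
    }
    acc == root
}

fn main() {
    let (a, b, c, d) = (b"A".as_ref(), b"B".as_ref(), b"C".as_ref(), b"D".as_ref());
    let (ha, hb, hc, hd) = (h(a), h(b), h(c), h(d));
    let (hab, hcd) = (h2(&ha, &hb), h2(&hc, &hd));
    let root = h2(&hab, &hcd);
    // Proving A needs only h(B) and h(hashC || hashD), as described above.
    assert!(verify(a, &[(hb, true), (hcd, true)], root));
}
```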
These accounts could be EOAs, so any off-chain wallet like Coinbase Wallet or MetaMask, or they could be smart contract accounts. These accounts, as represented in the trie, are an array of four pieces of metadata about the account, and that's what I have listed out here: the account nonce, balance, storage root, and code hash. For smart contract accounts, it's highly likely that they're managing some type of state, and if they are, that state is stored in the contract storage. That contract storage is also represented as a Merkle Patricia trie under the hood, and, you guessed it, the root of that trie is the storage root that is stored with the account. So if we have access to a state root for a network, we can supply a path down that state trie to a specific account within the network. Then, using that account's storage root, which can be extracted from the account's metadata, we can supply another path from the storage root to a specific location in that account's storage. And that's basically the high-level concept of how a storage proof works. So how does that apply to cross-chain messaging, though? Because that's just proving state within a specific network. To explain that, I have this diagram here. It's very simplified, obviously, but it's meant to represent two rollups in the Ethereum ecosystem that are both sharing state with a shared L1. This L1 at the bottom would be Ethereum mainnet, and chain A and chain B are two L2 networks. What this diagram is depicting is bidirectional communication between the two layer 2 chains and the shared layer 1. For the downward arrow direction in both chains: an Ethereum L2 chain wouldn't be an Ethereum L2 chain if it wasn't sharing state with L1 in some context. So that's what this is representing here. It requires that both chains share some representation of their state with what I'm calling a rollup contract on layer one. This could be the state root directly, or it could be some other representation of state, the only requirement being that it has to be verifiably linkable to its state root, at least for the way RIP-7755 works thus far. Then, in the other direction, we need trustless access to a layer one state representation within the L2 chains as well. That's what the upward arrows pointing to the beacon roots oracle contracts are. This is made possible by an improvement proposal that's live today in many networks, EIP-4788, which trustlessly exposes the most recent 8,191 beacon roots of the Ethereum consensus client within the L2 execution environment. It should be noted that a beacon root is not the same as the execution client state root, but it is verifiably linkable to the L1 execution client state root via a very similar process. So, from chain A, if we were trying to prove something about the state of chain B, this diagram represents everything that needs to be true for that to work. If chain A starts with trustless access to a beacon root, it can supply a Merkle Patricia trie based proof to verify the L1 execution client state root. And once we have a verified L1 execution client state root, the exact storage proof process that I just went through applies, so we could then prove anything about the state of layer one from chain A.
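For orientation, here is the account leaf described above as a Rust struct. The field list (nonce, balance, storage root, code hash) matches the talk; the concrete integer widths are simplifying assumptions for the sketch, and in the real trie this struct is RLP-encoded as a four-item list.

```rust
#![allow(dead_code)]
// Sketch: the four-field account body stored in Ethereum's state trie.

struct StateAccount {
    nonce: u64,
    balance: u128,          // wei; the real type is a 256-bit integer
    storage_root: [u8; 32], // root of this account's own storage trie
    code_hash: [u8; 32],    // keccak256 of the contract bytecode
}

/// After the account-level proof succeeds, the storage-level proof is
/// verified against this root, not against the block's state root.
fn storage_proof_anchor(acct: &StateAccount) -> [u8; 32] {
    acct.storage_root
}

fn main() {
    let acct = StateAccount {
        nonce: 1,
        balance: 0,
        storage_root: [0u8; 32],
        code_hash: [0u8; 32],
    };
    let _anchor = storage_proof_anchor(&acct);
}
```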
In this context, the first step would be to go from the state root down a path to an account within that state trie, and the account here would be chain B's rollup contract. Then, using chain B's rollup contract storage root, we can supply another path from the storage root to a specific location in that contract. What's interesting here is that if the value you're verifying inside of chain B's rollup contract is itself a state representation for chain B, you can then recursively follow the same steps again to prove something about chain B's state trie. Starting from chain B's state root, you can supply a path to a specific account within chain B, and then again a path from that account's storage root to a specific storage location. That effectively allows us to verifiably prove a location in storage in an account on chain B from chain A, even though there's no direct line of communication. So that's cool, but how does that help us with cross-chain calls? This diagram is an overall architecture for how RIP-7755 is set up to work, and it should help answer that question. As you can see, we have two chains represented, an origin and a destination, and we have both on-chain and off-chain components here. Every supported chain is going to have some sort of inbox and outbox contract that we're calling the RIP-7755 inbox and outbox, but I'll just stick with inbox and outbox for the rest of the talk. The outbox contract is basically the entry point into the standard. If a user wants to request a cross-chain call, they submit a request to the outbox contract. If the request settles properly, it'll emit an event that some off-chain actor we're calling a fulfiller should be listening for, and if there's sufficient incentive to respond to that request, the fulfiller will. That brings us to how fulfillers are incentivized: another key piece of logic that happens here is that the user also locks some kind of reward bounty for the fulfiller to respond to the request. If the reward bounty is a sufficient incentive for the fulfiller, it will respond. So the fulfiller, assuming there is sufficient incentive, will submit the requested call to the destination chain over here. That routes through an RIP-7755 inbox contract, which performs a handful of validation steps, mainly confirming that the request is arriving at the correct chain, and at the correct location on that chain. There's also a custom, optional validation step called a pre-check contract, which could be absolutely anything as long as it adheres to a specific pre-check contract interface that the standard requires. All this is meant to do is allow the user to encode any kind of arbitrary fulfillment condition that should be true in order for the fulfillment to work out. But, like I said, it's totally optional, so if it's not being used, this step is skipped. If all the validation steps check out properly, then the requested calls are routed from there. This could be a batch of arbitrary low-level calls that go to a handful of addresses with encoded call data and any native currency value that may be included. If all of those are successful, the main purpose of this inbox contract is to then store a receipt of successful execution.
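The recursive structure just described (an L1 proof whose verified output anchors a second, L2 proof) can be sketched as a composition of two account-storage verifications. The stub below is pseudocode-style Rust under clearly labeled assumptions: `verify_account_storage` stands in for real Merkle Patricia verification, and all names are illustrative.

```rust
// Sketch: the nested proof chain, chain A -> L1 -> chain B.
type Hash = [u8; 32];
type Address = [u8; 20];

struct StorageProof; // opaque trie-proof material in this sketch

/// Stand-in: proves `account`'s slot `key` holds `value` under `state_root`.
/// A real implementation walks the state and storage tries and may fail.
fn verify_account_storage(
    _state_root: Hash, _account: Address, _key: Hash, value: Hash, _proof: &StorageProof,
) -> Option<Hash> {
    Some(value)
}

/// Prove a storage slot on chain B, starting from an L1 state root known on chain A.
fn prove_chain_b_slot(
    l1_state_root: Hash,
    chain_b_rollup_contract: Address, // chain B's rollup contract on L1
    state_rep_key: Hash,              // slot holding chain B's state representation
    chain_b_state_root: Hash,         // claimed value at that slot
    chain_b_inbox: Address,           // target account on chain B
    receipt_key: Hash,                // slot of the execution receipt
    receipt_value: Hash,              // claimed receipt contents
    p1: &StorageProof,
    p2: &StorageProof,
) -> Option<Hash> {
    // Step 1: the L1 proof pins chain B's state root inside its rollup contract.
    let root = verify_account_storage(
        l1_state_root, chain_b_rollup_contract, state_rep_key, chain_b_state_root, p1)?;
    // Step 2: recurse; the just-verified root anchors a proof into chain B itself.
    verify_account_storage(root, chain_b_inbox, receipt_key, receipt_value, p2)
}

fn main() {
    let ok = prove_chain_b_slot([0; 32], [0; 20], [0; 32], [1; 32], [0; 20],
        [2; 32], [3; 32], &StorageProof, &StorageProof);
    assert!(ok.is_some());
}
```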
And this execution receipt gets stored in a deterministic location within the contract's storage, a location that is derivable from the origin chain without knowing anything about the state of the destination chain. That's important, and we'll come back to it in a second. After the call has been successfully submitted, the fulfiller then comes back to the origin chain to say, hey, I did the job, now can I have my payment? What's unique about this standard is that the payment is only released if the fulfiller can cryptographically prove that they did actually submit the request to the destination chain, and that's done via the nested storage proof concept that we just walked through. From the origin chain, the origin chain has to be able to verify a specific storage location in the inbox contract on the destination chain, and because the outbox contract can derive exactly where that location is supposed to be, the outbox contract has everything it needs to verify successful fulfillment of the call. Only if this nested storage proof checks out will the outbox contract release the reward to the fulfiller. And that closes the loop on the full process for how RIP-7755 works. Yeah, so for today's workshop, I have an example project for us to go through together. If anyone's interested in coding along, there is a starter repository on my GitHub that I can show in a second. If you're not interested in following along, I'll be doing it up here as well, so we can go from there. I'm going to start by walking through all of the contracts and services present in the starter project. This is a self-contained system that mocks a multi-rollup ecosystem and runs locally on your machine, so once you clone the repository, you should have everything needed to run the entire app end-to-end locally. After a brief walkthrough of all the services and how they work together, we'll implement and test a nested storage-proof validation contract, as that is where the bulk of the effort will have to be applied to add new chain support in the future. I also just think it's really cool. Once that is working properly, we'll integrate with an off-chain client application. For this demo, it's just a simple NFT mint application, where the NFT owner wants to be able to support users who don't necessarily have funds on the chain that the NFT contract lives on. If they don't, an RIP-7755 request sends the cross-chain call to still mint the NFT. Once that is all set up, we should be good to run the app end-to-end. Before we jump in, I'll leave this presentation piece with this: it's still very early in the research phase, so a lot of these details are subject to change, but we have proven the concept of this nested storage proof on live networks, and shown how it can be used to trustlessly verify cross-chain calls. This seems to hold a lot of potential. It's something I'm very excited about, something the Base team is very excited about. We do have an open source proof-of-concept repository on the base-org GitHub that I fully invite anyone and everyone to contribute to if you have interesting ideas. So with that, we should be good to dive into code, but as a quick gut check, are there any questions before doing so? And there will be time for questions at the end, too, and after this talk. Okay.
So I didn't know the best way to share the link, but my GitHub is my name, Jack Chuma, and I have this DevCon 2024 RIP-7755 workshop project. If anyone's interested in coding along, you can clone this repository and follow along with me. I already have it cloned, so we can start with a brief walkthrough here. Once you have the project, you'll notice there are two main directories: contracts and services. These are the on-chain and off-chain components from that architecture diagram. We can start by going through the contracts. I'll start with the NFT contract because it is very simple, but it is what our demo client is going to be using. The main piece here is this mint function. The only reason I'm covering it is that it's needed to set up the integration when we get there, but there's nothing too interesting happening here. Next up we have a rollups directory, and this is used to mock the multi-rollup system locally. I didn't want to have to rely on a good internet connection for this to work, so we have a mock system running locally. This rollup contract is what would be deployed to a mock L1, and it is what will be storing a state representation for the mock L2s. In this context, that state representation is a hash of the L2 block timestamp and the L2 state root. Then, on the L2 side of things, we have this beacon oracle contract, which is meant to mock the EIP-4788 interface to query one of the beacon roots stored in the L2 execution environment. This is a simplified example, and it directly stores the L1 execution client state root, so we are cutting out a step of verification going from beacon root to state root, but I think it still gets the message across. And then we have all of our RIP-7755 contracts. As a brief walkthrough of what an actual request looks like, we have this RIP-7755 structs file, and this is exactly what a request looks like. For the fields, we start with a requester. This is pretty self-explanatory: the address submitting the request. We have a batch of calls, where each call is a low-level description of the exact address you'd like to send the call to, encoded call data, and any native currency value that should be included with that call. Then we have a specified prover contract. The reason this is here is that there's no standard way for L2 chains to post their state representation to L1, and because of that, the exact implementation details for verifying state about a destination chain will vary depending on what that destination chain is. So right now we have this set up with the proving logic abstracted into separate contracts. This very likely will change in the near future: if we baked it into the outbox contract, that would require multiple outbox contracts to be deployed to each chain, so it's a trade-off. But for right now, it is set up as one outbox, one inbox, and then an array of prover contracts deployed to each chain, where the user has to specify which one should be used to verify fulfillment. And this contract is what we're going to be implementing in a few minutes. Then we have the destination chain ID, which is pretty self-explanatory. The inbox contract is the address of the RIP-7755 inbox contract on the destination chain.
The L2 oracle address is the address of the rollup contract that would get deployed to L1 for the destination chain. This is the user specifying where the prover contract should look for the destination chain's state representation when it's verifying that the call was submitted. So the user is specifying the address where that should be located, as well as the storage key within that address. Then we have a reward address. This could be an ERC-20 address, or, as specified by ERC-7528, there's a special address value that can be used to depict native currency. Then there's the reward amount, which is the amount of the reward asset that should be locked within the request. It should be noted that the reward amount should cover all of the value included in these calls, plus whatever the gas cost would be for submitting the call to the destination chain, plus an extra tip for the fulfiller. That extra tip is what acts as the incentive for the fulfiller to respond to the request. Then we have finality delay seconds. This is the gap, after the call is successfully fulfilled on the destination chain, that has to pass before the fulfiller is allowed to claim the reward. This is basically destination chain reorg protection for the user: the higher this delay is, the more likely the destination chain won't be reorged, ensuring that the call was actually submitted and will stay submitted. We have a nonce value for ensuring that every request is unique, since the way we identify these requests is by hashing the entire structure. There's an expiry field for when the request should expire. This is relevant for the user being able to reclaim the reward if, for whatever reason, the call was never submitted to the destination chain. There should be a mechanism for the user to recover those funds, and that's what this expiry timestamp is: if the call is not submitted before the expiry timestamp, the user can reclaim. And then we have the last two fields for the pre-check contract: a pre-check contract address and an arbitrary bytes array of encoded data for that pre-check. This is for that optional, arbitrary fulfillment condition that should be true. If this is the zero address, that means we're not going to use it. We won't be doing a pre-check step in today's demo, but I figure it's worth covering that it's there anyway. For the next step, we'll do a quick walkthrough of the inbox and outbox contracts, the outbox contract being the entry point to the system. We have one main function that we really care about here, request cross-chain call. That's where the user submits the request and where the event is emitted that the fulfillers are listening for. Then we have a claim reward function, which is where the fulfiller comes to claim their reward after successfully fulfilling the request, and that calls into the prover contract on this line. That prover contract is what we're about to implement; it's expected to revert if the proof fails, so there's no return value or anything here. And then lastly, the cancel request function: after the expiry timestamp, if the reward has not been claimed yet, the user gets to reclaim it. On the destination chain side, we have the inbox contract.
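Before moving on to the inbox, here is a Rust mirror of the request fields just enumerated, purely for orientation. The real struct is Solidity; the field names below paraphrase the talk and are not the canonical RIP-7755 definitions.

```rust
#![allow(dead_code)]
// Sketch: the cross-chain request, as described in the walkthrough.
type Address = [u8; 20];
type Bytes32 = [u8; 32];

/// One low-level call: target, calldata, attached native value.
struct Call { to: Address, data: Vec<u8>, value: u128 }

struct CrossChainRequest {
    requester: Address,             // address submitting the request
    calls: Vec<Call>,               // batch to execute on the destination chain
    prover_contract: Address,       // which prover verifies fulfillment
    destination_chain_id: u64,
    inbox_contract: Address,        // RIP-7755 inbox on the destination chain
    l2_oracle: Address,             // destination chain's rollup contract on L1
    l2_oracle_storage_key: Bytes32, // where its state representation lives
    reward_asset: Address,          // ERC-20, or ERC-7528's native-asset address
    reward_amount: u128,            // must cover call value + gas + fulfiller tip
    finality_delay_seconds: u64,    // reorg-protection delay before reward claim
    nonce: u64,                     // uniqueness; requests are identified by hash
    expiry: u64,                    // after this, the requester can reclaim
    precheck_contract: Address,     // optional; zero address means "skip"
    precheck_data: Vec<u8>,
}

fn main() {
    let req = CrossChainRequest {
        requester: [0; 20], calls: vec![], prover_contract: [0; 20],
        destination_chain_id: 1, inbox_contract: [0; 20], l2_oracle: [0; 20],
        l2_oracle_storage_key: [0; 32], reward_asset: [0; 20], reward_amount: 0,
        finality_delay_seconds: 0, nonce: 0, expiry: 0,
        precheck_contract: [0; 20], precheck_data: vec![],
    };
    let _ = req.calls.len();
}
```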
The main piece of information we care about here is this fulfillment info struct. This is the execution receipt that should be created in storage, and it will be the target of the nested storage proof validation; the whole point of the storage proof there is to prove that this struct exists in storage for the specific request. It stores the timestamp at which the request was submitted, as well as the filler address that should be able to claim the reward back on the source chain. And it gets created during this fulfill function. We have all our validation steps up here, then we route the calls here, and if everything's successful, we're left with the created fulfillment info struct. So this contract is fairly simple. Something a little more interesting: we have this state validator library. If, in the future, you are working on setting up a new prover contract for a new destination chain that's not currently supported by the proposal, you would likely be utilizing the state validator library. The whole point of it is to abstract a lot of the complexity involved with storage proofs away from the developer; no need to reinvent the wheel every time. The way this is set up, there are two main functions that we care about: validate state, and validate account storage. Validate state is for when you're starting from the beacon root on L1 and trying to verify the L1 execution client state root against that beacon root. That's what validate state does, and from there, using the verified state root, it proves a storage location for an account within that state. Because our example today is not using beacon roots, we don't need this function, but I figured I'd briefly cover it. The function we care about is validate account storage. This takes in an account, which is a specific account within the network, a state root for the network, and a handful of these account proof parameters. These account proof parameters are a specified storage key, an expected storage value, and then an account proof and a storage proof. The account proof can be thought of as the path down the state trie from the state root to the specific account we care about, and the storage proof can be thought of as a path from that account's storage root to the storage location that exists at the storage key and should be storing the storage value. It should be noted that all of the values in the state trie are keyed by the hash of the Ethereum address of that account. So that's what we're doing here: we derive the account key, and then, using that, we can do a MerkleTrie.get to return an encoded account. An encoded account is basically an encoded array of the account metadata that I went through in the slides earlier: the nonce, the balance, the storage root, et cetera. From that, we can extract the storage root, and then, using the storage root, we can verify the storage location using the storage proof that exists in the account proof params struct I just went through. At a high level, that's how it works. There's a directory in here called provers. This is what we're about to implement, so we'll be coming back to it in a second.
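Here is a small Rust sketch of the execution receipt just described, plus the idea that it lives at a key derivable from the request hash, which is what lets the origin chain compute where to look without reading the destination chain. The hashing detail is illustrative (using the `sha3` crate), not the standard's exact storage layout.

```rust
// Sketch: the "fulfillment info" receipt and its deterministic key.
use sha3::{Digest, Keccak256};

struct FulfillmentInfo {
    timestamp: u64,   // when the call was fulfilled on the destination chain
    filler: [u8; 20], // who gets to claim the reward on the origin chain
}

/// Requests are identified by hashing the encoded request struct; the
/// receipt's storage key is derived from that identifier. A real contract
/// would also fold in the mapping's base slot number.
fn receipt_storage_key(encoded_request: &[u8]) -> [u8; 32] {
    Keccak256::digest(encoded_request).into()
}

fn main() {
    let key = receipt_storage_key(b"example-encoded-request");
    let _info = FulfillmentInfo { timestamp: 0, filler: [0; 20] };
    println!("{:x?}", key);
}
```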
Just real quick, before we dive into the implementation details there, I want to do a quick summary of what off-chain services are running here. The demo is the app that facilitates the NFT mints, so we'll be implementing some details in this directory. The other two directories are surrounding services that are needed for the full system to run locally. The syncer is in charge of sharing state representations bidirectionally between the mock L1 and the mock L2s. And then the fulfiller is the off-chain agent that listens for requests, validates each request, and ensures that the incentive is enough to compensate it for its time. It will respond accordingly and submit the request, and then it will generate a full nested storage proof that gets validated against the contract we're about to implement. If that checks out, we'll be able to see the fulfiller claiming its rewards in real time. So with all of that said, we have enough here to dive into implementing a prover contract. I have a handful of imports already set up in here just to save the time of typing them out. You'll notice the first import is an IProver interface, so we can start by taking a look. What it defines is a single function, and this is all the prover contract needs, because this is the function that the reward claim function from the outbox contract is going to hit. We can start by literally copying this entire thing into our prover contract to initiate the implementation of that function. So I will replace this comment with that function declaration and add empty curly braces here, and then we can extend that interface, IProver. Let's take a quick skim through the comments here that explain what validate proof should be doing. It validates storage proofs and verifies fulfillment: okay, makes sense. It should revert if the storage proof is invalid: also makes sense. It should revert if fulfillment info is not found at the inbox contract storage key on the specified inbox contract. That is kind of interesting here. It should be noted that the storage key, like I was saying before, is derivable from a network that doesn't actually have context of the destination chain, and because of that, this is being done in the outbox contract before this function is hit. So this is not coming from the off-chain fulfiller; we can trust this value. Lastly, it should revert if the fulfillment info timestamp is less than the finality delay seconds amount of time from the current destination chain block timestamp. This is that destination chain reorg protection I was mentioning earlier; we need to ensure that the finality delay is not currently still in progress. Then we have a note that the implementation should vary by destination L2. This is due to the lack of standardization around how L2s post their state representations to L1, like I was saying. And then a quick summary of the input parameters we have to work with here. The inbox contract storage key, as I just mentioned, is the storage location in the inbox contract on the destination chain where we expect the execution receipt to be. Next up is the fulfillment info struct: the exact execution receipt that should exist at the inbox contract storage key on the destination chain's inbox contract. Then we have the initial request that came from the user. And then we have an arbitrarily encoded proof data bytes array.
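The finality-delay revert condition above is easy to get backwards, so here is a minimal Rust sketch of the check. The real check lives in Solidity inside the prover; the function and struct names here are hypothetical.

```rust
// Sketch: reward may only be claimed once the fulfillment has sat on the
// destination chain for the full delay (reorg protection).

struct FulfillmentInfo { timestamp: u64, filler: [u8; 20] }

fn check_finality_delay(
    info: &FulfillmentInfo,
    finality_delay_seconds: u64,
    dst_chain_timestamp: u64, // taken from the proven destination chain state
) -> Result<(), &'static str> {
    if info.timestamp + finality_delay_seconds > dst_chain_timestamp {
        return Err("finality delay still in progress");
    }
    Ok(())
}

fn main() {
    let info = FulfillmentInfo { timestamp: 100, filler: [0; 20] };
    assert!(check_finality_delay(&info, 50, 200).is_ok());  // delay elapsed
    assert!(check_finality_delay(&info, 50, 120).is_err()); // too early
}
```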
This is because, as I was saying before, of the lack of standardization around state posting: there could be subtle differences in the exact data the prover needs to verify that state. So there's no enforced structure to this data at the outbox level; it is implemented here within the prover, and we'll start to set that up in just a moment. Okay, with all of that gone through, we have enough here to start setting this up, so we can think about the steps for validating a nested storage proof. For starters, we'll want to enforce a structure on the proof data, so we can decode it into a defined struct that we'll declare up here. So: step one, decode proof data. Next, with the decoded proof data, we can use some supplied data to trustlessly access the beacon root from L1. So: step two, query the L1 state representation. In a real network, or if the network supports EIP-4788, this would be a beacon root; for today's example it's the state root directly, so I'll just make a brief comment explaining that: in a real network, likely a beacon root; in today's demo, the state root. Then step three: once we have a state representation for L1, using the L1 state root, we'll verify a storage location on L1, and that storage location will be in the destination chain's rollup contract; this should be the destination chain's state representation. Step four: we'll need to use that verified value to verifiably link a state root for the destination chain, so we can add that as a step here: verify the link to the destination chain's state representation. After that step, we should have a verified state root for the destination chain, so then we can essentially repeat step three again. Step five: using the L2 state root, verify the execution receipt in the inbox contract on the destination chain. And lastly, the only step we haven't covered is the revert statement saying that if the finality delay is still in progress, this function should revert, so we can check that as our final step. Step six: revert if the finality delay is in progress. If we can successfully set up these six steps, we should be good to go to verify these nested storage proofs. If you look up here, you might have noticed this contract expects to be deployed with an address in the constructor. This address is for the beacon roots oracle contract. On a live network you wouldn't have to do this, because EIP-4788 specifies a deterministic precompile-style address that you could just hard-code into your contract, but for this to work both in tests and deployed to our local network, I have it deployed with the address specified here. So, as our first step, we can store this as an immutable and call it beaconRootsOracle, something like that, and then assign it within the constructor. Then, thinking about how these steps work, the first step being that we want to decode proof data into some specified structure that we're going to define, we can start by defining a struct, RIP7755Proof. The compiler is going to be mad about the struct being empty, but we'll come back to that in a second.
With that in place, we can set up the first step by decoding the proof data into a local variable called proof that adheres to this RIP7755Proof struct. So if we copy this and paste it here: this will be in memory, equal to abi.decode of proofData, with the name of the struct passed as the second argument. This decodes the proof data bytes into whatever structure we define at the top of the file. For step two, we then want to query the L1 state representation. This comes from that beacon oracle contract in the rollups directory I covered briefly. If we pull it up to look at how exactly we should query it: it has a fallback function, there to mimic the interface used to query a beacon root from the real beacon roots oracle contract on a live network, and all it takes is an encoded block timestamp. So that's actually the first piece of data we need to add to our RIP7755Proof struct: the L1 block timestamp we're going to use for the proof. We can add that as a uint256 and call it l1Timestamp. Using that, we can set up a staticcall into the beacon oracle contract. If you remember, we have the address stored as the immutable variable here, so we can copy that and paste it under step two. That'll be beaconRootsOracle.staticcall, which is like a low-level call, but kind of like a view function: it's expected not to mutate any state at the address being called. And we pass in the encoded block timestamp for the L1 chain, which we just added to the proof, so it's available via abi.encode with proof.l1Timestamp passed in. The staticcall returns a tuple where the first value is a boolean, which we can call bool success, and the second value is a bytes array in memory: bytes memory data. With any low-level call on an EVM chain, you need to confirm that success comes back true, because if something weird happens with this address or the staticcall fails for some unexpected reason and success comes back false, we need to make sure we revert the transaction. For that case we can add a custom error at the top of the file for when the staticcall fails, called BeaconRootsOracleCallFailed. Copy that, and underneath this line: if not success, revert with that custom error. If we get past this if statement, we have a returned data bytes array representing an encoded version of the L1 state root. So after the if statement we can decode data into a bytes32 state root: bytes32 l1StateRoot equals another abi.decode, with data passed in and bytes32 as the data type. Okay, that should do it for step two. At this point we have an L1 state root that we can use in a storage proof to verify something about the state of L1, which is exactly what step three lays out: using the L1 state root, we want to verify the storage location in the destination chain's rollup contract on L1. This is going to be the state representation for the destination chain. To get more context for how that should work, let's take a look at the rollup contract, because that's where the destination chain posts its state representation.
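Before we open the rollup contract, here's steps 1 and 2 assembled, under the naming assumptions above.

```solidity
// Inside validateProof:

// Step 1: enforce a structure on the otherwise-opaque proofData bytes.
RIP7755Proof memory proof = abi.decode(proofData, (RIP7755Proof));

// Step 2: ask the beacon roots oracle for the L1 state representation at
// proof.l1Timestamp. staticcall = low-level call that must not mutate state.
(bool success, bytes memory data) =
    _beaconRootsOracle.staticcall(abi.encode(proof.l1Timestamp));
if (!success) revert BeaconRootsOracleCallFailed();

// In today's demo the oracle returns the L1 state root directly.
bytes32 l1StateRoot = abi.decode(data, (bytes32));
```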
What's kind of interesting here is that on a lot of live networks, the exact storage location where the state representation will live isn't necessarily known at request time. In that case, the request just knows the storage slot where the data structure is located. So, like in this example, the mapping storing output roots lives at storage slot one. In this context, a request specifying the destination chain's storage location in its rollup contract on L1 would just specify storage slot one, and then we'd have to take that and derive the location of the mapping value based on the destination chain's L2 block timestamp. That brings us to the next piece of data we need for this proof: the block timestamp for the destination chain. We can add it as another uint256 inside the proof, and this can be l2BlockTimestamp. By the way, for anyone following along, try to stick to the exact names I have here for the fulfiller proof to work properly; the logic by itself should work fine either way, but for this to be compatible with the surrounding system, we have to use the correct names. With the L2 block timestamp in place, we have enough to derive the storage location of the output root associated with that timestamp inside the rollup contract. How do we do that? If we look down here at step three, we're going to be using the L1 state root. This is the first actual storage proof we'll use, and it's where we'd want to look at that StateValidator library again, because it has a bunch of utility functions for facilitating storage proofs directly, namely the AccountProofParameters struct, which is going to come in handy, and that second function I mentioned for validating account storage. We'll be using this function here; it takes an account, a state root, and proof parameters that should be provided by the fulfiller. So we can set that up now, starting with StateValidator.validateAccountStorage (my autocomplete added the variable names here). So what's the account? It's the rollup contract for the destination chain. If you remember from the structs file I walked through at the beginning, one of the fields specified by the requester is this l2Oracle address, and that's exactly the account we care about for this first storage proof. We can pass it in here; it comes from the request being passed in, so this will be request.l2Oracle for the account. The state root is the l1StateRoot we just decoded, so we can use that. And the account proof parameters, that AccountProofParameters struct inside the StateValidator, will be supplied by the off-chain fulfiller, so it's the next piece of information we need in our proof struct. We copy it and add it as a third field in the proof struct: StateValidator.AccountProofParameters, and we'll call this one dstL2StateRootProofParams. Then we copy that field name, and it becomes the last argument passed into validateAccountStorage: proof dot that copied field name. If you look at the validateAccountStorage function, you can see it returns a boolean value, so we need to ensure the returned boolean is true. We can capture it in a local variable: bool isValidL1State equals the StateValidator.validateAccountStorage call. For when it's not a valid L1 state, we define a custom error up top, error InvalidL1State, copy that, and then if not isValidL1State, revert with that custom error. And there we go.
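Step 3 assembled, including the success check just described. The StateValidator signature is as sketched earlier, so treat it as an assumption.

```solidity
// Step 3: prove the dst chain's state representation sits at the expected
// location in its rollup contract (request.l2Oracle) on L1.
bool isValidL1State = StateValidator.validateAccountStorage(
    request.l2Oracle,               // account: dst chain's rollup contract
    l1StateRoot,                    // verified in step 2
    proof.dstL2StateRootProofParams // supplied by the off-chain fulfiller
);
if (!isValidL1State) revert InvalidL1State(); // error declared at top of file
```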
So what we have happening here is that after this step, we should have a verified storage location on L1. But to jog your memory on the params being passed in by the fulfiller, there's something kind of fishy about the way we currently have this set up. Those account proof parameters specify an exact storage location and expected value, so if this checks out and returns true, it means we've successfully validated that location. But what if it's the wrong location? What if it's not the state representation for the destination chain? That would be a problem. So this is where we want to override the storage key. We could either derive what it should be and confirm it's equivalent, or we could override it; for this example we'll override it, though maybe there's a subtle gas optimization one way or the other. For starters, we'll want a helper function for deriving what the L1 storage key should be. We can create that down here as a private helper, deriveL1StorageKey: private pure, returns bytes memory, because as you see up here, the storage key is a bytes string, not a bytes32 or anything. This is the storage key of where the state representation should be in the rollup contract on L1. Let's look at the rollup contract again to see how its storage layout is set up: we have a mapping located at the first storage slot, which, inside the structs file, is what the l2OracleStorageKey is going to be, so I'd expect that to be the bytes32 representation of the number 1. We take that and hash it with the L2 block timestamp, which under the hood is what Solidity does to generate the storage location for a mapping value keyed by the block timestamp. That's exactly what we recreate here. To do that, we need a couple of pieces of information: one is the l2OracleStorageKey, which comes from the request, so we pass the request in (copy this whole thing, pass that in); the other is the L2 block timestamp, which, if you remember, we added to the proof struct up here, so we pass that in as well, copying the declaration as a second input argument. And what does this return? A derived value for the storage key where the L2's output root should exist. So we return an abi.encodePacked with the block timestamp (proof.l2BlockTimestamp) and then the storage key location (request.l2OracleStorageKey) passed in. That concatenates them into a single bytes array, which we then need to hash to generate the storage key, so we wrap the whole thing in a keccak256 hash function. One final step: because keccak256 returns a bytes32 and we need this to return bytes memory, we have to wrap it in one more abi.encode. That should be all we need to derive the L1 storage key. Now we can close these out and use this function to overwrite the storage key that gets passed into the validation step: proof.dstL2StateRootProofParams.storageKey equals deriveL1StorageKey, passing in the two input parameters, request and proof. Okay, sweet.
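A sketch of that helper, under the assumption that the mapping lives at the slot encoded in request.l2OracleStorageKey.

```solidity
// Derive where the output root keyed by proof.l2BlockTimestamp must live in
// the rollup contract. For a mapping at slot `s`, Solidity stores the value
// for key `k` at keccak256(abi.encode(k, s)); for two 32-byte words,
// encodePacked produces the same bytes.
function _deriveL1StorageKey(
    CrossChainRequest calldata request,
    RIP7755Proof memory proof
) private pure returns (bytes memory) {
    return abi.encode(
        keccak256(
            abi.encodePacked(proof.l2BlockTimestamp, request.l2OracleStorageKey)
        )
    );
}
```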
At this point we have a verified value in the destination chain's rollup contract on L1, we've confirmed it's in the correct location, and so we can trust it's the proper state representation. Now we can move on. The next step is to verifiably link the destination chain state root to that verified state representation. If the destination chain were directly posting its state root, this would be unnecessary: we'd already have a verified state root. But in this example we're not directly posting it; the representation is a hash of the block timestamp and the state root, so we need to recreate what this output root should be. We can re-derive it into a local variable: bytes32 derivedL2OutputRoot equals keccak256. What gets passed in? We're basically recreating this line over here in the rollup contract, so it's an abi.encodePacked, and inside it we want to pass the block timestamp and a state root. But at this point we don't have a state root to use, so now is the perfect time to add one as the next field in our RIP7755Proof struct. We expect the fulfiller to supply what the destination chain's state root is, and then we use it to re-derive the state representation verified by the first storage proof; if they're equivalent, we can trust the passed-in L2 state root. So this will be a bytes32 l2StateRoot. We need to pass these arguments in the same order they're passed over here: we start with the L2 block timestamp, proof.l2BlockTimestamp, and then the state root, proof.l2StateRoot. Okay. At this point we have a re-derived output root for the destination chain. For the case where it doesn't equal the value we just verified, we create another custom error to revert with: error InvalidL2StateRoot. Copy that. Then we compare it to the storage value we verified in the previous step: if derivedL2OutputRoot does not equal proof.dstL2StateRootProofParams.storageValue, revert with that custom error. This is yelling at me because the storage value is a bytes string and this is a bytes32; since we expect them to be equivalent, we can wrap the storage value in a bytes32 conversion for Solidity's type safety.
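Step 4 assembled, together with the storage key override from the previous step (the bytes-to-bytes32 conversion requires Solidity 0.8.5 or later).

```solidity
// Override the fulfiller-supplied key so step 3 can only ever verify the
// expected slot (this assignment goes just before the step-3 validation):
proof.dstL2StateRootProofParams.storageKey = _deriveL1StorageKey(request, proof);

// Step 4: re-derive the output root from the claimed L2 state root and check
// it against the storage value proven in step 3.
bytes32 derivedL2OutputRoot =
    keccak256(abi.encodePacked(proof.l2BlockTimestamp, proof.l2StateRoot));
if (derivedL2OutputRoot != bytes32(proof.dstL2StateRootProofParams.storageValue)) {
    revert InvalidL2StateRoot();
}
```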
One other thing I'm noticing here, and this is just a personal preference of mine: because we're using the StateValidator library on a specific account, we can bind the library to the address type to improve the legibility of these calls and make them a little more succinct. So I'll do that, but it's totally a personal preference: we add a line at the top of the prover contract that says using StateValidator for address. What that allows us to do is copy this request.l2Oracle, remove it from the function call, and paste it in place of StateValidator; it does the same thing, just a little shorter. Okay. So at this point we have a verified destination chain state root. Now we can use it to basically redo step three, but this time the account we care about is the inbox contract on the destination chain. That looks like: bool isValidL2State equals... and to jog your memory again on the structure of a request, the inbox contract is defined by the user when they submit the request, so that's the address we're verifying state against: request.inboxContract dot, what's it called again, validateAccountStorage. In here we need to pass a state root and another instance of the proof params. The state root comes from the proof struct, and at this point we've verified it can be trusted, so we use proof.l2StateRoot. Then we need another instance of those proof params, this time for the inbox contract on the destination chain: we can duplicate that field, and instead of dstL2StateRootProofParams, call it dstL2AccountProofParams. We copy that field name down here for the second storage proof and pass it in as the second argument: proof dot that destination account proof params field. Again, for the case where isValidL2State comes back false, we need a custom error to revert with, defined up here: error InvalidL2State. Copy that, and then if not isValidL2State, revert with that custom error. Cool. Now, if you remember the first storage proof, we had an issue trusting the storage key passed in from the fulfiller; you can assume we have the same issue in the second storage proof, and we do. Luckily it's a little easier to solve on this side, because, like I said at the beginning of the walkthrough, the outbox contract already re-derives where that storage key location should be, and it gets passed in. So we can just reassign the storage key with the passed-in inboxContractStorageKey. We'll do that above step five: proof.dstL2AccountProofParams.storageKey equals inboxContractStorageKey. That ensures we're verifying against the correct storage location where the execution receipt should exist in the inbox contract on the destination chain. At this point we have a fully verifiable proof. The last step is confirming that finalityDelaySeconds is not still in progress. We have the receipt passed in here, so we can use it for this check.
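Step 5 assembled, with the trusted key override placed just before the validation call.

```solidity
// The outbox already re-derived this key, so it can be trusted:
proof.dstL2AccountProofParams.storageKey = inboxContractStorageKey;

// Step 5: same account-storage proof, now against the inbox contract on the
// dst chain, using the L2 state root linked in step 4. The bound-library
// syntax works because of `using StateValidator for address;` at the top.
bool isValidL2State = request.inboxContract.validateAccountStorage(
    proof.l2StateRoot,
    proof.dstL2AccountProofParams
);
if (!isValidL2State) revert InvalidL2State();
```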
For when the delay is still in progress, we add our last custom error at the top: error FinalityDelaySecondsInProgress. Copy that, and then down at the bottom of the function: if fulfillmentInfo.timestamp plus request.finalityDelaySeconds is greater than... if you remember, for the proof we defined the L2 block timestamp up here, which is exactly what we need for this timestamp-based protection, so proof.l2BlockTimestamp. If the timestamp at which it was submitted plus the configured finality delay is greater than the time we're using for this proof, we can't accept the proof, because finalityDelaySeconds is still in progress. That's what this line is doing, so we revert with that new custom error, FinalityDelaySecondsInProgress. Cool, that's all of our steps. But there is one final data connection we're forgetting, and it stems from this: we confirmed a specific storage location in the inbox contract on the destination chain, and we confirmed that a passed-in receipt satisfies the finalityDelaySeconds requirement, but we did not confirm that the receipt used for this final check is the same value as the one we just verified. So we need to make sure the storage value for the second storage proof equals the encoded execution receipt used for the timestamp check in the last step. That's the final piece we have to set up to secure this thing. We can set up the encoding of the FulfillmentInfo struct as a separate private helper function; let's call it encodeFulfillmentInfo. For encoding fulfillment info, we pass in the struct we're using for the validation up here, and inside it's a simple abi.encodePacked: return abi.encodePacked with fulfillmentInfo.filler and then fulfillmentInfo.timestamp passed in. Because of the struct packing rules in Solidity storage, we have to custom-encode this struct. If I show you the definition of the struct in the inbox contract again, it has two fields, timestamp and filler. The timestamp is a uint96 and the filler is an address, so they get packed into a single 256-bit slot. But they get packed in the order of the defined fields, into the lowest-order bits first, so the timestamp ends up on the right side of the storage slot and the filler on the left. To recreate that alignment, we use abi.encodePacked with the fields in reverse order, instead of doing abi.encode and passing in the entire struct, which would leave them in the wrong order. Using this function, we can now override the storage value for the final storage proof: proof.dstL2AccountProofParams.storageValue equals encodeFulfillmentInfo, passing in fulfillmentInfo. Excellent. That should be our first pass at a full implementation for validating one of these nested storage proofs. We can now see if it compiles: cd into the contracts directory, run forge fmt first (I like to format it properly), and then forge build to check that it compiles. It does. Now, to check the logic, I have a test file in here with mock data from a working system. It's commented out just to prevent compiler issues with the initial structure of the project; we can uncomment it and run forge test to make sure we implemented everything properly.
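Before looking at the test run, here's the tail of validateProof plus the encoding helper, assembled from the narration. The storageValue override goes before the step-5 validation call so that the proven value is the encoded receipt.

```solidity
// Tie the receipt used below to the storage value actually proven in step 5
// (this assignment goes before the step-5 validation call):
proof.dstL2AccountProofParams.storageValue = _encodeFulfillmentInfo(fulfillmentInfo);

// Step 6: revert if finalityDelaySeconds is still in progress.
if (fulfillmentInfo.timestamp + request.finalityDelaySeconds > proof.l2BlockTimestamp) {
    revert FinalityDelaySecondsInProgress();
}

// FulfillmentInfo packs both fields into one slot: timestamp (uint96) in the
// low-order bits (right), filler (address) in the high-order bits (left),
// hence encodePacked with the fields in reverse declaration order.
function _encodeFulfillmentInfo(FulfillmentInfo calldata fulfillmentInfo)
    private
    pure
    returns (bytes memory)
{
    return abi.encodePacked(fulfillmentInfo.filler, fulfillmentInfo.timestamp);
}
```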
It will recompile the test file and... okay, cool, it's passing. This is using a previously generated proof that adheres to the structure we just set up in that RIP7755Proof struct, and it's being proven against a specific state root that's assigned here via the commit beacon root function in the beacon oracle contract. Since this is passing, it's a good sign that our implementation is working properly. That wraps up the on-chain implementation piece. At this point, we're good to take a step into the off-chain world and look at the integration for this NFT minting site. I'll close the contracts directory for now, although we'll be back shortly to reference different function signatures. Inside the services directory, we care about this demo directory. Taking a brief walk through it: it's literally just a backend server script to run, and it tells you what it's doing. It will display your current NFT balance for the NFT contract that's going to get deployed (we're calling it deployed on a mock Arbitrum chain, and the user will be minting it from mock Base). So it displays your NFT balance on mock Arbitrum, your current ETH balance on mock Base, and the price of the NFT, which I think I have hard-coded as one ETH in the deployment script; this is all on local Anvil nodes, so using easy numbers is easy. And then lastly, once you hit enter, it triggers minting the NFT, and we can watch the fulfiller in action generating its proof after the fact, which gets verified against that prover contract we just set up. There's not too much for us to change here; I just wanted to give you a rundown of exactly what's happening. What we care about is inside the src directory: there's a client file called clients.service. This is all in TypeScript, if anyone's familiar. There's a function called rip7755Mint that is completely empty, and this is where the developer work is needed: we have to set this up as an RIP 7755 request to integrate the cross-chain call and close the loop for this minting process. We can start by setting up what this request is going to look like. To do that, it helps to have the structs file up as a reference, so I'll open that. Actually, before we dive into that, as a refresher: in the outbox contract, the function we care about as the entry point is requestCrossChainCall, which accepts one input argument, a CrossChainRequest. So really what we need to build here is a CrossChainRequest with all the correct fields, and that's what I want to show you. To start, we can get an outline of what this request is going to look like, as well as the low-level call we need it to facilitate. We can start with const calls. It's just one call here, but it still needs to be set up as an array, because it's meant to support a batch of calls. Then we'll have a const request, which starts as an empty object. We can begin by just getting the field names in here; I'm going to assign them all empty values to start, just to motor through this, and then we'll go through each one individually to make sure we set it up properly and explain everything as we go. So we need to add the prover contract.
And again, I'm just copying everything to try to prevent silly typos, so bear with me for a second. Pass in the inbox contract, L2 oracle, L2 oracle storage key, reward asset, reward amount initialized at zero, finality delay seconds initialized at zero (we'll come back to these in a second), nonce at zero, expiry at zero, and then the final two precheck-related fields, precheck contract and precheck data. And then for the individual call we want to set up here, it just takes three parameters: to, data, and value. So we can add to, data as an empty string, and then value. Okay, so we have our structure outlined. Now we need to make sure we set up each of these fields properly, and it helps to have an understanding of exactly what each field is, which by now we all should. To help with this, inside the common directory there's a constants file with a couple of chain configs, one for mock Arbitrum and one for mock Base. This is going to be very helpful as the place to pull addresses from. Assuming everyone deploys the contracts using the provided deployment scripts, these addresses are deterministic, so if the contracts are deployed in the right order, the addresses should all be correct, which is why they're pre-populated in this file even though the contracts aren't currently deployed on our local network. Okay, let's start filling these out. For calls, the destination address: again, what exactly are we trying to do? We want to mint an NFT, so the to address is going to be the NFT address, which we have up here as the mock Arbitrum NFT address. At the top of this client service file we can import that: import the mock Arbitrum NFT address from ../common/constants. Then we can copy it into our to field. Data is going to be the encoded calldata for the mint function; I'm going to leave that for the last step and come back to it. For value: as you see here, this function is called with an address (the user address receiving the NFT) and the mint price, which is the price of the NFT denominated in wei, so we can pass the mint price in as the value directly. Now, as we start to fill out the request: the requester is pretty self-explanatory; like I said, the address of the requester is being passed in, so we can use that. Calls is going to be this calls array. The prover contract is what we just implemented: it's the contract address on the source chain (mock Base, in this context) of the prover contract implemented specifically to validate state about the destination chain (mock Arbitrum, in this context). If we look over here, there's only one defined for each chain, but this is set up so that multiple prover contracts can be defined per chain. This part is pretty straightforward. Well, first we have to import the chain configs: back up at the top of the file, we can import them from the same file we just imported the NFT address from. Then, down in our mint function, we can destructure the mock Arbitrum and mock Base configs from the outer object containing the chain configs. That looks like const, curly braces with mockArbitrum and mockBase, equals chainConfigs.
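Here's roughly where the function stands at this point, with placeholder values still to be filled in. Identifiers like chainConfigs and mockArbitrumNftAddress, and the config shape, are assumptions based on the narration, not copies of the repo's code.

```typescript
import { chainConfigs, mockArbitrumNftAddress } from "../common/constants";

async function rip7755Mint(address: `0x${string}`, mintPrice: bigint) {
  const { mockArbitrum, mockBase } = chainConfigs;

  // Batch of low-level calls -- just one here: mint the NFT on mock arbitrum.
  const calls = [
    {
      to: mockArbitrumNftAddress,
      data: "0x", // encoded mint calldata, filled in later
      value: mintPrice,
    },
  ];

  // Skeleton request; each field gets filled in below.
  const request = {
    requester: address,
    calls,
    proverContract: "0x", // to fill
    destinationChainId: 0n,
    inboxContract: "0x",
    l2Oracle: "0x",
    l2OracleStorageKey: "0x",
    rewardAsset: "0x",
    rewardAmount: 0n,
    finalityDelaySeconds: 0n,
    nonce: 0n,
    expiry: 0n,
    precheckContract: "0x",
    precheckData: "0x",
  };
}
```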
Now we can access all of these config fields directly on mockArbitrum and mockBase. For the prover contract, because this is the Base side of the equation, this will be mockBase.proverContracts.prover. The destination chain ID is mock Arbitrum's chain ID, so mockArbitrum.chainId. The inbox contract is the address of the RIP 7755 inbox contract on mock Arbitrum, so mockArbitrum.contracts.inbox. The L2 oracle is the rollup contract on L1 for mock Arbitrum, so mockArbitrum.l2Oracle. Same thing with the L2 oracle storage key: this is for mock Arbitrum's L2 oracle contract, so we grab it from the same place, mockArbitrum.l2OracleStorageKey. Then for the reward asset: for this example we're just going to use native currency, because we're sending native currency for the mint function. It doesn't necessarily have to be one-to-one like that; we could pass in some ERC-20 and expect the fulfillers to do the necessary conversions off-chain to make sure the ERC-20 covers the value of the native currency being passed in here, but it's a little cleaner for the demo to keep it one-to-one, so that's what we'll do. The ERC I mentioned earlier defines a specific asset address that represents native currency, and it's hard-coded here; that's what this 0xE... address is. It's another exported constant from this constants file, so we can copy it, add it to the import statement, and pass it in for the reward asset. The reward amount, again, is meant to cover all of the value from the calls, plus whatever the destination chain gas cost is, plus a tip for the fulfiller. The exact mechanism to calculate what that surplus should be can get pretty complicated, but for the sake of today's demo, in a simplified context, adding an extra 2% to the request should be more than enough for it to be profitable for the fulfiller on our local network. So the reward amount is going to be the mint price, which is already in wei, plus a 2% buffer. We can do that with BigInts in TypeScript by multiplying by 102n and then dividing by 100n; this results in the mint price with 2% added. All right. Then for finality delay seconds: on a live network this would likely be something on the order of days to a week, to ensure protection against reorgs on the destination chain. Because this is a self-contained system running locally on your machine, we have the flexibility and freedom to make it really short, and because I want this to be a responsive demo where we see everything happen in real time, we'll make it just 10 seconds. What that results in is roughly a 10 to 15 second delay after the request is submitted before we see the fulfiller actually generate the proof and claim the reward. The nonce doesn't matter, because it gets overwritten in the outbox contract; it's just a canonically incrementing nonce for every request. Expiry doesn't really matter too much for this demo either.
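A quick consolidation of the two economic fields just discussed (BigInt math needs the n suffix on both operands, and division truncates, which is what we want here):

```typescript
// Reward must cover call value + destination gas + fulfiller tip; for the
// local demo a flat 2% buffer over the mint price is assumed to be enough.
const rewardAmount = (mintPrice * 102n) / 100n; // mint price + 2%

// 10s keeps the local demo responsive; a live network would use days.
const finalityDelaySeconds = 10n;
```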
If you see over here in the constants file, we have a one-week constant, so we can just add one week to the current timestamp. We just need this value to be further in the future than the finality delay seconds from now, plus an extra cancellation buffer period, which is hard-coded as a constant in the outbox contract storage as a full day. So for the demo, just to get a valid value in here, one week is more than enough. This will be Date.now: in JavaScript, if you're not familiar, there's a global Date object, and Date.now() returns the Unix timestamp, but in milliseconds. Solidity works in seconds, so we have to divide by a thousand, and Solidity doesn't like decimals, so we then floor that to the nearest integer. From there we can add the one-week constant, which we'll add as another import from the constants file and use down here for the expiry timestamp: we'll add one week. Okay. The last two fields are the precheck contract and precheck data, which, like I said earlier, is an optional fulfillment condition that the requester wants to be true. Because it's optional, we're not going to use it today. To not use it, we have to pass in the zero address, and there's a helpful constant from viem, the web3 library I'm using, that's just called zeroAddress. We can import that at the top and use it as the precheck contract address. And then last but not least, precheck data: for it to pass the off-chain validation that viem does, we just have to pass '0x'. It can be anything; it just has to be some kind of arbitrary byte string, and because we're not using this step, it doesn't really matter. Okay, that just about covers the whole request. The only thing we haven't done yet is encode the calldata for the mint function on the NFT contract. To reference the NFT contract, let me pull it up: it's just a simple mint function that takes one input argument, the to address that should receive the NFT. There's another helpful function from viem called encodeFunctionData that we can use for encoding the calldata here. We can define this as another local variable above the calls constant: const encodedCallData equals encodeFunctionData. Actually, before we define that, we'll add it in as the value for data: encodedCallData. Now, encodeFunctionData accepts one input parameter, an object with a couple of necessary fields. The first is the ABI for this NFT contract, which is actually already being imported as nftAbi for some of the other helper functions defined in this class, so we can just use that: abi: nftAbi. (I don't know why I closed that contract; we still need it.) Next, we define the function name we're encoding data for, which is simply mint: the field name is functionName, with mint passed in. And then, because mint accepts one input parameter, we specify that with a field called args, an array of the input parameters; in this context it's just the to address, which is passed in here as address. That should be it for encoding the calldata. So at this point we have a fully set up RIP 7755 request.
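Consolidating the expiry, precheck, and calldata pieces (ONE_WEEK as a bigint of seconds and nftAbi are the project's assumed names; encodeFunctionData and zeroAddress are real viem exports):

```typescript
import { encodeFunctionData, zeroAddress } from "viem";

// Date.now() is in milliseconds; Solidity timestamps are whole seconds.
const expiry = BigInt(Math.floor(Date.now() / 1000)) + ONE_WEEK;

// Optional precheck unused: zero address plus an arbitrary byte string.
const precheckContract = zeroAddress;
const precheckData = "0x";

// Calldata for the destination chain's mint(address to) call.
const encodedCallData = encodeFunctionData({
  abi: nftAbi,
  functionName: "mint",
  args: [address],
});
```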
Now we need to set up the function call to submit the request. What does that look like? To give you a little more context on how this class is set up, we have something called a wallet client as a local variable here. If you're unfamiliar with viem as a web3 library, it uses things called public clients and wallet clients: the public client is for reading state from the chain, and the wallet client is for writing state to the chain. This wallet client is already defined in the constructor, so we can use it as-is for the target chain we're submitting the request to, which is mock Base. This is going to be an asynchronous call, so we start it with await this.walletClient, and the function we care about is writeContract. Under the hood, this creates your transaction, signs it, and sends it to the network, returning a transaction hash if successful, which we can store as hash. Then we set up the configuration: what contract we're writing to, what function we're calling, and what parameters are needed. That can all be a single object. For reference, I'm going to pull up the constants file again. We need to define the address we're sending the transaction to: it comes from mockBase, because we're submitting the request to mock Base, and the contract we care about is the outbox contract for the standard, so this will be mockBase.contracts.outbox. Next we specify the ABI for the outbox; we'll have to import it at the top, import the outbox ABI from ../abis/outbox, which is already populated in this directory, and then define it as the abi here. We then define the function name we're sending the transaction to: if we pull up the RIP 7755 outbox contract over here, the entry point for the standard, as I said earlier, is this requestCrossChainCall function, so we copy that into functionName. Then we define the input args: as you see here, it's just one argument, the cross-chain request, which is the request we just built. And last but not least, we define any value that should be submitted with this transaction, in a field called value. In this context it's not actually going to be the mint price, because, remember, we added a 2% buffer to the mint price as the reward amount for this cross-chain call; so the value here should be request.rewardAmount. That does it for setting up the contract write. The last piece is using a public client from viem to wait for a transaction receipt, to make sure the transaction is confirmed. We can do that with another await call: in the constants file, one of the fields for each chain config is a public client, so we can use it directly. This will be await mockBase.publicClient, and the function is waitForTransactionReceipt, which takes one input object containing the transaction hash. In JavaScript, if the field name and the variable name you're assigning are the same, you can use shorthand and omit the colon and the value, so those two forms are the same thing; I'll leave it spelled out for clarity. If we get through this waitForTransactionReceipt call, the transaction was successful, so then we log something: console.log transaction success. Okay. So that should be our full request.
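The submission itself, sketched with viem. walletClient and publicClient are the clients described above (set up in the class constructor), and outboxAbi is the imported outbox ABI.

```typescript
const hash = await walletClient.writeContract({
  address: mockBase.contracts.outbox, // RIP 7755 outbox on the source chain
  abi: outboxAbi,
  functionName: "requestCrossChainCall",
  args: [request],
  value: request.rewardAmount, // the full reward, not just the mint price
});

// Confirm before reporting success.
await mockBase.publicClient.waitForTransactionReceipt({ hash });
console.log("Transaction success");
```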
It's pretty straightforward; I just wanted to walk you through all the different field names, how they should be assigned, what the values mean, and how they fit together. This should be enough in place to run the system end to end. If we look at the root of the repository, the root README has a list of commands for spinning up all the infra here: the chains to deploy the contracts on top of, and all of the services needed to get everything working together. We can start by running these commands. The first thing we need to do is spin up our local chains. If we open new terminals, each at the root of the project, we can open three in split screen side by side; these represent the mock L1 and the two mock L2 chains. There's a Makefile defining all of these commands, if you want to see what they're actually doing under the hood. We run make start mock L1 in the first terminal; this is our mock L1 chain. We then run make start mock base, so we have our mock Base chain in the middle. And lastly, we do make start mock arbitrum to run our mock Arbitrum chain in the terminal on the right. Now, with these three local chains running, we can deploy our contracts on top of them. There's another Makefile command set up for deploying the contracts in the correct order required by the off-chain services. We can open a new terminal (the three blockchains are still running in the background; I just have a new terminal on top of them) and run make setup contracts, which compiles and deploys all of the contracts to the local network. That will take a second. While it's going, we can run the make start syncer command. What this does is start the off-chain service for sharing state representations bidirectionally between the mock L1 and the mock L2s; this one's pretty chatty. We can now open the final two terminals that are needed here (it's a ton of terminals, I know). We need to start the off-chain fulfiller to listen and respond to requests: that's make start fulfiller. So now we have the fulfiller listening, and we can do a split screen to run the demo. To run the demo app and mint the NFT, we just run make demo. So if we type that in: not too pretty, but it logs the current state to the console. We have: welcome to the demo, mint your NFT, the Devcon NFT on mock Arbitrum. We currently have zero in the wallet address being used for the demo, the price is one ETH, and the current Base balance is almost 10,000 ETH, because this is one of the default accounts on the local Anvil nodes. Now, if we press enter, this sends that mint request to the local network, and we can see if everything worked. And it did: the transaction went through. In fact, if we run this again, we'll see we already have the NFT on the destination chain, because the fulfiller, if you saw over here, picked up on the request almost right away and submitted it after validating that the incentive was sufficient. And in a second we should see something else happen, because the finality delay of 10 seconds has passed.
What the fulfiller just did is pick up on the fact that it has waited long enough and is now allowed to claim the reward for that request. It generated this massive storage proof and submitted it to the outbox contract on the mock Base chain, and everything was successful: it was validated against the prover contract we just implemented. If you take a look in the fulfiller directory after that ran, it logged the proof into a JSON file, so you can see exactly what proof it used to verify that the call was submitted; that's what this file is. Inside the src directory, there's a database directory storing db.json, which is the representation of the request it picked up on that we just submitted. It also has a rewards file tracking how much ETH it has claimed in rewards. So we have a fully working system running locally, end to end. Woo. And I'm realizing right now that the reward tracking is not accounting for gas cost on the destination chain, but nonetheless, you get the idea. So that just about does it. If you take a look here: like I said, if we run the demo again, we now see that the current NFT balance is one, because the NFT was actually minted on the destination chain, as defined by the encoded calldata we set up for the target calls. If we run it again, it would just keep incrementing from there. So that does it for the demo. Thank you, everyone, for coming. I'll be hanging around for a little bit if anyone has questions or wants to chat about the standard a little more. Like I said, we have an open-source proof-of-concept repo, so if anyone feels compelled to contribute, we fully invite you to. Thank you.
Thank you.", "eventId": "devcon-7", - "slot_start": 1731558600000, - "slot_end": 1731560400000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1Eqr32OyHNOUkt06oQXAiVNTwZse9uMoY_tw7Ag2SkQs", - "resources_slides": null, + "slot_start": 1731645000000, + "slot_end": 1731652200000, + "slot_roomId": "classroom-e", + "resources_presentation": "https://docs.google.com/presentation/d/1R-pN3is6_qjmy7k7gl3hHECFG1O_ZDuH33K5B6JQmGc", + "resources_slides": "https://drive.google.com/file/d/1X8YPz2wz4f6cXPeTKfzi0PXsw2DkxHd4/view", "speakers": [ - "dragan-rakita" + "jack-chuma" ] }, "vector": [ @@ -637815,10 +636005,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -638581,6 +636771,26 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 2, 0, 0, @@ -638624,7 +636834,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -638671,7 +636880,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -638706,65 +636914,42 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, 0, 0, 0, @@ -639147,40 +637332,48 @@ }, { "session": { - "id": "rip-7755-empowering-cross-chain-interactions", - "sourceId": "787TJ7", - "title": "RIP-7755: Empowering Cross-Chain Interactions", - "description": "Cross-chain interactions are becoming essential as Ethereum Layer 2 solutions multiply. RIP-7755 changes the game by trustlessly bridging the gap between L2 chains, allowing new use cases that rely solely on Ethereum and its rollups. In this workshop, we’ll explore RIP-7755 by building a cross-chain NFT minting app, focusing on nested storage proof implementation details to eliminate trust assumptions.", - "track": "Layer 2", + "id": "rlnv2-enhanced-spam-protection-for-all-peer-to-peer-networks", + "sourceId": "ZFJXFP", + "title": "RLNv2: enhanced spam protection for all peer-to-peer networks", + "description": "RLN is a protocol designed to prevent DoS attacks in a privacy-preserving manner. It uses zero-knowledge proof to limit the number of actions a user can take. In a p2p network, it can be used to limit messages sent over a period of time by one sender. RLN’s latest upgrade limits to N (instead of 1) messages per epoch. 
Also, the Merkle tree is now built on-chain, greatly improving the UX.\r\n\r\nCome learn how to use an implementation of RLNv2 to DoS protect a peer-to-peer network.", + "track": "Cypherpunk & Privacy", "type": "Workshop", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "Cross-L2", - "Rollups" + "Privacy", + "Censorship Resistance", + "Decentralization", + "Zero-Knowledge", + "network", + "peer-to-peer", + "Censorship Resistance", + "Decentralization", + "Privacy", + "Zero-Knowledge" ], "keywords": [ - "Interop" + "Anonymity", + "peer-to-peer networks" ], - "duration": 5524, + "duration": 3144, "language": "en", - "sources_swarmHash": "f335f509aad994029fa3bd29d0c69456d45499bee29aea62b1cd0877fa13e0c3", - "sources_youtubeId": "yw-lgjdg7FY", + "sources_swarmHash": "6ea0528ba8f1725dea3e57b64456bbc3b2119584f9f8c6c02f8558bd98ae88e5", + "sources_youtubeId": "EH6zUu6AzlQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673869701b0f83434dee5eaa", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673869701b0f83434dee5eaa.vtt", - "transcript_text": " . Okay. I think we're good to get started. Hello. Welcome to the workshop for RIP 7755. This is a proposed standard for empowering low-level cross-chain calls with minimal trust assumptions. My name is Jack Chuma. I'm a senior software engineer working in R&D on the base team at Coinbase. And I'm so excited to share with you all a little bit about this project. So a couple of goals for today. First and foremost, I want to promote a deeper understanding of RIP 7755, what exactly it is and how it works. Second, considerations for adding new chain support in the future, as I foresee that being one of the main opportunities for open source contribution. Number three, one of the main features of the standard is that it minimizes trust assumptions, and that's done via a mechanism called what we're calling nested storage proofs. So I'd like to do a deep dive there and promote a deeper understanding. And then lastly, integration details. So if you're an app developer and you would like to be able to facilitate some kind of cross-chain call between L2s and the Ethereum ecosystem, how would that work if you're integrating with the standard? So to give you some context and a high-level purpose on where we're coming from here, I included a screenshot here from L2Beat sometime last week. And what it's showing is activity in Ethereum L2s. And more specifically, how it's surged by over 500% in just the last year alone. And that's being spread out over many different networks. So that's why I chose this screenshot specifically. It shows a handful of networks. This is just a small subset of how many L2s there are already. And that's only just going to continue expanding. And this has been great for scaling Ethereum. But it has caused fragmentation in the ecosystem, where if you're a user and you want to interact with an app that's deployed to a specific chain that you maybe don't have funds on, there are certain hoops that you need to jump through to get funds to the correct location to be able to interact with that application, and that hurts the user experience. It's a critical problem that has to be solved, and so to solve that problem, we believe that there should be a standard for communication between chains that checks the following three boxes. 
Is public and decentralized in the spirit of Web3; relies solely on validation data that is trustlessly available on-chain (so, minimal trust assumptions); and has built-in flexibility to support any arbitrary message. These three bullet points were the three north stars we kept in mind as we developed the RIP-7755 proposal, and we'll dive in now. Because Ethereum L2s post some sort of state representation to a shared execution environment, they are uniquely positioned to solve this problem with minimal trust assumptions, via the mechanism called nested storage proofs that I mentioned. This allows us to prove state about one L2 from another L2, even though they don't have a direct line of communication. To understand storage proofs, I think it makes sense to do a quick refresher on Merkle trees. This is nothing new, of course, but it's a required prerequisite for understanding how storage proofs work under the hood. As a quick recap, a Merkle tree is a tree data structure where each node is a hash of its direct descendants. In this diagram (I don't know if you can see my mouse), we have four data blocks: A, B, C, and D. To convert them into a Merkle tree, each block gets hashed to create a leaf node of the tree. You group the nodes into pairs, concatenate them, and hash them together to create their parent node, and you do that recursively until you reach the root node of the tree. Effectively, what you've done is generate a unique identifier for the entire data set that is just a single hash. This has a couple of interesting properties. One is that if any of the data blocks changes in any way whatsoever, the root hash changes completely. Another is that it lets us efficiently prove inclusion or exclusion of data within the larger data set. In this example, if we wanted to prove that data block A is in the larger data set of A, B, C, and D, we'd first need a verifier with trustless access to the root hash represented by this root node up here. If that's in place, all we'd have to supply to the verifier is data block A, this hash(B) node, and this hash(hash(C) + hash(D)) node. That data alone is all the verifier needs to recreate the root hash at the top of the tree: the verifier hashes A to create the hash(A) node, concatenates hash(A) with hash(B) and hashes them together to create their parent node, and then does that once more at the final level to recreate the root node. If the result is equivalent to the root node the verifier already had, the proof succeeds. So how does that apply to Ethereum storage? Basically all of Ethereum's state is represented in a modified form of that Merkle tree data structure called a Merkle Patricia trie. The exact differences between the two data structures are out of scope for this talk; they're essentially a handful of optimizations for Ethereum-specific use cases. All we really need to know for this application is that the Merkle proof paradigm still applies. For every block in Ethereum, there's a handful of block headers, one of which is a state root. This is the root hash of a Merkle Patricia trie covering all of Ethereum's state, where the values in that trie are Ethereum accounts.
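Backing up to the A/B/C/D example for a second, here's the inclusion check sketched in code. This is a minimal illustration (keccak256 is Ethereum's hash; the path and ordering encoding here is one of several reasonable choices):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.24;

library MerkleInclusionSketch {
    /// Recompute the root from a leaf and its sibling path, bottom-up.
    /// For the A/B/C/D example, proving A takes path = [H(B), H(H(C)+H(D))]
    /// with isLeft = [true, true] (our node is the left child at both levels).
    function verify(
        bytes32 root,
        bytes32 leaf,
        bytes32[] memory path,
        bool[] memory isLeft
    ) internal pure returns (bool) {
        bytes32 node = leaf;
        for (uint256 i = 0; i < path.length; i++) {
            node = isLeft[i]
                ? keccak256(abi.encodePacked(node, path[i]))
                : keccak256(abi.encodePacked(path[i], node));
        }
        return node == root; // matches the trusted root => inclusion proven
    }
}
```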
Back to the state trie: the accounts in it could be EOAs (so, any off-chain wallet, like Coinbase Wallet or MetaMask) or smart contract accounts. As represented in the trie, each account is an array of four pieces of metadata, which is what I have listed out here: the account nonce, balance, storage root, and code hash. Smart contract accounts are very likely managing some type of state, and if they are, that state is stored in the contract's storage. That contract storage is also represented as a Merkle Patricia trie under the hood, and, you guessed it, the root of that trie is the storage root stored with the account. So if we have access to a state root for a network, we can supply a path down the state trie to a specific account within the network. Then, using that account's storage root, which can be extracted from the account's metadata, we can supply another path from the storage root to a specific location in that account's storage. That's the high-level concept of how a storage proof works. But how does that apply to cross-chain messaging, given that this only proves state within a single network? To explain that, I have this diagram here. It's very simplified, obviously, but it's meant to represent two rollups in the Ethereum ecosystem that both share state with a common L1. The L1 at the bottom would be Ethereum mainnet, and chain A and chain B are two L2 networks. What this diagram depicts is bidirectional communication between the two layer 2 chains and the shared layer 1. For the downward arrows on both sides: an Ethereum L2 chain wouldn't be an Ethereum L2 chain if it weren't sharing state with L1 in some way, and that's what this represents. It requires that both chains share some representation of their state with what I'm calling a rollup contract on layer 1. This could be the state root directly, or some other representation of state; the only requirement, at least for the way RIP 7755 works thus far, is that it must be verifiably linkable to the chain's state root. In the other direction, we need trustless access to a layer 1 state representation within the L2 chains as well. That's what these upward arrows pointing to the beacon roots oracle contracts are. This is made possible by an improvement proposal that's live today on many networks, EIP-4788, which trustlessly exposes the most recent 8,191 beacon roots from the Ethereum consensus client within the L2 execution environment. It should be noted that a beacon root is not the same as the execution client state root, but it is verifiably linkable to the L1 execution client state root via a very similar process. So, from chain A, if we were trying to prove something about the state of chain B, this diagram represents everything that needs to be true for that to work. If chain A starts with trustless access to a beacon root, it can supply a Merkle Patricia trie based proof to verify the L1 execution client state root. And once we have a verified L1 execution client state root, the exact storage proof process I just went through applies: we can prove anything about the state of layer 1 from chain A.
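For what it's worth, the account and storage trie paths used in these proofs are exactly what the standard eth_getProof RPC returns; viem exposes it as getProof. A quick illustrative sketch (the address and slot below are placeholders):

```typescript
import { createPublicClient, http } from "viem";
import { mainnet } from "viem/chains";

const client = createPublicClient({ chain: mainnet, transport: http() });

const proof = await client.getProof({
  address: "0x0000000000000000000000000000000000000001", // e.g. chain B's rollup contract
  storageKeys: [
    "0x0000000000000000000000000000000000000000000000000000000000000001", // e.g. output root slot
  ],
});

// proof.accountProof: trie path from the L1 state root to the account's
// metadata (nonce, balance, storageHash, codeHash).
// proof.storageProof[0].proof: trie path from that storageHash to the slot value.
```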
In this context, the first step would be to supply a path from the state root to a specific account within the L1 state trie, and the account here is chain B's rollup contract. Then, using chain B's rollup contract storage root, we can supply another path to a specific location in that contract. What's interesting is that if the value you're verifying inside chain B's rollup contract is itself a state representation for chain B, you can recursively follow the same steps again to prove something about chain B's state trie: starting from chain B's state root, you supply a path to a specific account within chain B, and then a path from that account's storage root to a specific storage location. That effectively allows us to verifiably prove a storage location in an account on chain B from chain A, even though there's no direct line of communication. So that's cool, but how does it help us with cross-chain calls? This diagram shows the overall architecture of how RIP-7755 is set up to work, and it should help answer that question. As you can see, we have two chains represented, an origin and a destination, and we have both on-chain and off-chain components. Every supported chain has some sort of inbox and outbox contract, which we call the RIP 7755 inbox and outbox, but I'll just stick with inbox and outbox for the rest of the talk. The outbox contract is basically the entry point into the standard: if a user wants to request a cross-chain call, they submit a request to the outbox contract. Another key piece of logic that happens here is that the user also locks some kind of reward bounty; that's what incentivizes fulfillers to respond to the request. If the request settles properly, the outbox emits an event that off-chain actors we call fulfillers should be listening for. If the reward bounty is a sufficient incentive, a fulfiller will respond and submit the requested call to the destination chain over here. That routes through an RIP 7755 inbox contract, which performs a handful of validation steps, mainly confirming that the request is arriving at the correct chain, and at the correct location on that chain at that. There's also an optional custom validation step called a precheck contract, which could be absolutely anything, as long as it adheres to the specific precheck interface the standard requires. All it's meant to do is let the user encode any arbitrary fulfillment condition that should be true for the fulfillment to proceed; but, like I said, it's totally optional, so if it's not being used, this step is skipped. If all the validation steps check out, the requested calls are routed from there. These could be a batch of arbitrary low-level calls to a handful of addresses, with encoded calldata and any native currency value that may be included. If they're all successful, the main purpose of the inbox contract is then to store a receipt of successful execution.
After the call has been successfully submitted, the fulfiller then comes back to the origin chain to say: hey, I did the job, now can I have my payment? What's unique about this standard is that the payment will only be released if the fulfiller can cryptographically prove that they did actually submit the request to the destination chain, and that's done via the nested storage proof concept we just walked through. From the origin chain, we have to be able to verify a specific storage location in the inbox contract on the destination chain, and because the outbox contract can derive exactly where that location is supposed to be, the outbox has everything it needs to verify the successful fulfillment of the call. Only if the nested storage proof checks out will the outbox release the reward to the fulfiller, and that closes the loop on the full process for how RIP-7755 works. For today's workshop, I have an example project for us to go through together. If anyone's interested in coding along, there is a starter repository on my GitHub that I can show in a second; if you're not interested in following along, I'll be doing it up here as well, so we can go from there. I'm going to start by walking through all of the contracts and services present in the starter project. This is a self-contained system mocking a multi-rollup ecosystem that runs locally on your machine, so once you clone the repository, you should have everything needed to run the entire app end-to-end locally. After a brief walkthrough of all the services and how they work together, we'll implement and test a nested storage proof validation contract, as that is where the bulk of the effort will have to be applied to add new chain support in the future. I also just think it's really cool. Once that is working properly, we'll integrate with an off-chain client application. For this demo, it's a simple NFT mint application where the NFT owner wants to support users who don't necessarily have funds on the chain the NFT contract lives on. If they don't, an RIP-7755 request sends the cross-chain call to still mint the NFT. Once that is all set up, we should be good to run the app end-to-end. Before we jump in, I'll leave the presentation piece with this: it's still early enough in the research phase that a lot of these details are subject to change, but we have proven the concept of this nested storage proof on live networks and shown how it can be used to trustlessly verify cross-chain calls. It seems to hold a lot of potential, something I'm very excited about and something the Base team is very excited about. We have an open-source proof-of-concept repository on the base-org GitHub that I fully invite anyone and everyone to contribute to if you have interesting ideas. With that, we should be good to dive into code, but as a quick gut check, are there any questions before doing so? And there will be time for questions at the end, too, and after this talk. Okay.
I didn't know the best way to share the link, but my GitHub handle is my name, Jack Chuma, and I have this DevCon 2024 RIP-7755 workshop project. If anyone's interested in coding along, you can clone this repository and follow along with me. I already have it cloned, so we can start with a brief walkthrough. Once you have the project, you'll notice there are two main directories, contracts and services: the on-chain and off-chain components from that architecture diagram. We can start by going through the contracts. I'll start with the NFT contract because it's very simple, but it's what our demo client is going to be using. The main piece here is the mint function. The only reason I'm covering it is that it's needed to set up the integration when we get there; nothing too interesting is happening in it. Next up we have a rollups directory, which is used to mock the multi-rollup system locally. I didn't want to have to rely on a good internet connection for this to work, so we have a mock system running locally. The rollup contract is what would be deployed to the mock L1, and it stores a state representation for the mock L2s. In this context, that state representation is a hash of the L2 block timestamp and the L2 state root. Then on the L2 side of things, we have this beacon oracle contract, which is meant to mock the EIP-4788 interface for querying one of the beacon roots stored in the L2 execution environment. For this example it's simplified, and it directly stores the L1 execution client state root. So we are cutting out the verification step from beacon root to state root, but I think it still gets the message across.
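A rough sketch of the mock rollup contract as described; the contract and function names here are guesses for illustration, not the repo's actual code.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.13;

// Illustrative only: stores a state representation for a mock L2 as
// keccak256(l2Timestamp . l2StateRoot), keyed by the L2 block timestamp.
contract MockRollup {
    mapping(uint256 => bytes32) public outputRoots;

    function postOutputRoot(uint256 l2Timestamp, bytes32 l2StateRoot) external {
        outputRoots[l2Timestamp] = keccak256(abi.encodePacked(l2Timestamp, l2StateRoot));
    }
}
```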
Then we have all of our RIP-7755 contracts. As a brief walkthrough of what an actual request looks like, we have this RIP7755Structs file, and this is exactly what a request looks like. Going through all the fields: we start with a requester, which is pretty self-explanatory, the address submitting the request. We have a batch of calls, where each call is a low-level description of the exact address you'd like to send the call to, encoded calldata, and any native currency value that should be included with that call. Then we have a specified prover contract. The reason this is here is that there's no standard way for L2 chains to post their state representation to L1, and because of that, the exact implementation details for verifying state about a destination chain will vary depending on what that destination chain is. Right now we have this set up with the proving logic abstracted into separate contracts. This very likely will change in the near future; if we baked it into the outbox contract, that would require multiple outbox contracts to be deployed to each chain, so it's a trade-off. For right now, the setup is one outbox, one inbox, and then an array of prover contracts deployed to each chain, and the user specifies which one should be used to verify fulfillment. That prover contract is what we're going to implement in a few minutes. Then we have the destination chain ID, which is pretty self-explanatory, and the inbox contract, which is the address of the RIP-7755 inbox contract on the destination chain. The l2Oracle address is the address of the rollup contract that gets deployed to L1 for the destination chain. This is the user specifying where the prover contract should look for the destination chain's state representation when verifying that the call was submitted, both the contract address and the storage key within that contract. Then we have a reward asset address. This could be an ERC-20 address or, as specified by ERC-7528, a special address value that can be used to represent native currency. Then there's the reward amount, which is the amount of the reward asset that should be locked with the request. It should be noted that the reward amount should cover all of the value included in these calls, plus whatever the gas cost will be for submitting the call to the destination chain, plus an extra tip for the fulfiller; that extra tip is what acts as the incentive for the fulfiller to respond to the request. Then we have finalityDelaySeconds. This is the gap that has to pass, after the call is successfully fulfilled on the destination chain, before the fulfiller is allowed to claim the reward. It's basically destination chain reorg protection for the user: the higher this delay, the more likely the destination chain won't be reorged, ensuring the call was actually submitted and stays submitted. We have a nonce value for ensuring that every request is unique, as the way we identify these requests is by hashing the entire structure. There's an expiry field for when the request should expire. This is relevant for the user being able to reclaim the reward if, for whatever reason, the call was never submitted to the destination chain; there should be a mechanism for the user to recover those funds, and that's what this expiry timestamp is. If the call isn't submitted before the expiry timestamp, the user can reclaim. And then we have these last two fields for the pre-check contract: a pre-check contract address and an arbitrary bytes array of encoded data for that pre-check. This is for that optional arbitrary fulfillment condition that should be true; if the address is the zero address, that means we're not using it. We won't be doing a pre-check step in today's demo, but I figured it's worth covering that it's there anyway. Next, we'll do a quick walkthrough of the inbox and outbox contracts, the outbox being the entry point to the system. We have one main function that we really care about here, requestCrossChainCall. That's where the user submits the request and where the event is emitted that the fulfillers are listening for. Then we have a claimReward function, which is where the fulfiller comes to claim their reward after successfully fulfilling the request, and that calls into the prover contract here on this line. That prover contract is again what we're about to implement; it's expected to revert if the proof fails, so there's no return value or anything here. And lastly, the cancelRequest function: after the expiry timestamp, if the reward has not been claimed yet, the user gets to reclaim it.
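Pulling those fields together, here's the request shape as I followed it from this walkthrough. A sketch: the names and types are reconstructed from the talk and may not match the proposal file exactly.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.13;

// Reconstructed from the walkthrough above; treat as illustrative.
struct Call {
    address to;
    bytes data;
    uint256 value;
}

struct CrossChainRequest {
    address requester;
    Call[] calls;
    address proverContract;     // prover on the origin chain to verify fulfillment
    uint256 destinationChainId;
    address inboxContract;      // RIP-7755 inbox on the destination chain
    address l2Oracle;           // destination chain's rollup contract on L1
    bytes32 l2OracleStorageKey; // where its state representation lives
    address rewardAsset;        // ERC-20, or the ERC-7528 native-asset address
    uint256 rewardAmount;       // calls' value + destination gas + fulfiller tip
    uint256 finalityDelaySeconds;
    uint256 nonce;              // ensures uniqueness; requests are identified by hash
    uint256 expiry;             // after this, the user can reclaim the reward
    address precheckContract;   // zero address means no pre-check
    bytes precheckData;
}
```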
On the destination chain side, we have the inbox contract. The main piece of information we care about here is the FulfillmentInfo struct. This is that execution receipt that gets created in storage, and it will be the target of the nested storage proof validation; the whole point of the storage proof is to prove that this struct exists in storage for the specific request. It stores the timestamp at which the request was fulfilled, as well as the filler address that should be able to claim the reward back on the source chain. It gets created during this fulfill function: we have all our validation steps up here, then we route the calls here, and if everything's successful, we're left with the created FulfillmentInfo struct. So this contract is fairly simple. Something a little bit more interesting is this StateValidator library. If, in the future, you're working on setting up a new prover contract for a new destination chain that's not currently supported by the proposal, you would likely be utilizing this library. The whole point of it is to abstract a lot of the complexity involved with storage proofs away from the developer; no need to reinvent the wheel every time. The way it's set up, there are two main functions we care about: validateState and validateAccountStorage. validateState is for when you're starting from the beacon root on L1 and trying to verify the L1 execution client state root against that beacon root. From there, using the verified state root, it then proves a storage location for an account within that state. Because our example today is not using beacon roots, we don't need that function, but I figured I'd briefly cover it. The function we care about is validateAccountStorage. It takes in an account, which is a specific account within the network, a state root for the network, and then a handful of these account proof parameters. Those parameters are a specified storage key, an expected storage value, an account proof, and a storage proof. The account proof can be thought of as the path down the state trie from the state root to the specific account we care about, and the storage proof as the path from that account's storage root to the storage location that exists at the storage key and should be storing the storage value. It should be noted that all values in the state trie are keyed by the hash of the account's Ethereum address, so that's what we're doing here: deriving the account key, and then using a MerkleTrie.get to return an encoded account. The encoded account is basically an encoded array of the account metadata I went through in the slides earlier: the nonce, the balance, the storage root, et cetera. From that, we extract the storage root, and then use it to verify the storage location using the storage proof that exists in the AccountProofParameters struct I just went through. So at a high level, that's how that's working.
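The proof-parameter shape, as I understood it from that description; the exact field names and types in the real library may differ.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.13;

// Reconstructed from the talk; illustrative only.
struct AccountProofParameters {
    bytes storageKey;      // location to verify in the account's storage
    bytes storageValue;    // value expected at that location
    bytes[] accountProof;  // Merkle Patricia path: state root to account
    bytes[] storageProof;  // Merkle Patricia path: storage root to storage slot
}
```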
There's also a directory in here called provers. That's what we're about to implement, so we'll come back to it in a second. Just real quick, before we dive into the implementation details there, I want to do a quick summary of the off-chain services running here. The demo is the app facilitating the NFT mints, so we'll be implementing some details in that directory. The other two directories are surrounding services needed for the full system to run locally. The syncer is in charge of sharing state representations bidirectionally between the mock L1 and the mock L2s. And the fulfiller is the off-chain agent that's listening for requests; it will validate each request, ensure the incentive is enough to compensate it for its time, respond accordingly, and submit the request. It will then generate a full nested storage proof that gets validated against the contract we're about to implement, and if that checks out, we'll be able to see the fulfiller claiming its rewards in real time. So with all of that said, we have enough here to dive into implementing a prover contract. I have a handful of imports already set up in here, just to save the time of typing them out. You'll notice the first import is an IProver interface, so we can start by taking a look at it. What it defines is a single function, and that's all the prover contract needs, because this is the function that the reward claim function from the outbox contract is going to hit. We can start by literally copying this entire declaration into our prover contract to initiate the implementation. So I'll replace this comment with that function declaration and add empty curly braces here, and then we extend that interface: this is IProver. Let's take a quick skim through the comments explaining what validateProof should even be doing. It validates storage proofs and verifies fulfillment: okay, makes sense. It should revert if the storage proof is invalid: also makes sense. It should revert if fulfillment info is not found at inboxContractStorageKey on the specified inbox contract: that is kind of interesting. It should be noted that the storage key, like I was saying before, is derivable from a network that doesn't actually have context about the destination chain, and because of that, the derivation is done in the outbox contract before this function is hit. So this value is not coming from the off-chain fulfiller; we can trust it. Lastly, it should revert if the fulfillment info timestamp is less than finalityDelaySeconds before the current destination chain block timestamp. This is that destination chain reorg protection I mentioned earlier; we need to ensure finalityDelaySeconds is not currently still in progress. Then we have a note that the implementation should vary by destination L2, due to the lack of standardization around how L2s post their state representations to L1, like I was saying. And then a quick summary of the input parameters we have to work with. inboxContractStorageKey, as I just mentioned, is the storage location in the inbox contract on the destination chain where we expect the execution receipt to be. Next up is the FulfillmentInfo struct, the exact execution receipt that should exist at inboxContractStorageKey in the destination chain's inbox contract. Then we have the initial request that came from the user, and then an arbitrarily encoded proofData bytes array.
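Sketching that interface from the description above; parameter names and types are inferred from the talk, with FulfillmentInfo and CrossChainRequest being the shapes sketched earlier.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.13;

// Illustrative reconstruction of the interface described in the talk.
interface IProver {
    /// Validates the nested storage proof and verifies fulfillment.
    /// MUST revert if the proof is invalid, if no fulfillment info exists at
    /// `inboxContractStorageKey` on the destination inbox, or if
    /// `finalityDelaySeconds` has not yet elapsed on the destination chain.
    function validateProof(
        bytes memory inboxContractStorageKey, // derived by the outbox; trusted
        FulfillmentInfo calldata fulfillmentInfo,
        CrossChainRequest calldata request,
        bytes calldata proofData // prover-specific encoding, defined per chain
    ) external view;
}
```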
The proof data is arbitrarily encoded because, like I was saying before, with the lack of standardization around state posting, there can be subtle differences in the exact data that's needed for the prover to verify that state. So there's no enforced structure for this data at the outbox level; the structure is implemented here, within the prover, and we'll start to set that up in just a moment. Okay, with all of that gone through, we have enough to start setting this up, so let's think about what the steps are for validating a nested storage proof. For starters, we'll want to enforce a structure on proofData, so we can decode it into some defined structs that we'll declare at the top of the contract: step one, decode proof data. Next, with the decoded proof data, we can use some supplied data to trustlessly access the L1 state representation: step two, query L1 state representation. On a real network supporting EIP-4788, this would be a beacon root; for today's example, it's the state root directly, so I'll make a brief comment explaining that: on a real network, likely a beacon root; in today's demo, the state root. Step three: once we have a state representation for L1, using the L1 state root, we'll verify a storage location on L1, and that storage location will be in the destination chain's rollup contract, holding the destination chain's state representation. Step four: we'll need to verifiably link a destination chain state root to that verified value, so we note that as a step here: verifiably link the destination chain state root to the verified state representation. After that step we should have a verified state root for the destination chain, so step five essentially repeats step three: using the L2 state root, verify the execution receipt in the inbox contract on the destination chain. And then lastly, the only step we haven't covered is the revert condition saying that if finalityDelaySeconds is still in progress, this function should revert. We can check that as our final step: step six, revert if finalityDelaySeconds is in progress. Okay. If we can successfully set up these six steps, we should be good to go to verify these nested storage proofs. If we take a look up here, you might have noticed this contract expects to be deployed with an address in the constructor. This address is for the beacon roots oracle contract. On a live network you wouldn't have to do this, because EIP-4788 specifies a deterministic contract address that you could just hard-code into your contract. But for this to work both in tests and deployed to our local network, I have it passed in at deployment. So as our first step, we can store this as an immutable, call it beaconRootsOracle, and assign it within the constructor. And then, starting to think about how these steps work, the first being that we want to decode proofData into a structure we define, we can begin by defining a struct, RIP7755Proof. The compiler is going to be mad about the struct being empty, but we'll come back to that in a second.
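A skeleton matching that plan, using the names from the talk; the placeholder field just keeps the empty struct compiling until the real fields are added.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.13;

// Sketch of the prover's starting point; names follow the talk.
contract Prover {
    struct RIP7755Proof {
        uint256 placeholder; // removed once real fields are added; empty structs don't compile
    }

    address public immutable beaconRootsOracle;

    constructor(address _beaconRootsOracle) {
        beaconRootsOracle = _beaconRootsOracle;
    }

    function validateProof(bytes calldata proofData /* , ...IProver args */) external view {
        // 1. decode proof data
        // 2. query L1 state representation (live network: beacon root; demo: state root)
        // 3. using the L1 state root, verify the dst chain's state rep in its rollup contract on L1
        // 4. verifiably link the dst chain state root to that state representation
        // 5. using the L2 state root, verify the execution receipt in the dst chain's inbox
        // 6. revert if finalityDelaySeconds is still in progress
    }
}
```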
With that in place, we can set up the first step by decoding proofData into a local variable, proof, that adheres to this RIP7755Proof struct. So if we copy this and paste it here: this will be in memory, equal to abi.decode of proofData, passing the name of the struct as the second argument. That decodes the proofData bytes into whatever structure we define at the top of the file. For step two, we want to query the L1 state representation. This is going to come from that beacon oracle contract in the rollups directory I covered briefly. If we pull it up to take a look at how exactly we should query it: it has a fallback function in here, which mimics the interface that would be used to query a beacon root from the real beacon roots oracle contract on a live network, and all it takes is an encoded block timestamp. So that's actually the first piece of data we need to add to our RIP7755Proof struct: the L1 block timestamp we're going to use for the proof. We can add that as a uint256, call it l1Timestamp. Then, using it, we can set up a staticcall into the beacon oracle contract. If you remember, we have the address stored as the immutable variable here, so we can take this, copy it, and under step two, paste it: beaconRootsOracle.staticcall, which is a low-level call that acts like a view function, in that it's expected not to mutate any state at the address being called. We pass in the encoded block timestamp for the L1 chain, which we just added to the proof, so this is abi.encode with proof.l1Timestamp passed in. The staticcall returns a tuple where the first value is a boolean, so we can call that bool success, and the second value is a bytes array in memory, so bytes memory data. With any low-level call on an EVM chain, you need to confirm that success comes back true, because if something weird happens at this address or the staticcall fails for some unexpected reason, we need to ensure we revert the transaction here. For that case, we can add a custom error at the top of the file: error BeaconRootsOracleCallFailed. Copy that, and then underneath this line: if not success, revert with that custom error. If we get past this if statement, we have a returned data bytes array representing an encoded version of the L1 state root. After the if statement, we can decode data into a bytes32: bytes32 l1StateRoot equals another abi.decode, with data passed in and bytes32 as the data type. Okay, that should do it for step two. At this point we have an L1 state root that we can use in a storage proof to verify something about the state of L1.
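As a fragment, here's how steps one and two read when put together; names are from the talk, and the error would be declared at contract level.

```solidity
error BeaconRootsOracleCallFailed();

// Inside validateProof:
// Step 1: decode the fulfiller-supplied proof into our defined structure.
RIP7755Proof memory proof = abi.decode(proofData, (RIP7755Proof));

// Step 2: query the L1 state representation from the oracle. On a live
// network this would be a beacon root; in today's demo, the state root.
(bool success, bytes memory data) =
    beaconRootsOracle.staticcall(abi.encode(proof.l1Timestamp));
if (!success) revert BeaconRootsOracleCallFailed();
bytes32 l1StateRoot = abi.decode(data, (bytes32));
```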
That's exactly what step three lays out: using the L1 state root, we want to verify a storage location in the destination chain's rollup contract on L1, which holds the state representation for the destination chain. To get more context for how that should work, let's take a look at the rollup contract, because that's where the destination chain posts its state representation. What's kind of interesting here is that on a lot of live networks, the exact storage location where the state representation will exist is not necessarily known at the time of the request. In that case, the request just knows the storage slot where the data structure is located. In this example, the mapping storing output roots lives at storage slot one. So in this context, the request specifying the destination chain's storage location in its rollup contract on L1 would just specify storage slot one, and then we have to take that and derive the location of the mapping value based off the destination chain's L2 block timestamp. That brings us to the next piece of data we need for this proof: the block timestamp for the destination chain. We can add that as another uint256 in the proof struct, l2BlockTimestamp. By the way, for anyone following along, try to stick to the exact names I have here so the fulfiller's proof works properly. The logic by itself would work fine either way, but for this to be compatible with the surrounding system, we have to use the matching names. With the L2 block timestamp in place, we have enough to derive the storage location of the output root associated with that block timestamp inside the rollup contract. So how do we do that? If we take a look down here at step three, where we'll be using the L1 state root: this is the first example of an actual storage proof we're going to use, so this is where we want to take a look at that StateValidator library again, because it has a bunch of utility functions for facilitating storage proofs directly, namely that AccountProofParameters struct, which is going to come in handy, and that second function I mentioned for validating account storage. We'll be using that function here. It takes in an account, a state root, and proof parameters that should be provided by the fulfiller. So we can start to set that up now: StateValidator.validateAccountStorage; my autocomplete added the names of the parameters here. For the account: what's the account here? It's the rollup contract for the destination chain, and if you remember from the structs file I walked through at the beginning, one of the fields specified by the requester is this l2Oracle address. That is exactly the account we care about for this first storage proof, so we can pass it in here; it comes from the request being passed in, so this will be request.l2Oracle for the account. The state root is the L1 state root we just decoded here, so we use that. And then the account proof parameters, the AccountProofParameters struct inside the StateValidator, will be supplied by the off-chain fulfiller, so that's the next piece of information we need in our proof struct. We copy it and add it as a third field: StateValidator.AccountProofParameters, and we're going to call this one dstL2StateRootProofParams. Then we copy that field name, and it becomes the last argument passed into our validateAccountStorage call: proof.dstL2StateRootProofParams.
If you take a look at the validateAccountStorage function, you can see it returns a boolean value, and we need to ensure that boolean is true. So we can capture it in a local variable: bool isValidL1State equals the StateValidator.validateAccountStorage call. For when it's not a valid L1 state, we can define a custom error up here: error InvalidL1State. Copy that, and then: if not isValidL1State, we revert with that custom error. And there we go. So what we have happening here is that after this step, we should have a verified storage location on L1. To jog your memory on the params being passed in by the fulfiller, though, there is something kind of fishy happening with the way we currently have it set up. These account proof parameters specify an exact storage location and expected value, so if this checks out and returns true, it means we've successfully validated that location. But what if that location is the wrong location? What if it's not the state representation for the destination chain? That would be a problem. So this is where we want to override the storage key. We could either derive what it should be and confirm that it's equivalent, or we could override it; for this example we'll override it, though maybe there's a subtle gas optimization one way over the other. For starters, we'll want to create a helper function for deriving what the L1 storage key should be. We can create that down here as a private helper: deriveL1StorageKey, a private pure function that returns bytes memory, because, as you see up here, the storage key is a bytes string, not a bytes32. This is the storage key of where the state representation should be in the rollup contract on L1. We can look at the rollup contract again to see how that storage layout is set up: we have the mapping located at storage slot one, which is likely what the l2OracleStorageKey in the request will be, so I would expect it to be the bytes32 representation of the number 1. We take that and hash it with the L2 block timestamp, which under the hood is what Solidity does to generate the storage location for a mapping value keyed by the block timestamp; that's exactly what we recreate here. To do that, we need a couple of pieces of information. One is the l2OracleStorageKey, which comes from the request, so we pass the request in: copy this whole thing, pass that in. The other is the L2 block timestamp, which, if you'll remember, we added to the proof struct up here, so we pass that in as well: copy its declaration and paste it as a second input argument. So what does this return? A derived value for the storage key where the L2's output root should exist. We return an abi.encodePacked with the block timestamp passed in, proof.l2BlockTimestamp, and then the storage key location, request.l2OracleStorageKey. That concatenates them into a single bytes array, which we then need to hash to generate the storage key, so we wrap the whole thing in a keccak256 hash function. And then one final step here: because keccak256 returns a bytes32 and we need this to return bytes memory, we wrap it in one more abi.encode. That should be all we need to derive the L1 storage key.
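Here's that helper plus the step-three validation as a fragment, sketched with the names from the talk:

```solidity
error InvalidL1State();

function deriveL1StorageKey(
    CrossChainRequest calldata request,
    RIP7755Proof memory proof
) private pure returns (bytes memory) {
    // Solidity stores mapping values at keccak256(key . baseSlot); here the
    // key is the destination chain's block timestamp and the base slot comes
    // from the request. abi.encode wraps the bytes32 hash back into bytes.
    return abi.encode(
        keccak256(abi.encodePacked(proof.l2BlockTimestamp, request.l2OracleStorageKey))
    );
}

// Inside validateProof:
bool isValidL1State = StateValidator.validateAccountStorage(
    request.l2Oracle,                // destination chain's rollup contract on L1
    l1StateRoot,                     // decoded in step two
    proof.dstL2StateRootProofParams  // supplied by the off-chain fulfiller
);
if (!isValidL1State) revert InvalidL1State();
```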
Now we can close these out and use this function to overwrite the storage key that gets passed into the validation step: proof.dstL2StateRootProofParams.storageKey = deriveL1StorageKey, where we pass in the two input parameters of request and proof. Okay, sweet. At this point we have a verified value in the destination chain's rollup contract on L1, and we've confirmed that it's at the correct location, so we can trust that it's the proper state representation, and we can move on from there. The next step is to verifiably link the destination chain's state root to that destination chain state representation. If the destination chain were directly posting its state root, this would be unnecessary; we'd just need to make sure we have a verified state root here. But in this example, because we're not directly posting the root, it's a hash of the block timestamp and state root, we need to recreate what this output root should be. So we can re-derive it with a bytes32 local variable: derivedL2OutputRoot equals keccak256 of, well, we're basically just recreating this line over in the rollup contract, so it's an abi.encodePacked, and inside it we want to pass the block timestamp and a state root. But at this point we don't have a state root to use, so now is the perfect time to add that as the next field in our RIP7755Proof struct. We expect the fulfiller in this case to supply what the destination chain's state root is; we then use it to re-derive the state representation that was verified using the first storage proof, and if they're equivalent, we can trust the passed-in L2 state root. So this will be a bytes32 l2StateRoot. And we need to pass these arguments in the same order they're passed in over here: we start with the L2 block timestamp, proof.l2BlockTimestamp, and then the state root, proof.l2StateRoot. Okay. Now at this point we have a re-derived output root for the destination chain. For the case where it doesn't equal the value we just verified, we create another custom error to revert with: error InvalidL2StateRoot. Copy that, and then we compare against the storage value we verified in the previous step: if derivedL2OutputRoot does not equal proof.dstL2StateRootProofParams.storageValue, we revert with that custom error. This is yelling at me because the storage value is a bytes string and this is a bytes32; because we expect them to be equivalent, we can wrap the storage value in a bytes32 cast for the type-safety properties of Solidity. One other thing I'm noticing here, and this is just a personal preference for me: because we're using the StateValidator library on a specific account, we can bind the library to the address type to improve the legibility of this line and make it a little more succinct. So I'm going to do that, but it's totally a personal preference. We add a line at the top of the prover contract that says using StateValidator for address, and what that allows us to do is copy this request.l2Oracle, remove it from the function call, and paste it in place of StateValidator, so the call reads request.l2Oracle.validateAccountStorage. It does the same thing; it's just a little bit shorter.
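Step four as a fragment, again using the talk's naming; note the bytes-to-bytes32 cast requires Solidity 0.8.5 or later.

```solidity
error InvalidL2StateRoot();

// Inside validateProof, after the step-three validation:
bytes32 derivedL2OutputRoot =
    keccak256(abi.encodePacked(proof.l2BlockTimestamp, proof.l2StateRoot));
if (derivedL2OutputRoot != bytes32(proof.dstL2StateRootProofParams.storageValue)) {
    revert InvalidL2StateRoot();
}
// From here on, proof.l2StateRoot can be trusted.
```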
Okay. So at this point, we have a verified destination chain state root. Now we can use it to basically redo step three, but now the account we care about is the inbox contract on the destination chain. What that looks like is: bool isValidL2State equals, and to jog your memory again on the structure of a request, we actually have the inbox contract defined by the user when they submit the request, so that address is what we're verifying state against: request.inboxContract dot, what's it called again, validateAccountStorage. In here we need to pass in a state root and another instance of the proof params. The state root is passed in from the proof struct, and at this point we've verified it can be trusted, so we'll use that value: proof.l2StateRoot. Then we need to add another instance of those proof params, this time for the inbox contract on the destination chain. We can duplicate the earlier field, and instead of calling it dstL2StateRootProofParams, call it dstL2AccountProofParams. Copy that field name down here for the second storage proof and pass it in as the second argument: proof dot that destination account proof params field. And then, for the case where isValidL2State comes back false, we need a custom error to throw as a reversion: we define error InvalidL2State up here, and then, if not isValidL2State, revert with that custom error. Cool. Now, if you remember from the first storage proof, we had an issue trusting the storage key that gets passed in from the fulfiller. You can assume we have the same issue in the second storage proof, and we do. Luckily, it's a little easier to solve on this side of things, because, like I said at the beginning of the walkthrough, the outbox contract already re-derives where that storage key location should be, and that gets passed in. So we can just reassign the storage key with the passed-in inboxContractStorageKey. We'll do that above step five: proof.dstL2AccountProofParams.storageKey = inboxContractStorageKey. What that does is ensure we're verifying against the correct storage location where the execution receipt should exist in the inbox contract on the destination chain. So at this point, we have a fully verifiable proof. The last step is to confirm that finalityDelaySeconds is not still in progress. We have the receipt being passed in here, so we can use that for this check, and for when it is still in progress, we can add a custom error at the top. This will be our last error: error FinalityDelaySecondsInProgress. Copy that, and then down at the bottom of this function: if fulfillmentInfo.timestamp plus request.finalityDelaySeconds is greater than, and if you'll remember, we defined the L2 block timestamp in the proof up here, which is exactly what we need to use for this timestamp-based protection, proof.l2BlockTimestamp, then we can't accept this proof, because finalityDelaySeconds is still in progress. So we revert with that new custom error, FinalityDelaySecondsInProgress.
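Steps five and six as a fragment; names from the talk, and it assumes the using StateValidator for address directive from earlier.

```solidity
error InvalidL2State();
error FinalityDelaySecondsInProgress();

// Inside validateProof:
// The outbox already re-derived this key, so it can be trusted.
proof.dstL2AccountProofParams.storageKey = inboxContractStorageKey;

bool isValidL2State = request.inboxContract.validateAccountStorage(
    proof.l2StateRoot,            // trusted after the step-four check
    proof.dstL2AccountProofParams // supplied by the off-chain fulfiller
);
if (!isValidL2State) revert InvalidL2State();

if (fulfillmentInfo.timestamp + request.finalityDelaySeconds > proof.l2BlockTimestamp) {
    revert FinalityDelaySecondsInProgress();
}
```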
And then, cool, that's all of our steps. But there is one final piece of data connection that we're forgetting here. We confirmed a specific storage location in the inbox contract on the destination chain, and we confirmed that a passed-in receipt satisfies the finalityDelaySeconds requirement, but we did not confirm that the receipt we're using for this final check is the same value as the one we just verified. So we need to make sure that the storage value for the second storage proof is equal to the encoded execution receipt we're using for the timestamp check in the last step. That represents the final piece we still have to set up to secure this thing. We can set up the encoding of the FulfillmentInfo struct as a separate private helper function. What do we want to call it? encodeFulfillmentInfo. For encoding the fulfillment info, we need to pass in the struct we're using for the validation up here, and inside, it's a simple abi.encodePacked: return abi.encodePacked with fulfillmentInfo.filler and then fulfillmentInfo.timestamp passed in. We have to custom-encode this struct because of the struct packing rules in Solidity storage. If I show you the definition of the struct in the inbox contract again, we see that it has two fields, timestamp and filler. The timestamp is a uint96 and the filler is an address, so this can get packed into a single 256-bit storage slot. But the way it gets packed, the ordering, follows the defined fields, and they're packed into the lowest-value bits first. So the timestamp actually ends up on the right side of the storage slot, and the filler is on the left. In order to recreate that alignment, we have to use abi.encodePacked here, passing in the fields in reverse order, instead of just doing abi.encode and passing in the entire struct. Using this function, we can now override the storage value for the final storage proof: proof.dstL2AccountProofParams.storageValue = encodeFulfillmentInfo, with fulfillmentInfo passed in. Excellent. That should be our first pass at a full implementation for validating one of these nested storage proofs.
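The receipt-binding piece as a fragment, reconstructed from that explanation:

```solidity
// Storage packs `uint96 timestamp; address filler` into one slot with the
// timestamp in the low-order bits, so re-encode the fields in reverse order.
function encodeFulfillmentInfo(FulfillmentInfo calldata fulfillmentInfo)
    private
    pure
    returns (bytes memory)
{
    return abi.encodePacked(fulfillmentInfo.filler, fulfillmentInfo.timestamp);
}

// Inside validateProof, before the step-five validation:
// proof.dstL2AccountProofParams.storageValue = encodeFulfillmentInfo(fulfillmentInfo);
```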
We can now see if this compiles. If we cd into the contracts directory, we can run a forge build to check. Well, I like to format it properly first, and then forge build: it compiles. Now, to check the logic, I have a test file in here with mock data from a working system. It's commented out just to prevent compiler issues with the initial structure of the project, so we can uncomment it and run a forge test to make sure we implemented everything properly. It recompiles the test file, and, okay, cool, it's passing. This test uses a previously generated proof that adheres to the structure we just set up in that RIP7755Proof struct, and it's proven against a specific state root that's assigned here via the commit beacon root function in the beacon oracle contract. Because this is passing, it's a good sign that our implementation is working properly. So that wraps up the on-chain implementation piece. At this point, we should be good to take a step into the off-chain world and look at the integration for this NFT minting site. I'll close the contracts directory for now, although we'll be back here shortly to reference different function signatures. Inside the services directory, we care about the demo directory. If we take a look in here, as a brief walkthrough: it's literally just a backend server script to run, and it tells you what it's doing. It will display your current NFT balance for the NFT contract that's going to get deployed; we're calling it deployed on a mock Arbitrum chain, and the user is going to be minting it from mock Base. So it displays your NFT balance on mock Arbitrum, your current ETH balance on mock Base, and the price of the NFT, which I think I have hard-coded as one ETH in the deployment script. This is using local Anvil nodes, so using easy numbers is pretty easy. And then lastly, once you hit enter, it triggers minting the NFT, and then we can watch the fulfiller in action generating its proof after the fact, which gets verified against that prover contract we just set up. There's not too much for us to change here; I just wanted to give you a rundown of exactly what's happening. What we care about is inside the src directory, where there's a client file called clients.service. This is all in TypeScript, if anyone's familiar. There's a function called rip7755Mint that is completely empty. This is what the developer needs to fill in: we have to set this up as an RIP-7755 request to integrate with the cross-chain call and close the loop for this minting process. We can start by setting up what this request is going to look like, and to do that, it helps to have the structs file up here as a reference, so I'll open that up. Actually, before we dive into that, as a refresher: in the outbox contract, the function we care about as the entry point is requestCrossChainCall, which accepts one input argument, a CrossChainRequest. So really what we need to build here is a CrossChainRequest with all the correct fields, and that's what I want to show you. To start the implementation, we can get an outline of what this request is going to look like, as well as the low-level call we need it to facilitate. So we start with const calls. It's a batch of just one call, but it still needs to be set up as an array, because it's meant to support a batch of calls. Then we have a const request, which starts as an empty object. We can begin by just getting the field names in here; I'm going to assign them all empty values to start, just to motor through this, and then we'll go through each one individually to make sure we set it up properly and explain it all as we go. So we need to add the prover contract.
And again, I'm just copying everything over to prevent some silly typo mistake, so bear with me for a second. We pass in the inbox contract, l2Oracle, l2OracleStorageKey, rewardAsset, rewardAmount initialized at zero, finalityDelaySeconds initialized at zero (we'll come back to this in a second), nonce at zero, expiry at zero, and then the final two pre-check related fields, precheckContract and precheckData. Then, for the individual call we want to set up here, it takes three parameters: to, data, and value. So we can set to as an empty string, data as an empty string, and then value. Okay, so we have our structure outlined here. Now we need to make sure we set up each of these fields properly, and it helps to have an understanding of exactly what each field is, which we now all should. To help with this, inside the common directory there's a constants file with a couple of chain configs in here, one for mock Arbitrum and one for mock Base. This is going to be very helpful for pulling the addresses from. Assuming everyone deploys the contracts using the deployment scripts that are provided, these addresses are deterministic, so if the contracts are deployed in the right order, these addresses should all be correct, which is why they're pre-populated in this file even though the contracts are not currently deployed on our local network. So, okay, let's start filling these out. For calls: the destination address. Again, what exactly is it that we're trying to do? We want to mint an NFT, so the to address is going to be the NFT address, which we have up here as mockArbitrumNFTAddress. At the top of this client service file, we can import that: import mockArbitrumNFTAddress from ../common/constants, and then we can copy it into our to field. data is going to be the encoded calldata for the mint function; I'm going to leave that for the last step and come back to it in a second. For value: as you see here, the mint function is called with an address, which is the user address receiving the NFT, and the mint price, which is the mint price of the NFT denominated in wei. So we can just pass in this mint price as the value directly. Now, as we start to fill out the request: the requester is pretty self-explanatory. The address of the requester is being passed in, so we can use that. calls is going to be this calls array. The prover contract is what we just implemented: the contract address, on the source chain, which in this context is mock Base, of the prover contract that has been implemented specifically to validate state about the destination chain, which in this context is mock Arbitrum. If we look over here, there's only one defined for each chain, but this is set up in a way that lets us define multiple prover contracts per chain. This is pretty straightforward. First we have to import the chain configs: back up at the top of the file, we can import them from the same file we just imported the NFT address from. Then, down here in our mint function, we can destructure the mockArbitrum and mockBase configs from the outer object containing the chain configs. That looks like const, curly braces with mockArbitrum and mockBase, equals chainConfigs.
And now we can access all of these config fields directly on mockArbitrum and mockBase. For the prover contract, because this is the Base side of the equation here, this will be mockBase.proverContracts.prover. The destination chain ID is going to be mock Arbitrum's chain ID, so mockArbitrum.chainId. The inbox contract is the address of the RIP-7755 inbox contract on mock Arbitrum, so mockArbitrum.contracts.inbox. l2Oracle is the rollup contract on L1 for mock Arbitrum here, so mockArbitrum.l2Oracle. Same thing with the l2OracleStorageKey: this is for the L2 oracle contract for mock Arbitrum, so we grab it from the same place, mockArbitrum.l2OracleStorageKey. Then for the reward asset: for this example, we're just going to use native currency, because we're sending native currency for the mint function. It doesn't necessarily have to be one-to-one like that; we could pass in some ERC-20 and then expect the fulfillers to do the necessary conversions off-chain to make sure the ERC-20 is enough value to account for the native currency being passed in here, but it's a little cleaner for the demo to keep it one-to-one, so that's what we're going to do. The ERC I mentioned earlier defines a specific address that represents native currency, and it's hard-coded here; that's what this 0xEee... address is. It's another exported constant from this constants file, so we can copy it, add it to the import statement, and pass it in for the reward asset. The reward amount, again, to remind you, is meant to cover all of the value from the calls, plus whatever the destination chain gas cost is, plus a tip for the fulfiller. The exact mechanism to calculate what that surplus should be can get pretty complicated, but for the sake of today's demo, in a simplified context, adding an extra 2% to the request should be more than enough for it to be profitable for the fulfiller on our local network, so we'll do it that way. The reward amount is going to be the mint price, which is already in wei, to remind you, with a 2% buffer added. We can do that with BigInts in TypeScript by multiplying by 102n and then dividing by 100n, which results in the mint price with 2% added to it. All right. Then for finalityDelaySeconds: on a live network, this would likely be something on the order of days to a week, to ensure protection against reorgs on the destination chain. Because this is a self-contained system running locally on your machine, we have the flexibility and the freedom to make it really short, and because I want this to be a responsive demo where we see everything happening in real time, we'll make it just 10 seconds. What this results in is roughly a 10 to 15 second delay after the request is submitted before we see the fulfiller actually generate the proof and claim the reward. The nonce doesn't matter, because it's going to get overwritten in the outbox contract; it's just a canonically incrementing nonce for every request. The expiry doesn't really matter too much for this demo either. If you see over here in the constants file, we have a one-week constant, so we can just add one week to the current timestamp.
We just need this value to be greater than finalityDelaySeconds in the future from now, plus some extra cancellation buffer period, which is hard-coded as a constant in the outbox contract storage as a full day. So for the demo, just to get a valid value in here, one week should be more than enough. This will be Date.now: in JavaScript, if you're not familiar, there's a global Date object, and Date.now returns the Unix timestamp, but in milliseconds. Solidity works in seconds, so we have to divide that by a thousand, and Solidity doesn't like decimals, so we then have to floor that to the nearest integer. From here we can add the one-week constant, so we'll add that as another import from this constants file and use it down here for the expiry timestamp: we'll add one week. Okay. And then the last two fields are this precheckContract and precheckData, which, like I said earlier, express an optional fulfillment condition the requester wants to be true. Because it's optional, we're not going to be using it today. To not use it, we have to pass in the zero address, and there's a helpful constant from viem, the web3 library I'm using, that is just called zeroAddress. We can import that at the top and use it as the pre-check contract address in here. And then, last but not least, precheckData: in order for it to pass the off-chain validation that viem does, we just have to set it to '0x'. It could be any arbitrary byte string; because we're not using this step, it doesn't really matter. Okay. That just about covers all of the request. The only thing we haven't done yet is encode the calldata for the mint function on the NFT contract. To reference the NFT contract, let me pull that up: it's just a simple mint function that takes one input argument, which is the to address that should receive the NFT. There's another helpful function from viem called encodeFunctionData that we can use for encoding the calldata here. We can define this as another local variable above the calls constant: const encodedCallData equals encodeFunctionData. Actually, before we define it, let's add it in as the value for data: encodedCallData. Now, encodeFunctionData accepts one input parameter, which is an object with a couple of fields that are necessary here. The first one is the ABI for this NFT contract, which is actually already being imported as nftAbi for some of the other helper functions defined in this class, so we can just use that: abi colon nftAbi. (I don't know why I closed that contract; we still need it.) Next up, we need to define the function name we're encoding data for, which is simply mint, so we copy that into a field named functionName, with mint passed in. And then, because mint accepts one input parameter, we have to specify it with a field called args, which is an array of the input parameters; in this context, it's just the to address, which is passed in here as address. And that should be it for encoding the calldata. So at this point, we have a fully set up RIP-7755 request.
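Condensing that walkthrough, the request construction looks roughly like this. A sketch: identifier names (chainConfigs, mockArbitrumNFTAddress, ONE_WEEK, the import paths) follow the talk's naming and may not match the starter repo exactly, and address is assumed to be the recipient passed into rip7755Mint.

```typescript
import { encodeFunctionData, zeroAddress, parseEther } from "viem";
import { chainConfigs, mockArbitrumNFTAddress, ONE_WEEK } from "../common/constants";
import nftAbi from "../abis/nft"; // assumed import path

const { mockArbitrum, mockBase } = chainConfigs;
const mintPrice = parseEther("1"); // hard-coded to 1 ETH in the deploy script

const calls = [
  {
    to: mockArbitrumNFTAddress,
    data: encodeFunctionData({ abi: nftAbi, functionName: "mint", args: [address] }),
    value: mintPrice,
  },
];

const request = {
  requester: address,
  calls,
  proverContract: mockBase.proverContracts.prover,
  destinationChainId: mockArbitrum.chainId,
  inboxContract: mockArbitrum.contracts.inbox,
  l2Oracle: mockArbitrum.l2Oracle,
  l2OracleStorageKey: mockArbitrum.l2OracleStorageKey,
  rewardAsset: "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE", // ERC-7528 native asset
  rewardAmount: (mintPrice * 102n) / 100n, // calls' value plus ~2% for gas and tip
  finalityDelaySeconds: 10n, // short on purpose so the demo is responsive
  nonce: 0n, // overwritten by the outbox
  expiry: BigInt(Math.floor(Date.now() / 1000)) + ONE_WEEK,
  precheckContract: zeroAddress, // pre-check unused
  precheckData: "0x",
};
```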
Now we need to set up the function call to submit the request. So what does that look like? To give you a little more context on how this class is set up, we have something called a walletClient as a local variable here. If you're unfamiliar with viem as a web3 library, it uses things called public clients and wallet clients: the public client is for reading state from the chain, and the wallet client is for writing state to the chain. This walletClient is already defined in the constructor, so we can just use it as is for the target chain we're submitting the request to, which is mock Base here. This is going to be an asynchronous call, so we start it with await this.walletClient, and the function we care about here is writeContract. Under the hood, this creates your transaction, signs it, and sends it to the network, returning a transaction hash if successful, so we can store that as hash. Then we need to set up the configuration for which contract we're writing to, which function we're calling, and what parameters are needed. That can be a single object in here where we define, and for reference I'm going to pull up the constants file again, the address we're sending the transaction to. The address comes from mockBase, because we're submitting the request to mock Base, and the contract we care about is the outbox contract for the standard, so mockBase.contracts.outbox. Next we need to specify the ABI for the outbox. We'll have to import this at the top: import outboxAbi from ../abis/outbox, which is already populated in this directory, and we define that as the abi here. We then need to define the function name we're sending the transaction to. If we pull up the RIP-7755 outbox contract over here, the entry point for the standard, as I said earlier, is this requestCrossChainCall function, so we copy that into functionName. We then define the input args: as you see here, it's just one argument, the CrossChainRequest, which is the request we just defined. And then, last but not least, we need to define any value that should be submitted with this transaction, in a field called value. In this context, it's not actually going to be the mint price, because, if you remember, we added a 2% buffer to the mint price as the reward amount for this cross-chain call, so the value here should be request.rewardAmount. That does it for setting up the contract write. The last piece here is that, using a public client from viem, we can wait for a transaction receipt to make sure the transaction is confirmed. We do that with another await call. In the constants file, one of the fields in each chain configuration is a public client, so we can use it directly: await mockBase.publicClient, and the function we're going to use here is called waitForTransactionReceipt, which takes in just one input object containing the transaction hash. In JavaScript, if the field name and the variable name you're setting it to are the same, you can omit the colon and the value, so this is the same thing as this; I'll leave it explicit for clarity. And if we get through this waitForTransactionReceipt call, the transaction should be successful, so we'll log something: console.log, transaction success. Okay. So that should be our full request flow.
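The submission piece, condensed from the above; a sketch assuming the request object from the previous snippet and the same config naming, inside the class method where this.walletClient is available.

```typescript
import outboxAbi from "../abis/outbox"; // assumed import path

// Submit the request to the outbox on mock Base.
const hash = await this.walletClient.writeContract({
  address: mockBase.contracts.outbox,
  abi: outboxAbi,
  functionName: "requestCrossChainCall",
  args: [request],
  // The reward asset is native here, so the locked value is the reward amount,
  // not the bare mint price.
  value: request.rewardAmount,
});

// Wait for inclusion before reporting success.
await mockBase.publicClient.waitForTransactionReceipt({ hash });
console.log("Transaction success");
```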
It's pretty straightforward. I just wanted to give you a walkthrough of all the different field names, how they should be assigned, what the values mean, and how they fit together. This should be enough in place to run the system end to end. So if we take a look at the root of the directory, the root readme here, there's a list of commands for spinning up all the infra here to deploy the contracts on top of, and all of the services that are needed to get everything to work together. So we can start by running these commands. The first thing we need to do is spin up our local chains. If we open new terminals that are each at the root of the project, we can open three in split screen side by side, and these are meant to represent the mock L1 and then the two mock L2 chains. There's a makefile defining all these commands if you want to see what they're actually doing under the hood. We can run make start mock L1 in the first terminal, so this is like our mock L1 chain. We can then run make start mock base, so we have our mock base chain in the middle. And then lastly, we can do make start mock arbitrum to run our mock arbitrum chain in the terminal on the right here. So now with these three chains running, we have our local chains up, and we can deploy our contracts on top of them. There's another makefile command set up for deploying the contracts in the correct order that is required by the off-chain services here. So we can open a new terminal; the three blockchains are still running in the background here, I just have a new terminal running on top of them. We can run make setup contracts, and that's going to compile and deploy all of the contracts to the local network. So that will just take a second. While that's going, we can copy the make start syncer command. What this is going to do is start the off-chain service for sharing state representations bidirectionally between the mock L1 and the mock L2s. And this one's pretty chatty. So we can now open the final two terminals that are needed here. It's a ton of terminals, I know. We need to start the off-chain fulfiller to listen and respond to requests. So that's make start fulfiller. So now we have the fulfiller listening. We can do a split screen here to now run the demo. So to run the demo app and mint the NFT, we just run make demo. So if we type that in. Not too pretty, but it logs to the console what it's doing here, the current state. So we have: welcome to the demo, mint your NFT, the devcon NFT on mock arbitrum. We currently have zero in the wallet address that's being used for the demo here. The price is one ETH, and the current base balance is almost 10,000 ETH because this is one of the default accounts on the local Anvil nodes. So now if we press enter, this is going to send that mint request to the local network, and we can see if everything worked successfully. And it did. So the transaction went through. Actually, if we run this again, we already have the current NFT on the destination chain, because the fulfiller, if you saw over here, picked up on the request almost right away and submitted it, because it validated that the incentive was sufficient. And then in a second, we should see something else happen, because the finality delay of 10 seconds has gone past.
And what the fulfiller just did is it picked up on the fact that it waited long enough and it's now allowed to claim the reward for that request. And it generated this massive storage proof here and submitted that storage proof to the outbox contract on the mock base chain and everything was successful. So this was validated against the prover contract that we just implemented. And so if you take a look in the Fulfiller directory here, after that ran, it logged the proof into a JSON file. So you can see exactly what the proof was that it used to verify that the call was submitted. So that's what this file is. And then inside of the SRC directory, there's a database directory that is storing db.json, which this is the representation of the request that it picked up on that we just submitted. And then it has a rewards file that it's tracking how much ETH that it's claimed in rewards. So we have a locally running, like fully working system end to end. Woo. Woo. Yeah. And I'm realizing right now that the reward tracking is not accounting for gas cost on the destination chain, but nonetheless, you get the idea. So that just about does it. If you take a look here, yeah, like I said, if we run the demo again, we now see that the current NFT balance is one because the NFT was actually minted on the destination chain as defined by the encoded call data that we set up for the target calls. And then so if we run it again, now it would be just incrementing from there. So that does it for the demo. Thank you, everyone, for coming. And I'll be hanging around for a little bit if anyone has Questions or if you want to chat the standard a little bit more. Like I said, we have an open source repo for proof of concept Here, so if anyone feels compelled to contribute, we Fully invite you to contribute. So thank you. Thank you.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731645000000, - "slot_end": 1731652200000, - "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/1R-pN3is6_qjmy7k7gl3hHECFG1O_ZDuH33K5B6JQmGc", - "resources_slides": null, + "slot_start": 1731483000000, + "slot_end": 1731488400000, + "slot_roomId": "classroom-c", + "resources_presentation": "https://docs.google.com/presentation/d/1ab7Dm_NLmbdVl-rQdbpavpCT-nXILHwBPKMRvciyvFQ", + "resources_slides": "https://drive.google.com/file/d/17x2lMNSNLoeu0Q0z9SNHtLI-zBk_DdIB/view", "speakers": [ - "jack-chuma" + "franck-royer", + "alvaro" ] }, "vector": [ @@ -639189,8 +637382,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -639294,6 +637485,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -639752,16 +637944,8 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, + 6, 0, 0, 0, @@ -639953,6 +638137,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -639977,7 +638162,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -640029,6 +638213,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -640042,6 +638227,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -640055,6 +638241,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -640086,6 +638273,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -640135,7 +638323,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -640396,6 +638583,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -640502,9 +638690,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -640520,57 +638708,55 @@ }, { "session": { - "id": "rlnv2-enhanced-spam-protection-for-all-peer-to-peer-networks", - "sourceId": "ZFJXFP", - "title": "RLNv2: enhanced spam protection for all peer-to-peer networks", - "description": "RLN is a protocol designed to prevent DoS attacks in a privacy-preserving manner. 
It uses zero-knowledge proof to limit the number of actions a user can take. In a p2p network, it can be used to limit messages sent over a period of time by one sender. RLN’s latest upgrade limits to N (instead of 1) messages per epoch. Also, the Merkle tree is now built on-chain, greatly improving the UX.\r\n\r\nCome learn how to use an implementation of RLNv2 to DoS protect a peer-to-peer network.", - "track": "Cypherpunk & Privacy", - "type": "Workshop", + "id": "road-to-effective-public-goods-funding-through-quantitative-cross-comparative-analysis-of-grants-programs", + "sourceId": "NHERZE", + "title": "Road to Effective Public Goods Funding through Quantitative Cross-Comparative Analysis of Grants Programs", + "description": "I aim to achieve effective public goods funding by comparing grants models. Grants programs are key in the crypto ecosystem, but comparative studies are rare. Our study compares Uniswap, dYdX, Optimism, Gitcoin, and more, categorizing them into \"top-down,\" \"bottom-up,\" and \"QF (algorithmic)\" types. Findings suggest bottom-up and QF types distribute funds more evenly with smaller variability and grant amounts, while top-down types show greater variability with larger grants for fewer grantees.", + "track": "Coordination", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Privacy", - "Censorship Resistance", - "Decentralization", - "Zero-Knowledge", - "network", - "peer-to-peer", - "Censorship Resistance", - "Decentralization", - "Privacy", - "Zero-Knowledge" + "Coordination", + "DAO", + "Governance", + "Regenative Ethereum", + "Public good", + "funding", + "public", + "goods", + "Coordination", + "DAO", + "Governance", + "Public good", + "Regenative Ethereum" ], "keywords": [ - "Anonymity", - "peer-to-peer networks" + "Grants Program", + "Public Goods Funding" ], - "duration": 3144, + "duration": 488, "language": "en", - "sources_swarmHash": "6ea0528ba8f1725dea3e57b64456bbc3b2119584f9f8c6c02f8558bd98ae88e5", - "sources_youtubeId": "EH6zUu6AzlQ", + "sources_swarmHash": "53bd35c89f9ae07e4dc208c1d9b3b8fcc5c864d3afe99e7362260a3dbf804082", + "sources_youtubeId": "YyoQSc4iDPk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6736f10974749a4b891f39c8", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731483000000, - "slot_end": 1731488400000, - "slot_roomId": "classroom-c", - "resources_presentation": "https://docs.google.com/presentation/d/1ab7Dm_NLmbdVl-rQdbpavpCT-nXILHwBPKMRvciyvFQ", - "resources_slides": null, + "slot_start": 1731640800000, + "slot_end": 1731641400000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1el9pBQpo_PXoaMz4cdOtMT4cXnCNpLdicORmmniTBK4", + "resources_slides": "https://drive.google.com/file/d/1uKI_zsMoBbvB-UFJsjuJ_Oe-HF9Naa8r/view", "speakers": [ - "franck-royer", - "alvaro" + "shinya-mori" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 6, 0, 0, 0, @@ -640582,6 +638768,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -640673,7 +638860,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -641134,14 +639320,13 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -641328,7 +639513,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -641404,7 +639588,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -641423,15 +639606,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -641440,30 +639614,6 @@ 0, 
0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -641510,6 +639660,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -641592,6 +639743,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -641750,6 +639902,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -641775,27 +639928,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -641832,6 +639964,38 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -641879,12 +640043,34 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, 2, 0, 0, @@ -641896,57 +640082,49 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "road-to-effective-public-goods-funding-through-quantitative-cross-comparative-analysis-of-grants-programs", - "sourceId": "NHERZE", - "title": "Road to Effective Public Goods Funding through Quantitative Cross-Comparative Analysis of Grants Programs", - "description": "I aim to achieve effective public goods funding by comparing grants models. Grants programs are key in the crypto ecosystem, but comparative studies are rare. Our study compares Uniswap, dYdX, Optimism, Gitcoin, and more, categorizing them into \"top-down,\" \"bottom-up,\" and \"QF (algorithmic)\" types. Findings suggest bottom-up and QF types distribute funds more evenly with smaller variability and grant amounts, while top-down types show greater variability with larger grants for fewer grantees.", - "track": "Coordination", + "id": "rohingya-decentralized-identity-and-community-building", + "sourceId": "G8W8MU", + "title": "Rohingya Decentralized Identity and Community Building", + "description": "The Rohingya Project is a transformative digital platform addressing the critical needs of the Rohingya community, focusing on empowerment and cultural preservation. Key services include R-ID, a decentralized identity verification system ensuring privacy and access to opportunities, and R-Academy, which offers courses on Rohingya culture and personal development. 
The Heritage Archive provides access to cultural resources, while the Community Exchange fosters collaboration & economic development.", + "track": "Real World Ethereum", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Coordination", - "DAO", - "Governance", - "Regenative Ethereum", - "Public good", - "funding", - "public", - "goods", - "Coordination", - "DAO", - "Governance", - "Public good", - "Regenative Ethereum" + "Decentralization", + "Digital Sovereignty", + "Ethereum for Good" ], "keywords": [ - "Grants Program", - "Public Goods Funding" + "Rohingya", + "Decentralized Identity", + "inclusion" ], - "duration": 488, + "duration": 667, "language": "en", - "sources_swarmHash": "53bd35c89f9ae07e4dc208c1d9b3b8fcc5c864d3afe99e7362260a3dbf804082", - "sources_youtubeId": "YyoQSc4iDPk", + "sources_swarmHash": "78d7bdda2dbaf97a4154df19ae2900b51e6fc485197e9aa0abdc505f8628ca2d", + "sources_youtubeId": "3GBkaOSBuT0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736f10974749a4b891f39c8", + "sources_streamethId": "6735b6219dbb7a90e16c11bf", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731640800000, - "slot_end": 1731641400000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1el9pBQpo_PXoaMz4cdOtMT4cXnCNpLdicORmmniTBK4", - "resources_slides": null, + "slot_start": 1731572400000, + "slot_end": 1731573000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1UYUaHo5Qavbvjs-V4IY1wgEZga3-zWvPCG7PXENX-k4", + "resources_slides": "https://drive.google.com/file/d/11A5-7-t8giOFThBmze9Jv5u2eeVmWIjN/view", "speakers": [ - "shinya-mori" + "muhammad-noor" ] }, "vector": [ @@ -641956,12 +640134,12 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -642748,6 +640926,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -642797,12 +640976,11 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -642810,7 +640988,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -642825,6 +641002,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -642856,7 +641034,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -642939,7 +641116,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -643099,7 +641275,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -643161,8 +641336,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -643269,11 +641442,11 @@ 0, 0, 0, - 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -643286,43 +641459,47 @@ }, { "session": { - "id": "rohingya-decentralized-identity-and-community-building", - "sourceId": "G8W8MU", - "title": "Rohingya Decentralized Identity and Community Building", - "description": "The Rohingya Project is a transformative digital platform addressing the critical needs of the Rohingya community, focusing on empowerment and cultural preservation. Key services include R-ID, a decentralized identity verification system ensuring privacy and access to opportunities, and R-Academy, which offers courses on Rohingya culture and personal development. The Heritage Archive provides access to cultural resources, while the Community Exchange fosters collaboration & economic development.", + "id": "running-ethereum-node-in-africa", + "sourceId": "XT8ZWL", + "title": "Running Ethereum Node In Africa", + "description": "Running an Ethereum node in Africa presents both challenges and opportunities. It enables participation in the global blockchain ecosystem while contributing to network security and decentralization. 
Key points to highlight include overcoming infrastructure limitations, leveraging community support, the potential for economic empowerment through staking, and fostering local innovation and adoption. Emphasize the importance of education, collaboration, and strategic partnerships to", "track": "Real World Ethereum", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Community", + "audience": "Stakers/Validators", "featured": false, "doNotRecord": false, "tags": [ + "Home staking", + "Distributed validator technology", "Decentralization", - "Digital Sovereignty", - "Ethereum for Good" + "diversity", + "geographical", + "Decentralization", + "Distributed validator technology", + "Home staking" ], "keywords": [ - "Rohingya", - "Decentralized Identity", - "inclusion" + "Geographical", + "Diversity" ], - "duration": 667, + "duration": 611, "language": "en", - "sources_swarmHash": "78d7bdda2dbaf97a4154df19ae2900b51e6fc485197e9aa0abdc505f8628ca2d", - "sources_youtubeId": "3GBkaOSBuT0", + "sources_swarmHash": "17771851685309b8c91301fcc0013484c2d9732fb2aacfc27bbefa76c6c204d5", + "sources_youtubeId": "_AywwOgu2zY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735b6219dbb7a90e16c11bf", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6735c1c39dbb7a90e1f43d28", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735c1c39dbb7a90e1f43d28.vtt", + "transcript_text": " Hi. Okay. Okay, I want to start by asking a question. How many of us want to be here in the next, let's say, 10 years, 20 years, to see that Ethereum has gotten to the great potential it should be? How many of us? Okay, good. Awesome. So I think we are all in one spirit. I'm David. I'm David Uzotuku from Nigeria and I'm here to really spoil our hearts to see how we all come together to make Ethereum truly decentralized, right? And I'll be talking on from my own perspective as a participant and as a user and someone who who believes in the future of the ATM network what I'm trying to do and how I'm doing it so I live in Africa in Nigeria Africa and Africa has some few unique challenges and it has a lot of potentials. So today I'll be talking about running nodes in Africa and other few things. So why geographic decentralization is vital for Ethereum? If Ethereum has to be truly decentralized, It means we need to have these nodes, the information infrastructure points, distributed. And it has to be distributed from a geographical perspective. So currently right now in the world, we have like seven continents. And Africa is one of it. And just to also add, I believe in one day that some of us, one day, possible me or you, might go to Antarctica to actually run a node. Right now, in Africa, we have less than 1% of node operators in Africa. But in other parts of the world, we have more than 30% in other side part of the world. And that's not true decentralization. True decentralization is where we have a good balance to these infrastructures running together. The interesting thing about Ethereum vision is it has a vision of security, trust, resilience, but all of this can happen if we don't have true decentralization. Because one, if we have an imbalance, right, it means the majority will actually have high rules or policies, which might not favor the rest. So we all have to be like, it has to really be decentralized for us to really have this vision come to pass. 
Geographical diversity is also one thing, one of the core visions of the Ethereum network itself. Seeing Ethereum really, really being decentralized is what the network needs. So, this is just a map showing us the nodes we have globally. Just like I said, we have less than 1% of nodes in Africa and also Latin America. So these two continents kind of have similar unique challenges. So I'll be talking on some of these challenges also. Another thing I will be talking about is why we should also think about supporting Africa in trying to run nodes, right? Because of the potentials. At the moment in Africa, we have approximately 1.7 billion people, right? And a good number of this number is youth, right? And one thing I love saying is in Africa, we have a lot of challenges, right? Where there are challenges, that's where you talk about finding solutions to solve these challenges. And one great solution right now is the Ethereum network, the Ethereum blockchain. We solve challenges we face in Africa, so that tells us that there's a great potential in Africa, and that's why we also want to see more nodes running in Africa. Building a decentralized future, our work at NodeBridge. In trying to solve this issue of true decentralization, from my own standpoint, I came up with a community called NodeBridge. The idea right now is just to see how we can have more home node operators, right, supporting people's minds to also be participants of this network in a way to really make Ethereum truly decentralized, right? Some huge challenges we have in Africa are education, lack of internet infrastructure, lack of power infrastructure, and economic issues, right? So, you know, in my own small way, right, one of the things we try to do at NodeBridge is to break this barrier. And we find a way to break this barrier by, for instance, doing workshops. I've been doing a lot of workshops in a few African countries, educating them, finding better ways to actually tell them that, yes, we all have to be part of this network, making this network resilient again. And then also, power. We have to find a way to use a very small system, so it's sustainable and it actually also solves the problem. So this is just like a Raspberry Pi running on ARM, which also solves that problem. So I custom-build all of these things to solve the problem for an African person who is not able to actually buy this infrastructure, because setting up this infrastructure is quite expensive. Yeah, this is some of the workshops we get to do. Yeah, this is also some of the setups. We've seen more Africans running these nodes, right? So they kind of have different setups. So actually at the moment, we also use Starlink, and we use solar power systems to generate power for sustainability, along with the custom nodes we put together. Also, yeah, this is like the whole power system, the Starlink, that some of the node operators we have at the moment are actually using. Also, education. So why security depends on global node operators and solo stakers, right? So, just like I said, we all have to come together, right, to really push Ethereum to be truly decentralized. And then, the point here is, Africa is trying to solve its own problem.
But because we're trying to solve a global problem which is trying to solve decentralization, we all have to be like, all of us have to all come together, like helping each other. For instance, there are different system right now, like the DVT system. Okay, so like the DVT system, which actually you could also delegate to permissionless system, you could actually have like a cluster to delegate also your stakes to a particular node that is sitting in underrepresented location, right? So we all have to come together to actually do these things together because it depends on us all, not just one geographical point, but it all depends on everybody who loves this network and who is ready actually to see this network in the next 50 years to come. Yeah, how you could also support, like, there are different ways also you could also put this support, just like I said. Supporting node operators from your own point of view, let's say it might be you delegating your nodes, it might be you delegating your stakes, it might be you setting up some resources, which is for education and its likes. So I also just want to quickly ask, what does true decentralization mean to you? And we can think about it in the next session. I'm sorry, David, but the time is up and we have a really tight schedule. If you want to talk to David, I think the work he's doing is absolutely amazing. Definitely connect with him.", "eventId": "devcon-7", - "slot_start": 1731572400000, - "slot_end": 1731573000000, + "slot_start": 1731575400000, + "slot_end": 1731576000000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1UYUaHo5Qavbvjs-V4IY1wgEZga3-zWvPCG7PXENX-k4", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1buMXIg1gOhRzKk22wUllHQbcl9xVPk1mQ7_JHDKF_oQ", + "resources_slides": "", "speakers": [ - "muhammad-noor" + "david-uzochukwu" ] }, "vector": [ @@ -643728,6 +641905,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -643897,10 +642075,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -644127,7 +642301,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -644164,6 +642337,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -644203,15 +642377,11 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -644542,6 +642712,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -644649,8 +642821,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -644662,56 +642834,47 @@ }, { "session": { - "id": "running-ethereum-node-in-africa", - "sourceId": "XT8ZWL", - "title": "Running Ethereum Node In Africa", - "description": "Running an Ethereum node in Africa presents both challenges and opportunities. It enables participation in the global blockchain ecosystem while contributing to network security and decentralization. Key points to highlight include overcoming infrastructure limitations, leveraging community support, the potential for economic empowerment through staking, and fostering local innovation and adoption. Emphasize the importance of education, collaboration, and strategic partnerships to", - "track": "Real World Ethereum", - "type": "Lightning Talk", + "id": "running-wargames-to-prepare-protocol-teams-for-incident-response", + "sourceId": "N3DBC3", + "title": "Running Wargames to Prepare Protocol Teams for Incident Response", + "description": "SEAL (Security Alliance) Wargames: cybersecurity exercises designed to enhance Web3 protocol resilience. 
We'll share experiences from running these with major Ethereum protocols, covering:\r\n-Exercise structure: OSINT, tabletops, and live simulations on forked networks\r\n-Scenario designs and common vulnerabilities\r\n-Infrastructure and open-source tooling\r\n-Key learnings and best practices\r\n-Scaling strategies and the importance of regular security drills in the evolving Web3 landscape", + "track": "Security", + "type": "Talk", "expertise": "Intermediate", - "audience": "Stakers/Validators", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Home staking", - "Distributed validator technology", - "Decentralization", - "diversity", - "geographical", - "Decentralization", - "Distributed validator technology", - "Home staking" + "Coordination", + "Security", + "incident", + "response", + "Coordination", + "Security" ], "keywords": [ - "Geographical", - "Diversity" + "Incident", + "Response" ], - "duration": 611, + "duration": 1350, "language": "en", - "sources_swarmHash": "17771851685309b8c91301fcc0013484c2d9732fb2aacfc27bbefa76c6c204d5", - "sources_youtubeId": "_AywwOgu2zY", + "sources_swarmHash": "702aabf1c42143159cad1c657c1247f880937f9ed6f493fa9a9bdf4323e70723", + "sources_youtubeId": "mIOEkVh6aGM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735c1c39dbb7a90e1f43d28", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735c1c39dbb7a90e1f43d28.vtt", - "transcript_text": " Hi. Okay. Okay, I want to start by asking a question. How many of us want to be here in the next, let's say, 10 years, 20 years, to see that Ethereum has gotten to the great potential it should be? How many of us? Okay, good. Awesome. So I think we are all in one spirit. I'm David. I'm David Uzotuku from Nigeria and I'm here to really spoil our hearts to see how we all come together to make Ethereum truly decentralized, right? And I'll be talking on from my own perspective as a participant and as a user and someone who who believes in the future of the ATM network what I'm trying to do and how I'm doing it so I live in Africa in Nigeria Africa and Africa has some few unique challenges and it has a lot of potentials. So today I'll be talking about running nodes in Africa and other few things. So why geographic decentralization is vital for Ethereum? If Ethereum has to be truly decentralized, It means we need to have these nodes, the information infrastructure points, distributed. And it has to be distributed from a geographical perspective. So currently right now in the world, we have like seven continents. And Africa is one of it. And just to also add, I believe in one day that some of us, one day, possible me or you, might go to Antarctica to actually run a node. Right now, in Africa, we have less than 1% of node operators in Africa. But in other parts of the world, we have more than 30% in other side part of the world. And that's not true decentralization. True decentralization is where we have a good balance to these infrastructures running together. The interesting thing about Ethereum vision is it has a vision of security, trust, resilience, but all of this can happen if we don't have true decentralization. Because one, if we have an imbalance, right, it means the majority will actually have high rules or policies, which might not favor the rest. So we all have to be like, it has to really be decentralized for us to really have this vision come to pass. 
Geographical diversity is also one thing, one of the core visions of the Ethereum network itself. Seeing Ethereum really, really being decentralized is what the network needs. So, this is just a map showing us the nodes we have globally. Just like I said, we have less than 1% of nodes in Africa and also Latin America. So these two continents kind of have similar unique challenges. So I'll be talking on some of these challenges also. Another thing I will be talking about is why we should also think about supporting Africa in trying to run nodes, right? Because of the potentials. At the moment in Africa, we have approximately 1.7 billion people, right? And a good number of this number is youth, right? And one thing I love saying is in Africa, we have a lot of challenges, right? Where there are challenges, that's where you talk about finding solutions to solve these challenges. And one great solution right now is the Ethereum network, the Ethereum blockchain. We solve challenges we face in Africa, so that tells us that there's a great potential in Africa, and that's why we also want to see more nodes running in Africa. Building a decentralized future, our work at NodeBridge. In trying to solve this issue of true decentralization, from my own standpoint, I came up with a community called NodeBridge. The idea right now is just to see how we can have more home node operators, right, supporting people's minds to also be participants of this network in a way to really make Ethereum truly decentralized, right? Some huge challenges we have in Africa are education, lack of internet infrastructure, lack of power infrastructure, and economic issues, right? So, you know, in my own small way, right, one of the things we try to do at NodeBridge is to break this barrier. And we find a way to break this barrier by, for instance, doing workshops. I've been doing a lot of workshops in a few African countries, educating them, finding better ways to actually tell them that, yes, we all have to be part of this network, making this network resilient again. And then also, power. We have to find a way to use a very small system, so it's sustainable and it actually also solves the problem. So this is just like a Raspberry Pi running on ARM, which also solves that problem. So I custom-build all of these things to solve the problem for an African person who is not able to actually buy this infrastructure, because setting up this infrastructure is quite expensive. Yeah, this is some of the workshops we get to do. Yeah, this is also some of the setups. We've seen more Africans running these nodes, right? So they kind of have different setups. So actually at the moment, we also use Starlink, and we use solar power systems to generate power for sustainability, along with the custom nodes we put together. Also, yeah, this is like the whole power system, the Starlink, that some of the node operators we have at the moment are actually using. Also, education. So why security depends on global node operators and solo stakers, right? So, just like I said, we all have to come together, right, to really push Ethereum to be truly decentralized. And then, the point here is, Africa is trying to solve its own problem.
But because we're trying to solve a global problem which is trying to solve decentralization, we all have to be like, all of us have to all come together, like helping each other. For instance, there are different system right now, like the DVT system. Okay, so like the DVT system, which actually you could also delegate to permissionless system, you could actually have like a cluster to delegate also your stakes to a particular node that is sitting in underrepresented location, right? So we all have to come together to actually do these things together because it depends on us all, not just one geographical point, but it all depends on everybody who loves this network and who is ready actually to see this network in the next 50 years to come. Yeah, how you could also support, like, there are different ways also you could also put this support, just like I said. Supporting node operators from your own point of view, let's say it might be you delegating your nodes, it might be you delegating your stakes, it might be you setting up some resources, which is for education and its likes. So I also just want to quickly ask, what does true decentralization mean to you? And we can think about it in the next session. I'm sorry, David, but the time is up and we have a really tight schedule. If you want to talk to David, I think the work he's doing is absolutely amazing. Definitely connect with him.", + "sources_streamethId": "6732febd80d989c5b7b49fc9", "eventId": "devcon-7", - "slot_start": 1731575400000, - "slot_end": 1731576000000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1buMXIg1gOhRzKk22wUllHQbcl9xVPk1mQ7_JHDKF_oQ", - "resources_slides": null, + "slot_start": 1731390600000, + "slot_end": 1731392400000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1Vl9aDLrFn0_bNTA3ddPbHqxDjrCLUyNEIUn4eBlSNzE", + "resources_slides": "https://drive.google.com/file/d/1jvvbrebU3zmid3XxLVhEdMAb1rR17Tv7/view", "speakers": [ - "david-uzochukwu" + "isaac-patka", + "kelsie-nabben" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -645109,19 +643272,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -645296,6 +643446,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -645471,6 +643623,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -645543,7 +643696,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -645559,7 +643711,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -645587,7 +643738,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -645628,6 +643778,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -645919,8 +644070,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -645937,6 +644086,12 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -646020,7 +644175,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -646037,53 +644191,56 @@ 0, 0, 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0 ] }, { "session": { - "id": "running-wargames-to-prepare-protocol-teams-for-incident-response", - "sourceId": "N3DBC3", - "title": "Running Wargames to Prepare Protocol Teams for Incident Response", - "description": "SEAL (Security Alliance) Wargames: cybersecurity exercises designed to enhance Web3 protocol resilience. 
We'll share experiences from running these with major Ethereum protocols, covering:\r\n-Exercise structure: OSINT, tabletops, and live simulations on forked networks\r\n-Scenario designs and common vulnerabilities\r\n-Infrastructure and open-source tooling\r\n-Key learnings and best practices\r\n-Scaling strategies and the importance of regular security drills in the evolving Web3 landscape", - "track": "Security", - "type": "Talk", + "id": "samba-a-besu-portal-client", + "sourceId": "FTC8PQ", + "title": "Samba, a Besu Portal Client", + "description": "A presentation about my experience participating in the EPF. Talking primarily about the project I worked on for the cohort and various obstacles that I faced along the way. I additionally aim to go into detail about where I see Samba going in the future and my role in that development.", + "track": "[CLS] EPF Day", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Product", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "Coordination", - "Security", - "incident", - "response", - "Coordination", - "Security" + "Portal", + "network" ], "keywords": [ - "Incident", - "Response" + "EPF" ], - "duration": 1350, + "duration": 705, "language": "en", - "sources_swarmHash": "702aabf1c42143159cad1c657c1247f880937f9ed6f493fa9a9bdf4323e70723", - "sources_youtubeId": "mIOEkVh6aGM", + "sources_swarmHash": "e02934d00bab1d926fea5b2862138be2ad9176107861f2ad27b01d9f661df389", + "sources_youtubeId": "11sZxJ4QuLk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6732febd80d989c5b7b49fc9", + "sources_streamethId": "67342b249dbb7a90e1bd5b22", "eventId": "devcon-7", - "slot_start": 1731390600000, - "slot_end": 1731392400000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1Vl9aDLrFn0_bNTA3ddPbHqxDjrCLUyNEIUn4eBlSNzE", - "resources_slides": null, + "slot_start": 1731471300000, + "slot_end": 1731472200000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1V8MPOsuS_Y8NmrHqykkqj248dMqY5xfRkNholeY8m_Q", + "resources_slides": "https://drive.google.com/file/d/1nU7h34dhmciALEpyYLrBBGUM1i9UWX5Z/view", "speakers": [ - "isaac-patka", - "kelsie-nabben" + "derek-sorken" ] }, "vector": [ - 6, 0, 0, 0, @@ -646099,6 +644256,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -646655,9 +644813,8 @@ 0, 0, 0, - 6, - 6, 0, + 6, 0, 0, 0, @@ -646834,7 +644991,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -646921,6 +645077,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -646989,7 +645146,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -647299,9 +645455,6 @@ 0, 0, 2, - 2, - 0, - 0, 0, 0, 0, @@ -647403,9 +645556,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -647419,38 +645572,48 @@ }, { "session": { - "id": "samba-a-besu-portal-client", - "sourceId": "FTC8PQ", - "title": "Samba, a Besu Portal Client", - "description": "A presentation about my experience participating in the EPF. Talking primarily about the project I worked on for the cohort and various obstacles that I faced along the way. I additionally aim to go into detail about where I see Samba going in the future and my role in that development.", - "track": "[CLS] EPF Day", + "id": "satellite-based-cryptographic-layer-extra-terrestial-extension-to-ethereum", + "sourceId": "SZBQLK", + "title": "Satellite based Cryptographic Layer - Extra-terrestial Extension to Ethereum", + "description": "Using nano-satellites with edge compute units we will show how we intend to build an orbital compute layer with unique properties. 
We will propose a novel cryptographic applications layer built with vision to space explorations.\r\n\r\nTypically public blockchains enable cryptographic primitives for the digital commons on earth, we will share novel implementation of cryptographic applications that will extend the digital commons into Low Earth Orbit (LEO) and import cryptographic resources from LEO.", + "track": "Cypherpunk & Privacy", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Portal", - "network" + "Network State", + "Use cases of cryptography", + "DePIN", + "space", + "frontier", + "DePIN", + "Network State", + "Use cases of cryptography" ], "keywords": [ - "EPF" + "space", + "frontier" ], - "duration": 705, + "duration": 593, "language": "en", - "sources_swarmHash": "e02934d00bab1d926fea5b2862138be2ad9176107861f2ad27b01d9f661df389", - "sources_youtubeId": "11sZxJ4QuLk", + "sources_swarmHash": "7f0a508bddf0c6ec3d9a6daf5707ab211837a4da389a507c6bc1e223da4741bd", + "sources_youtubeId": "AJq7z4eLYm0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67342b249dbb7a90e1bd5b22", + "sources_streamethId": "6736dec174749a4b89942272", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736dec174749a4b89942272.vtt", + "transcript_text": " Leonardo Silva Reviewer:\" Elisabeth Buffard Hello, Earthlings. So, Ethereum is great. We want to take it to more planets than only Earth. And when we think about space frontier, often nerds think about the space frontier as some sci-fi or alien technologies. Or for some other people, it's just an empty void, which is slightly less inspiring. The reality is that if anything of value is going to come out of it, we need to get nuanced and work with the complexities. So what we have seen is that in the last two years alone, more satellites have been launched into orbit than in the previous five decades combined. So finally, after decades of stagnation, we're entering a space acceleration era. And this is the time where democratization of the space industry is happening and plenty of startups with wild ideas are finally taking a stab at implementing their stuff. With SpaceCoin, what we're building is orbital route of trust. It's literally out- this world secure hardware, out of reach of physical attacks. It's space-based trusted execution environment, or we call it Space TE. And this serves as infrastructure for Celestial Compute Marketplace, payment layer for satellite services, and from Earth, we'll be able to access it via RPC nodes for EVM and other networks. This is a real photo from a launch not long ago. What we've built with the Space T is infrastructure with our aerospace partner, CryptoSat. We basically designed, built, and launched satellites into orbit since 2021. Last year we contributed to the EIP-4844 with the trusted setup ceremony, the KZG, aboard the satellites. And the latest satellites are equipped with the Iridium connection, which is reachable from any place in the planet. And now we're preparing a set of nodes that will be equipped with more iridium antennas, C-edge nodes. And essentially this is immune to any kind of attacks there. The architecture is Yang Wang's session. Okay, thank you for mic. Thanks, Laj. So this is actually what you see is like the network architecture which actually is like network resilient. 
So we are using the Iridium satellites, which have 66 units distributed across the planet with nine in backup. So that's the first line of the communication path we are using. Then we are using the Aptos Orbital, which provides us the computation infrastructure currently. And thanks to this, actually, we are building SpaceCoin on top. So what we can build thanks to this is actually like a protocol with specific hardware capabilities. And it will be resistant to any kind of DDoS attacks and censorship, and in the future we believe there will be a lot more communication paths we can use and computational capacity we can get. So this is basically a brief diagram where the satellites are reachable at any single moment, so you can see that in any of the intervals some satellites are actually reachable, where the ground stations are proposing, and then electing and voting back, and broadcasting back to the ground stations. So because there's a lot of constraints in the environment, what we are using is actually a modified HotStuff-1 with instant finality. And thanks to this, we've retrofitted it for our lower bandwidth with the least amount of messages needed between the satellites. And it actually relaxes the requirement, so we just need connectivity to only one node designated as the leader. So this is briefly the diagram of how it will be working in the future and what we are looking forward to implementing currently. So we have pre-finality on the L2, with the L1 as backup, where the state is posted on the L1, which is good for low-value transactions; and for full finality we'll be using the L1, which is good for high-value transactions, where the state will be fully committed to the L1 from the satellites. So for security services, thanks to this we can offer the Space TE coprocessors, which enable FHE use cases. We are also looking forward to offering the Secure Custodian for various key management systems and schemes. Then we are looking at Cosmic Entropy and the VRF. And thanks to this, we can offer some kind of secure deletion, because the hardware is inaccessible for inspection, and the eternal bulletin board. So what we're looking forward to up there is that we'll be implementing support for EVM compatibility, and we're looking at an initial deployment focusing on the bridge contracts, with future extensibility through WASM and RISC-V based VMs. So we are looking for the L1 and L2 foundations, MPC wallets, validators, and especially entropy providers as well. So thank you for the attention. Please give us a follow on Twitter, and I think now it's time for the questions. Thank you. It is, it is. Man. Peace among worlds. So, do we have any questions? Remember to raise your hand and I will toss this to you. Yeah, we can have any kind of questions. We can also have some physics. Oh. It'll be delivered to you. Don't worry, it doesn't break. So, this is fascinating to see, so hats off for building it. What happens, is it harder to patch because it's in satellites? Like on Earth, I guess you could upgrade the hardware or switch over to a different operating system.
If there's a vulnerability in the OS that's running on your satellite, I imagine that's a lot harder to go and fix. So how do those challenges come in? And the second one was, what happens if one of the satellites fails? And can you still maintain connection between your L1 nodes? So there's redundancy, right? On each of the units, you actually have two hardened Linux machines, and then you also have redundancy in case one satellite goes off. But with regard to patching, ignoring for a moment the hardware-related stuff, which is a longer conversation, for software we're actually looking at potentially implementing some kind of committee sign-off. So, essentially, you have it both peer-vetted, and also it's not just a centralized entity that patches and pushes potentially man-in-the-middle unnecessary things. If I understand correctly from your slides, you use Iridium's satellites? Correct. Okay. I understand SpaceX has a lot of low-orbit satellites. Why do you choose Iridium over SpaceX? Okay. Yeah. So currently, the Iridium is the oldest, but it's actually the most real you can get right now. It's also expensive, but thanks to it the constellation actually has long sustainability of the network. But yes, Starlink and other providers are actually coming down the path. So we believe there is actually a market of optical terminals, but we're just waiting for the satellites to be up and ready. Starlink will offer service for satellite providers, but it takes time to implement in orbit. So once we have more viable and, of course, cheaper options, we will switch to that. And I like the redundancy. Thank you. Any more questions? Last question. Thanks for the presentation. What are some first use cases you envisage, and what are some really moonshot, or literally moonshot, use cases that you can see potentially, or what could be the coolest use cases you can foresee? It's a bit of a long story, but I reckon just some of the use cases: cosmic randomness for VRF, secure deletion, MPC applications with co-signing, secure co-processing; these are the things that are applicable already today. What's maybe moonshot and very interesting is, as we showed, there are so many launches now. There's an entire new space-native economy. So we foresee a scenario where there's going to be AI agents in space making decisions without having to download data to Earth. So eventually there's a lot of security that is required for this to happen; we've seen with Planet Labs how it influenced security on Earth. And with more assets in space, it's becoming much more sensitive. So we foresee it as the backbone for expansion of Ethereum into more planets eventually, or at least outside of the pure bounds of Earth. I would like to say the Moon case is like the payment network between the satellites. So that's how we see it. All right. Please give a big applause to these guys.
That's a wonderful presentation.", "eventId": "devcon-7", - "slot_start": 1731471300000, - "slot_end": 1731472200000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1V8MPOsuS_Y8NmrHqykkqj248dMqY5xfRkNholeY8m_Q", - "resources_slides": null, + "slot_start": 1731648000000, + "slot_end": 1731648600000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1Net_UwG69ncJlQvHg5qG_nefAW16HDrDDKf-9OaDpsw", + "resources_slides": "https://drive.google.com/file/d/1Wctzat6Ivk2OKGoqk6Wk6gX6JTd82uz5/view", "speakers": [ - "derek-sorken" + "daniel-bar", + "matej-yangwao" ] }, "vector": [ @@ -647459,6 +645622,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -647469,7 +645633,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -648029,6 +646192,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -648223,11 +646387,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -648246,6 +646412,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -648293,14 +646460,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -648339,6 +646498,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -648766,14 +646926,11 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, 0, - 0, 2, 0, 0, @@ -648785,58 +646942,54 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "satellite-based-cryptographic-layer-extra-terrestial-extension-to-ethereum", - "sourceId": "SZBQLK", - "title": "Satellite based Cryptographic Layer - Extra-terrestial Extension to Ethereum", - "description": "Using nano-satellites with edge compute units we will show how we intend to build an orbital compute layer with unique properties. We will propose a novel cryptographic applications layer built with vision to space explorations.\r\n\r\nTypically public blockchains enable cryptographic primitives for the digital commons on earth, we will share novel implementation of cryptographic applications that will extend the digital commons into Low Earth Orbit (LEO) and import cryptographic resources from LEO.", - "track": "Cypherpunk & Privacy", - "type": "Lightning Talk", + "id": "scalable-and-sovereign-evm-data-modern-data-engineering-best-practices", + "sourceId": "KEEUYL", + "title": "Scalable and sovereign EVM data: modern data engineering best practices", + "description": "Collecting and analyzing large historical EVM datasets can pose a significant challenge. This has led many teams and individuals to outsource their data infrastructure to commercial 3rd-party platforms. However, over the past year a new style of data workflow has emerged, using entirely open source software and local-first processing. 
This new ecosystem of tools allow anyone to cheaply, easily, and robustly collect and analyze any EVM dataset from the comfort of their own laptop.", + "track": "Developer Experience", + "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Network State", - "Use cases of cryptography", - "DePIN", - "space", - "frontier", - "DePIN", - "Network State", - "Use cases of cryptography" + "Developer Infrastructure", + "data", + "analysis", + "Developer", + "Infrastructure" ], "keywords": [ - "space", - "frontier" + "Data Engineering", + "Data Science", + "Data Analysis" ], - "duration": 593, + "duration": 1242, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "177fc071fd2593ac885081c221ecbd202fb829eb9768f9bef54a4e3456bea753", + "sources_youtubeId": "bKrnOnfx9io", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736dec174749a4b89942272", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736dec174749a4b89942272.vtt", - "transcript_text": " Leonardo Silva Reviewer:\" Elisabeth Buffard Hello, Earthlings. So, Ethereum is great. We want to take it to more planets than only Earth. And when we think about space frontier, often nerds think about the space frontier as some sci-fi or alien technologies. Or for some other people, it's just an empty void, which is slightly less inspiring. The reality is that if anything of value is going to come out of it, we need to get nuanced and work with the complexities. So what we have seen is that in the last two years alone, more satellites have been launched into orbit than in the previous five decades combined. So finally, after decades of stagnation, we're entering a space acceleration era. And this is the time where democratization of the space industry is happening and plenty of startups with wild ideas are finally taking a stab at implementing their stuff. With SpaceCoin, what we're building is orbital route of trust. It's literally out- this world secure hardware, out of reach of physical attacks. It's space-based trusted execution environment, or we call it Space TE. And this serves as infrastructure for Celestial Compute Marketplace, payment layer for satellite services, and from Earth, we'll be able to access it via RPC nodes for EVM and other networks. This is a real photo from a launch not long ago. What we've built with the Space T is infrastructure with our aerospace partner, CryptoSat. We basically designed, built, and launched satellites into orbit since 2021. Last year we contributed to the EIP-4844 with the trusted setup ceremony, the KZG, aboard the satellites. And the latest satellites are equipped with the Iridium connection, which is reachable from any place in the planet. And now we're preparing a set of nodes that will be equipped with more iridium antennas, C-edge nodes. And essentially this is immune to any kind of attacks there. The architecture is Yang Wang's session. Okay, thank you for mic. Thanks, Laj. So this is actually what you see is like the network architecture which actually is like network resilient. So we are using the Iridium satellites, which have like the 66 pieces distributed across the planet with the nine in the backup. So there's like the first line of our communication path we are using. Then we are using the Aptos Orbital, which is like the, provides us the computation infrastructure currently. 
And thanks to this, actually, we are building SpaceCoin on top. So what we can build thanks to this is actually like a protocol with specific hardware capabilities. And it will be resistant to any kind of DDoS attacks and censorship, and in the future we believe there will be a lot more communication paths we can use and computational capacity we can get. So this is basically a brief diagram where the satellites are reachable at any single moment, so you can see that in any of the intervals some satellites are actually reachable, where the ground stations are proposing, and then electing and voting back, and broadcasting back to the ground stations. So because there's a lot of constraints in the environment, what we are using is actually a modified HotStuff-1 with instant finality. And thanks to this, we've retrofitted it for our lower bandwidth with the least amount of messages needed between the satellites. And it actually relaxes the requirement, so we just need connectivity to only one node designated as the leader. So this is briefly the diagram of how it will be working in the future and what we are looking forward to implementing currently. So we have pre-finality on the L2, with the L1 as backup, where the state is posted on the L1, which is good for low-value transactions; and for full finality we'll be using the L1, which is good for high-value transactions, where the state will be fully committed to the L1 from the satellites. So for security services, thanks to this we can offer the Space TE coprocessors, which enable FHE use cases. We are also looking forward to offering the Secure Custodian for various key management systems and schemes. Then we are looking at Cosmic Entropy and the VRF. And thanks to this, we can offer some kind of secure deletion, because the hardware is inaccessible for inspection, and the eternal bulletin board. So what we're looking forward to up there is that we'll be implementing support for EVM compatibility, and we're looking at an initial deployment focusing on the bridge contracts, with future extensibility through WASM and RISC-V based VMs. So we are looking for the L1 and L2 foundations, MPC wallets, validators, and especially entropy providers as well. So thank you for the attention. Please give us a follow on Twitter, and I think now it's time for the questions. Thank you. It is, it is. Man. Peace among worlds. So, do we have any questions? Remember to raise your hand and I will toss this to you. Yeah, we can have any kind of questions. We can also have some physics. Oh. It'll be delivered to you. Don't worry, it doesn't break. So, this is fascinating to see, so hats off for building it. What happens, is it harder to patch because it's in satellites? Like on Earth, I guess you could upgrade the hardware or switch over to a different operating system. If there's a vulnerability in the OS that's running on your satellite, I imagine that's a lot harder to go and fix. So how do those challenges come in? And the second one was, what happens if one of the satellites fails? And can you still maintain connection between your L1 nodes? So there's redundancy, right?
You have actually on each of the units, you have two Linux, like hardened Linux machines, and then eventually both you have the redundancy in case one satellite goes off. But with regard to patching, ignoring for a moment the hardware-related stuff, which is a longer conversation, but for software, we're actually looking at potentially implementing some kind of committee sign-off. So, essentially, you have it both peer-vetted and also it's not just like a centralized entity that patches and pushes potentially man-in-the-middle unnecessary things. If I understand correctly from your slides, you use Iridium's satellite? Correct. Okay. I understand SpaceX has a lot of low orbital satellite. Why do you choose Iridium over SpaceX? Okay. Yeah. So for currently, the Iridium is like this. It's most oldest, but it's actually most real you can get right now It's also Expensive, but it's actually thanks to it where is actually the constellation is actually quite have a long Sustainability of the networks, but yes the Starling and other providers. They're actually coming in the path So we believe there is actually in the market of the optical terminals And but we are just wait the satellites being up and ready. So now Starlink will offer for the satellite providers, but it takes time to implement in orbit. So once we have more viable and, of course, cheaper options, we will switch to that or so. And I like the redundancy. Thank you. Any more questions? Last question. Thanks for the presentation. What are some first use cases you envisage and what are some really moonshot or literally moonshot use cases that you can see potentially or what could be the coolest use cases you can foresee? It's a bit of a long story, but I reckon just some of the use cases, cosmic randomness for VRF, secure deletion, MPC application with co-signing, secure co-processing, these are the things that are applicable already today. What's maybe moonshot and very interesting is, as we showed, there's so many launches now. There's an entire new space-native economy. So we foresee a scenario where there's going an entire new space of like space native economy right so we foresee a scenario where there's going to be ai agents in space making decisions without having to download um data to earth so eventually like there's a lot of security that is required for for this to happen like we've seen with planet labs how it influenced uh security Earth. And with more assets in space, it's becoming much more sensitive. So we foresee it as the backbone for expansion of Ethereum into more planets eventually, or at least outside of pure bounds of Earth. I would like to say the Moon cases is like the payment network between the satellites. So that's how we see it. All right. Please give a big applause to these guys. 
That's a wonderful presentation.", + "sources_streamethId": "673cc8b7982f234a1257b1be", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731648000000, - "slot_end": 1731648600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Net_UwG69ncJlQvHg5qG_nefAW16HDrDDKf-9OaDpsw", - "resources_slides": null, + "slot_start": 1731573000000, + "slot_end": 1731574800000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1ArYtVYufUwHpFKb-cm8W6DCWGSPca78nUlpjKQDTmiY", + "resources_slides": "https://drive.google.com/file/d/1HhBZWf4Y3LQba0Q5ZITTgRrtu3g6xFB9/view", "speakers": [ - "daniel-bar", - "matej-yangwao" + "storm-slivkoff" ] }, "vector": [ - 0, - 0, 0, 0, 0, @@ -649234,6 +647387,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -649410,8 +647564,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -649590,6 +647742,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -649608,13 +647761,11 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -649633,9 +647784,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -649644,6 +647792,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -649719,7 +647868,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -649903,6 +648051,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -650053,10 +648202,11 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -650153,7 +648303,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -650166,57 +648315,54 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "scalable-and-sovereign-evm-data-modern-data-engineering-best-practices", - "sourceId": "KEEUYL", - "title": "Scalable and sovereign EVM data: modern data engineering best practices", - "description": "Collecting and analyzing large historical EVM datasets can pose a significant challenge. This has led many teams and individuals to outsource their data infrastructure to commercial 3rd-party platforms. However, over the past year a new style of data workflow has emerged, using entirely open source software and local-first processing. This new ecosystem of tools allow anyone to cheaply, easily, and robustly collect and analyze any EVM dataset from the comfort of their own laptop.", - "track": "Developer Experience", + "id": "scalable-multi-party-fhe-with-phantom-zone", + "sourceId": "SLJ9QS", + "title": "Scalable multi-party FHE with Phantom-zone", + "description": "The talk introduces \"phantom-zone\", a framework to write scalable consumer facing MPC apps using multi-party FHE. Starting with what's multi-party FHE, talk gives a demo of non-trivial MPC app. Followed by introduction to programming model of MPC apps using multi-party FHE inside phantom-zone. 
Then the talk dives deep into primitives to realise multi-party FHE and ends with advanced FHE gadgets that further enhance multi-party FHE.", "track": "Applied Cryptography", "type": "Talk", "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "MPC", "mp-fhe", "MPC" ], "keywords": [ "FHE", "MP-FHE" ], "duration": 1248, "language": "en", "sources_swarmHash": "c7f9970e7169fb1282bc36149435c26ac9f1dc3b1a0753f024ae09681efd6ab0", "sources_youtubeId": "CyIjTbHmVwg", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735aeab9dbb7a90e1bde09b", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735aeab9dbb7a90e1bde09b.vtt", "transcript_text": " Hello, welcome back. So, today I will be talking about Phantom Zone. But before I dive deep into Phantom Zone and the rest of the material, I want to walk you through the motivation behind it. I would like to introduce you to the idea of a globally mutually trusted third party. What is this globally mutually trusted third party? Well, it provides you three guarantees. The first is that whatever information you send to this third party, it will always keep that information private; it will not leak it to anyone. The second guarantee is that all the information it has collected over time from different people (say we have all been sending this globally mutually trusted third party our information for a year) it will keep private for all those years, and it will not allow anyone to poke inside its memory. And the third guarantee, which makes this particular party very magical, is that it can compute any arbitrary function we want it to compute, as long as we provide enough authorization to compute that function, and it will only output the necessary outputs. I usually refer to this mutually trusted third party as a mutually shared computer. And if you are familiar with something called the God Protocol, this is the God Protocol; this is a picture from an example back in 1987. So the first observation to make is that if you really want this party to be globally mutually trusted, we want it to be able to prove these three guarantees to any individual without requiring any additional interaction, which is why we require cryptography.
We cannot just rest this on legal arguments or something like that; we require cryptography to build this globally mutually trusted shared computer. And we started to build Phantom Zone to eventually build the God Protocol, but to stick within the realms of practicality, we could only build an abridged version of it. So for the rest of the talk, I will be talking about, A, what is Phantom Zone and why it is an abridged version of this God Protocol, and B, how can we push the frontiers to eventually build the God Protocol? Okay, so Phantom Zone, the abridged version. The key idea in Phantom Zone is something called multi-party fully homomorphic encryption. And for me to describe multi-party fully homomorphic encryption, I first have to describe single-party FHE. In single-party FHE, you have a single client, this guy over here. They hold a secret, and they keep the secret private to them. They can encrypt their information, which is A here, with their secret and produce an FHE ciphertext. They can then send this FHE ciphertext to any server, the server can evaluate any arbitrary function on their private input A and produce an output ciphertext, and the client can receive the output ciphertext and decrypt it. So that is single-party FHE. Coming to multi-party FHE, the key idea is that you split the secret, which is held private by a single client in single-party FHE, among many clients. So you have S0, S1, S2 as secret shards split among these three people over here. The first step in multi-party FHE is something called collective public key generation: all three parties come together and generate the collective public key. Then all three parties, using the collective public key, encrypt their private inputs and produce FHE ciphertexts, and they send those ciphertexts to the server. The server executes an arbitrary function on the FHE ciphertexts and produces an FHE output. The key thing to notice here is that all these parties have to produce a decryption share to eventually decrypt the output ciphertext. They produce the decryption shares using their secret shards and send them to each other, and only then are they able to decrypt the output ciphertext, because in this case the secret was split among all the parties (see the toy sketch below). So why is Phantom Zone an abridged version? Because Phantom Zone, even assuming that in the future we are able to add publicly verifiable FHE to it, can only provide the three guarantees of the God Protocol to the holders of the secret shards. It cannot provide these three guarantees to everyone around the globe, which is why Phantom Zone is just an abridged version. Okay. So you might wonder: how do we build towards the God protocol? How do we even do it? I would have loved to say that after five years of research we have figured out the solution to build the God protocol, but no, there are no enlightening thoughts here.
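As a concrete toy of the multi-party flow described above (collective key generation, encryption under the joint key, server-side evaluation, one decryption share per shard holder), here is a minimal Python sketch. It uses exponential ElGamal, which is only additively homomorphic, nothing like the lattice-based FHE behind Phantom Zone, and every parameter is an illustrative assumption rather than anything from the talk:

```python
# Toy multi-party *additively* homomorphic encryption (exponential ElGamal).
# Mirrors the protocol shape from the talk; NOT secure, NOT fully homomorphic.
import random

P = 2**127 - 1   # a Mersenne prime: fine for a toy, never for production
G = 3            # assumed group element; exact choice is irrelevant for the demo

def keygen():
    s = random.randrange(2, P - 1)         # one party's secret shard
    return s, pow(G, s, P)                 # (shard, public share g^s)

def encrypt(pk, m):
    r = random.randrange(2, P - 1)
    return pow(G, r, P), (pow(G, m, P) * pow(pk, r, P)) % P   # (g^r, g^m * pk^r)

def add(ct_a, ct_b):
    # Homomorphic addition of plaintexts = component-wise multiplication.
    return (ct_a[0] * ct_b[0]) % P, (ct_a[1] * ct_b[1]) % P

# 1. Three parties generate shards; the collective key is the product of shares.
shards, shares = zip(*(keygen() for _ in range(3)))
pk = (shares[0] * shares[1] * shares[2]) % P

# 2. Each party encrypts a private input under the collective key.
cts = [encrypt(pk, m) for m in (11, 22, 33)]

# 3. The server evaluates a function (here: a sum) without seeing any plaintext.
c1, c2 = cts[0]
for ct in cts[1:]:
    c1, c2 = add((c1, c2), ct)

# 4. Decryption needs a share c1^s_i from *every* shard holder.
d = 1
for s in shards:
    d = d * pow(c1, s, P) % P
g_m = c2 * pow(d, -1, P) % P               # recovers g^m (Python 3.8+ modular inverse)

# 5. Recover m by brute force; only viable for tiny message spaces.
m = next(i for i in range(1000) if pow(G, i, P) == g_m)
assert m == 66
```

The structural point is the same one the speaker makes: no single party ever holds the full secret, and step 4 fails unless every shard holder contributes a decryption share.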
And there's one obvious answer to eventually building the God protocol, which is program obfuscation. What's program obfuscation? To describe it simply, let's assume you have a function f. What you can do with program obfuscation is take this function f, perform some transformations on it, and produce an obfuscated circuit. You can give this obfuscated circuit to someone else, and program obfuscation guarantees that the only thing they can learn from the obfuscated circuit is the input-to-output map and nothing else. Now you might be wondering why this is useful, because if the function is trivial, you can easily learn it from the input-to-output map. Program obfuscation becomes very interesting when you obfuscate a program that is a cryptographic function. For example, say I take a function that decrypts any ciphertext encrypted to my public key; the function has my secret key inside it. I perform the program-obfuscation transformations on this function, produce an obfuscated circuit, and give the obfuscated circuit to someone else. They can now use it to decrypt any ciphertext that was encrypted to me, but they can never, ever learn the secret inside the circuit; they can never learn my secret key. These are the class of functions where program obfuscation becomes useful, and I'll tie it back to building the God protocol later in the slides. So now, assume that we can only build program obfuscation for some limited class of functions, not for general functions, but a limited class. I'll tell you one way of building the God protocol using such program obfuscation. Step one, modify the FHE scheme we were using before to become publicly verifiable. What do I mean by that? A publicly verifiable FHE scheme evaluates the FHE function, as a normal FHE scheme does, but in addition it produces a proof of correct evaluation, so that anyone can verify this proof against the output ciphertext and be assured that the server executed the FHE function correctly; I usually refer to this as the proof pi of correct evaluation. Step two, replace the collective key generation operation that we did in multi-party FHE with a trusted setup. In the trusted setup, you have an arbitrary number of people who perform some MPC protocol to produce the FHE keys. Two types of FHE keys are important: the public key and the bootstrapping key. The bootstrapping key is used for certain FHE operations that you can treat as a complete black box. The key thing here is that no one knows the ideal secret key, because we are doing a trusted setup in MPC to generate these two keys. The third step is to modify the trusted setup to also output an obfuscated conditional decryption oracle. Okay, that's a mouthful, so let me go one level deeper. What is an obfuscated conditional decryptor? It is an obfuscated program with the following functionality: it takes an output ciphertext and a proof of correct evaluation of the FHE circuit.
It verifies whether the proof is valid and decrypts the output ciphertext if and only if the proof is valid (a schematic mock of this check appears below). And this tells you why we assumed in the first place that program obfuscation may be feasible only for a limited class of functions: to build the God protocol via the FHE route, we only need program obfuscation to be practical for this obfuscated conditional decryptor. So we modify the trusted setup to also output this obfuscated conditional decryptor, and that's it. Another thing to note is that this conditional decryptor has the secret key, the ideal secret key that no one knows, embedded inside it. Okay. So the end-to-end flow is: you do MPC to generate three things, the public key, the bootstrapping key, and the obfuscated conditional decryptor, which I now realize is somewhat of a mouthful; I should have chosen some other term. Anyway, in the second step, anyone can encrypt their private inputs using the public key that is the output of the MPC protocol, so you have multiple ciphertexts here, and they send them to the FHE server. The FHE server evaluates the FHE function and outputs the encrypted output. In addition, it produces a proof, because the server is evaluating a publicly verifiable FHE scheme. Then we plug the proof as well as the output into the obfuscated conditional decryptor, and the conditional decryptor decrypts the encrypted output if and only if the proof is valid. So this is one way of building the God protocol using publicly verifiable FHE and program obfuscation for the obfuscated conditional decryptor. So that is one way, which I've just shown you, but we need new ideas to push the frontiers and to finally build program obfuscation, or indistinguishability obfuscation, if you're familiar with that. Here I've shown you just one way, but if you are able to come up with new ideas, then perhaps we can make program obfuscation more practical for general circuits, not just for the limited class of functions we used before, and perhaps we can directly build the God protocol from program obfuscation. While I was exploring this field of program obfuscation and iO, one key observation I made was that it's really hard to get efficient program obfuscation from standard assumptions, and we will inevitably require exotic assumptions. Let me tell you what standard assumptions and exotic assumptions are. A standard assumption is one that has been around for a while, for example the discrete log problem, and there also exist additional incentives for people to break these standard assumptions. Exotic assumptions are newer assumptions; they have only been around for, say, five years, or not even five, more like two to three years. Realizing that we might inevitably need newer assumptions to build practical program obfuscation, what we can do as a community is start examining these newer assumptions, start testing them, start trying to break them, or build applications using these assumptions so that we incentivize people to break them and tell us whether they are broken or not. Then, eventually, in a few years, we would have candidate assumptions that started out as newer assumptions but have since become standard, using which we can build practical program obfuscation.
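Returning briefly to the pipeline above, here is a self-contained Python mock of the "decrypt if and only if the proof verifies" control flow. Everything in it is a placeholder assumption: the "proof" is just a hash tag, the "FHE" is a byte-wise XOR, and in a real instantiation this function body, with the secret key inside it, is exactly the part that would be obfuscated:

```python
# Schematic mock of the conditional decryptor. Placeholder crypto only:
# a real build needs a publicly verifiable FHE scheme, and this whole
# function would itself be program-obfuscated so SK is never revealed.
import hashlib

SK = 0x5A  # stands in for the ideal secret key that no one knows

def mock_proof(output_ct: bytes) -> bytes:
    # Stand-in for the proof pi of correct evaluation (trivially forgeable here).
    return hashlib.sha256(b"correct-evaluation" + output_ct).digest()

def conditional_decrypt(output_ct: bytes, proof: bytes) -> bytes:
    # Decrypt if and only if the evaluation proof verifies.
    if proof != mock_proof(output_ct):
        raise ValueError("invalid proof of correct evaluation; refusing to decrypt")
    return bytes(b ^ SK for b in output_ct)

ct = bytes(b ^ SK for b in b"hello")             # pretend FHE output
print(conditional_decrypt(ct, mock_proof(ct)))   # b'hello'
```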
And taking a first step towards this, we are launching a bounty program to break one of the candidate assumptions, which is called program obfuscation by local mixing. The way I think about this particular assumption is that it takes a more computational-complexity approach, rather than the traditional approach of using algebraic structures to build program obfuscation. The setup of the bounty is that we provide an obfuscated circuit with roughly 240,000 gates, which was obfuscated from an original circuit with roughly 1,000 gates, and you have to find the original circuit. You can learn more about the bounty at Obfustopia.io; if you don't know what Obfustopia is, it means we're living in a world where obfuscation is practical. The bounty amount is 10K, and this bounty is launched in collaboration with the Ethereum Foundation and 0xPARC. Okay. Before I wrap up, and I think I have a bunch of time, I want to make one conjecture, and the conjecture goes as follows: I think the God protocol is the convergence of cryptography. Building the God protocol would probably require a certain sort of FHE, and that is just one route: publicly verifiable FHE plus other things like MPC for the trusted setup, and so on. But once you build the God protocol, I think it encompasses everything. It gives us everything we have been wanting for a while: witness encryption, zero-knowledge proofs via signatures, MPC, multi-party computation, and FE, functional encryption; all of these things we've been demanding for a while. And this is also one of the major reasons we should start investigating much more seriously how to get practical program obfuscation and finally build the God protocol. And that's it. Thank you. All right, thank you for the talk. We do have some questions rolling in. Let's go through some of them, starting with the first one: can we implement threshold ECDSA with Phantom Zone? Theoretically yes, because you can express everything, but it would be very impractical to implement ECDSA with Phantom Zone at the moment, because ECDSA means doing elliptic-curve operations, which is a lot of operations. As far as I understand, threshold ECDSA is possible, but it takes two days to generate one single signature. All right, next question: can you tell us a little bit more about the definition of obfuscation as a virtual black box? Isn't the definition of obfuscation as a virtual black box impossible? I am not posing obfuscation as a virtual black box; I did not mean to say obfuscation is a virtual black box. By the way, the impossibility result for virtual black box is only for a certain very restricted class of programs, not for general programs, so eventually you can aim for virtual black box with certain caveats. But again, my definition of obfuscation is not virtual black box. All right, and what can be done today with Phantom Zone? At the moment, as I said, Phantom Zone is an abridged version of the God protocol. It does not even have a publicly verifiable FHE scheme, so it does not give you all three guarantees.
The only guarantee that it gives you is that it will execute the function you ask it to execute, with the private information coming from multiple people, and it will keep that information private, but you have to trust it for that. So you have to trust this particular server to always keep the information private and not send it to anyone else. Perfect. Oh, cool, more questions rolling in. Can obfuscating programs undermine open-source transparency and make it harder to verify the absence of malicious code? Well, that assumes the entire program is obfuscated. When I say obfuscation, we require obfuscation only for certain parts of the program, so a public program interacts with a private program which is obfuscated. I understand that obfuscation can be used for malicious purposes as well; there are several reasons why people might be interested in obfuscation, but we can, as a community, make sure there is clear interaction between the public interfaces and the private, obfuscated interfaces. All right. And why do you call the publicly verifiable FHE circuit obfuscated? Doesn't that require a Solidity verifier or something which is public? No, I think once I give you an obfuscated circuit, there are certain guarantees you can learn from the obfuscated circuit itself, namely that it does not reveal anything, as long as you've done the obfuscation correctly. Alright, and do you have evidence that the conditional decryption functionality is possible using iO? Yes. There are theoretical results, and we're trying to make it practical as well. All right. Can you give one example each of how iO can replace ZK, MPC, and FHE? Okay. For ZK, you can embed a secret key inside this obfuscated circuit, the God protocol, and a zero-knowledge proof is just a signature from this God protocol: whatever secret exists inside this particular server, this God protocol, this obfuscated circuit, a signature by that thing becomes a zero-knowledge proof, so you no longer need zero knowledge on the client side. For MPC, again, it's a globally mutually trusted third party: all of us encrypt our private inputs with the public key corresponding to the secret key that lives inside the obfuscated circuit, we send our private inputs to it, it decrypts them, performs some function, and produces the output. So that's one way of replacing MPC, and the same applies for FHE. Cool. We can stay here for maybe another ten seconds if there are any new questions rolling in.
All right, cool.", "eventId": "devcon-7", - "slot_start": 1731573000000, - "slot_end": 1731574800000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1ArYtVYufUwHpFKb-cm8W6DCWGSPca78nUlpjKQDTmiY", - "resources_slides": null, + "slot_start": 1731567600000, + "slot_end": 1731569400000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1V86Kc6aOcbAUsOm8NBUDaQ00YrCn0XJN5ce8Lyt73WU", + "resources_slides": "https://drive.google.com/file/d/172T15E7PtqtQw3tN8MRjbwgKRVai0bjf/view", "speakers": [ - "storm-slivkoff" + "janmajaya-mall" ] }, "vector": [ 0, 0, 0, - 6, 0, 0, 0, @@ -650224,6 +648370,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -650611,7 +648758,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -650633,6 +648779,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -650968,7 +649115,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -651018,7 +649164,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -651052,6 +649197,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -651278,7 +649424,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -651433,7 +649578,6 @@ 0, 0, 2, - 2, 0, 0, 0, @@ -651529,8 +649673,6 @@ 0, 2, 0, - 0, - 0, 2, 0, 0, @@ -651549,42 +649691,45 @@ }, { "session": { - "id": "scalable-multi-party-fhe-with-phantom-zone", - "sourceId": "SLJ9QS", - "title": "Scalable multi-party FHE with Phantom-zone", - "description": "The talk introduces \"phantom-zone\", a framework to write scalable consumer facing MPC apps using multi-party FHE. Starting with what's multi-party FHE, talk gives a demo of non-trivial MPC app. Followed by introduction to programming model of MPC apps using multi-party FHE inside phantom-zone. Then the talk dives deep into primitives to realise multi-party FHE and ends with advanced FHE gadgets that further enhance multi-party FHE.", - "track": "Applied Cryptography", + "id": "scaling-autonomous-worlds-building-the-foundations-and-sewers-for-millions-of-inhabitants", + "sourceId": "QPAXL7", + "title": "Scaling autonomous worlds - building the foundations… and sewers for millions of inhabitants", + "description": "One tends to think of Ethereum scaling in financial terms—how many transactions per second? What’s the TVL? How much liquidity?\r\n\r\nBut in a possible future where Ethereum applications extend beyond finance, into areas like autonomous worlds, games, and social, what does scaling look like and what challenges await?\r\n\r\nJoin us as we explore challenges, solutions, and open questions in this space—how do we bring latency down despite seconds-long block time? Could we shard an app across multiple chains?", + "track": "Layer 2", "type": "Talk", - "expertise": "Beginner", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "MPC", - "mp-fhe", - "MPC" + "Layer 2s", + "Cross-L2", + "Autonomous World", + "cross-chain", + "Autonomous World", + "Cross-L2", + "Layer 2s" ], "keywords": [ - "FHE", - "MP-FHE" + "Cross-chain" ], - "duration": 1248, + "duration": 1310, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "a23391ef57a705e96c256c35383615ce8ca6f48030865d8df316b6c794d67ba3", + "sources_youtubeId": "uJ2nhPDvjLw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735aeab9dbb7a90e1bde09b", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735aeab9dbb7a90e1bde09b.vtt", - "transcript_text": " Tanya Cushman Reviewer:\"Presenter\": Hello, welcome back. So, today I will be talking about Phantom Zone. 
But before I dive deep into Phantom Zone and talk about the rest of the things, I would walk you through the motivation behind Phantom Zone. So, if you guys are familiar with globally mutually trusted third party, I would want to introduce you to this idea of globally mutually trusted third party. What is this globally mutually trusted third party? Well, it provides you three guarantees. The first thing that it says is that whatever information you send to this third party, it will always keep that private. It would not leak it to anyone. The second guarantee that it provides is that all the information that it has collected over time from different people, you know, we have been sending this globally mutually trusted third party all our information let's say for a year. All the information that it has collected for all these years, it will always keep it private and would not allow anyone to poke inside its memory. And the third guarantee that it provides, which makes this particular party very magical, is that it can compute any arbitrary function we want it to compute, as long as we provide enough authorization to be able to compute that function. And it will only output the necessary outputs. Usually, I sort of refer to this mutually trusted third party as a mutually shared computer. And if you guys are familiar with something called the God Protocol, this is the God Protocol. This is a picture from an example back in 1987. So first observation to make is that if you really want these three guarantees to be, if you really want this party to be globally mutually trusted, we want this party to be able to prove these three guarantees to any individual without requiring any additional interaction, which is why we require cryptography. to prove these three guarantees to any individual without requiring any additional interaction which is why we require cryptography we cannot just make it on a certain legal arguments or something like that we require cryptography for building this the building this globally mutually trusted shared computer and we started to build phantom zone to eventually build the god protocol but to stick within the realms of practicality, we could only build an abridged version of it. So for the rest of the talk, I will be talking about, A, what is Phantom Zone? Why it is an abridged version of this God Protocol? And the second important thing that I'll be talking about is, how can we push the frontiers to eventually build the God Protocol? Okay, so Phantom Zone, the abridged version. The key idea in Phantom Zone is something called multi-party fully homomorphic encryption. And for me to describe you multi-party fully homomorphic encryption, I have to eventually describe you what is single-party encryption. In single-party encryption, you have a single client, this guy over here. They hold a secret. They keep the secret private to them. They can encrypt their information, which is A here, with their secret and produce an FHC ciphertext. And then they can send this FHC ciphertext to any server. And the can evaluate and any arbitrary function on their private input which is a and produce an output ciphertext and that the and the client can receive the output ciphertext and decrypt it. So this is single-party FIG coming to multi-party FIG. Well the key idea in multi-party FIG is that you split this secret which is held private by a single client in single-party FIG. 
Well the key idea in multi-party FIG is that you split this secret which is held private by a single client in single party FIG among many clients. So you have S0, S1, S2 as secret shots split among these three people over here. The first step in multi-party FIG is something called collective public key generation. So all these three parties come together and they generate the collective public key. And then all these three parties, using the collective public key, encrypt their private inputs and produce FHC ciphertext. And then they send their FHC ciphertext to the server. Server executes a marble refunction on the FHC ciphertext and produce an FHC output. The key thing to notice here is that all these parties would have to produce a decryption share to eventually decrypt the output ciphertext here. So they produce the decryption share using the secret shards and then they send it to each other and then only they're able to decrypt the output ciphertext. Because in this case, the secret was split among all these parties. So why is Phantom Zone an abridged version? Well, because Phantom Zone, assuming that in the future we're able to add publicly-verifiable FHE to a Phantom Zone can only guarantee the three guarantees that I talk about in the God Protocol to only the holders of the secret shots. It cannot guarantee these three guarantees to everyone around the globe. Which is why Phantom Zone is just an abridged version of it. Okay. So you might wonder, how do we build towards the God protocol? How do we even do it? Well, what I would like to say at the moment is I would have loved to say that after a lot of research and a lot of five years of research, we have figured out the solution to build the God protocol. But no, there are no enlightening thoughts here. And there's one obvious answer to eventually building the God protocol, which is program of sophistication. What's program of sophistication? Well, to simply describe the program of sophistication, let's just assume that you have function f, right? What you can do with program obfuscation is you take this function f and perform some transformations on this function f and produce an obfuscated circuit. You can give this obfuscated circuit to someone else and program obfuscation guarantees that the only thing that you can learn from that obfuscated circuit is the input to output map and nothing else. Now you might be wondering why is this useful? Because if the function is trivial, then you can easily learn it from the input to output map. Program obfuscation becomes very interesting when you sort of like obfuscate a program that is a cryptographic function. For example, let's just say that I take a function that decrypts any ciphertext that is encrypted to my public key. So I take a function and this function has my secret key and it decrypts any ciphertext that was encrypted to me using my public key. And I perform certain transformations using program obfuscation to this function and produce an obfuscated circuit. I give this obfuscated circuit to someone else. What they can do is that they can decrypt any ciphertext that was encrypted to me using this obfuscated circuit. But they can never, ever learn what the secret is inside that circuit. They can never learn my secret key. And these are the class of functions where program obfuscation becomes useful. And I'll tie it to building the God protocol later in the slides. 
So now, assume that we can only build program obfuscation for some limited class of functions, not for general class of functions, but limited class of functions. I'll tell you one way of building the got protocol using program ob application. Step one, modify the FHT scheme that we're using before to become publicly verifiable. What do I mean by that? Well a publicly verifiable FHT schemes does those things. It evaluates the FHT function which you know a a normal FHE scheme does. In addition to evaluating the function, it also produces a proof of correct evaluation so that anyone can verify this proof with the output ciphertext and be assured that the server that sort of executed this FHE function executed it correctly, and which I usually refer to as proof pi of correct evaluation. Step two, replace the collective key generation operation that we did in the multi-party FHE with a trusted setup. In the trusted setup, you have arbitrary number of people here. They perform some MPC protocol to produce FHE keys. The two types of FHE keys which are very important. Public key and the bootstrapping key. Bootstrapping key is usually used for some sort of FHE operations that you can completely black box. The key thing here is that no one knows the ideal secret key because we're doing a trusted setup in MPC to generate these two keys. The third step is modify the trusted setup to also output an obfuscated conditional decryption oracle. Okay, that's a mouthful. I sort of like go into it one level deeper. What is an obfuscated conditional decryptor? This particular conditional decryptor is an obfuscated program of the following functionality. What it does is that takes an output ciphertext and a proof of correct evaluation of FIG circuit. It verifies whether the proof is valid and decrypts the output ciphertext if and only if the proof is valid. And this sort of like tells you why did we assume in the first place that program obfuscation may be feasible only for like limited class of functions because to build the GOT protocol like to build the got protocol using the FHERoute, we only need program obfuscation to be practical for this obfuscated conditional decryptor. So we modify the tracer setup to also output this obfuscated conditional decryptor, and that's it. And another thing to note is that this conditional decryptor also has the secret key, the ideal secret key that no one knows embedded inside it. Okay. So the end-to-end flow is, you do MPC to generate three things. Public key, bootstrapping key, and the offscored conditional decryptor, which I now realize is somewhat of a mouthful. I should have chosen some other term. Anyways, the second flow is, now anyone can encrypt their private inputs using the public key that is the output of the MPC protocol. So you have multiple ciphertexts here. And then they can send it to the FHC server. FHC server evaluates the FHC function, outputs the encrypted output. In addition, it produces a proof because the FIG server is evaluating a publicly verifiable FIG scheme. And then we plug in the proof as well as the output to the off-scaled conditional decryptor and the conditional decryptor would only decrypt the encrypted output if and only if the proof is valid. So this is one way of building the God protocol using publicly verifiable FHE and program obfuscation for obfuscated conditional decryptor. 
So there's one way, which I've just shown you, but we need new ideas to push the frontiers and to finally build the program obfuscation or and to finally build program obfuscation or indistinguishably obfuscation, if you're familiar with that. Here, I've showed you just one way. But if you are able to come up with new ideas, then probably we can make program obfuscation more practical for general circuits, not just for limited class of functions that we used before. And probably, we can directly build the God protocol from program obfuscation. So while I was exploring this field of program obfuscation and I.O., one key observation that I made was that it's really hard to get efficient program of specification from standard assumptions and we would inevitably require exotic assumptions. And I'll tell you what are standard assumptions and what are exotic assumptions. Well a standard assumption is an assumption that has been there for a while, for example D log, discrete log problem. There also exists additional incentive for people to break these standard assumptions. And exotic assumptions are somewhat newer assumptions. Like, they have been only there for like five years, or not even five, it was like two to three years. What we can do as a community to, you know, realizing that we might inevitably need newer assumptions to build practical program amplification is we can start examining these newer assumptions, start breaking them, start testing them. Or we can build applications using this assumption so that we can incentivize people to break them and tell us whether they're broken or not. And then eventually, in a few years, we would have candidate assumptions that are newer assumptions, but they have become then standard using which we can build practical program sophistication. And taking a first step towards this, we are launching a bounty program to break one of the candidate assumptions, which is called program obfuscation by local mixing. The way I think about this particular assumption is that they're taking more computational complexity approach than taking the traditional approach of using algebraic structures to build program obfuscation. The goal of the bounty is that we provided an obfuscated circuit with roughly 240,000 gates, which was obfuscated from an original circuit with roughly 1,000 gates. And you had to find the original circuit. You can learn more about the bounty at OfficeTopia.io. If you know what OfficeTopia is, OfficeTopia means that we're living in a world where authentication is practical, and the bounty amount is 10K. And this bounty is launched in collaboration with Ethereum Foundation and Zerix Spark. Okay. So before I break, and I think that I have a bunch of time, okay, before I break, and I think that I have a bunch of time. Okay, before I break, I would want to make one conjecture. And the conjecture goes as follows. I think the God protocol is the convergence of cryptography. Probably building the God protocol would require certain sort of like FHE. That is just one route, but like publicly viable FHE and other things like MPC for just setup and so on and so forth. But once you build the got protocol, I think it encompasses everything. It gives us everything that we have been wanting for for a while. It gives us witness encryption. It gives us zero knowledge proofs via signatures. It gives us MPC, multi-party computation. It gives us FE, functional encryption, all of these things that we've been demanding for a while. 
And this is also one of the major reasons that we should start investigating much more seriously how to get practical program application and finally build the God protocol. And that's it. Thank you. All right, thank you for All right. Thank you for the talk. We do have some questions rolling in. Yeah, let's go through some of the questions. Let's start with the first one. Can we implement threshold ECDSA with Phantom Zone? At the moment, yes, because you can express everything. Like, theoretically, yes, but it would be very impractical to implement ECDSA with PhantomZone at the moment because ECDSA is like you're doing elliptical operations, which is a lot of operations. As far as I understand, threshold ECDSA is possible. It takes two days to generate one single signature. All right, so next question. Can you tell us a little bit more about the definition of obfuscation as a virtual black box? That's the first question over here. Isn't the definition of obfuscation as a virtual black box impossible? I am not posing obfuscation as a virtual black box. I did not mean to say obfuscation is a virtual black box. By the way, the impossible result of a virtual black box is only for certain very restricted class of programs. It's not for general class of programs. Eventually you can aim for virtual black box with certain caveats. But again saying that my definition of sophistication is not virtual black box. All right and what can be done today with Phantom Zone? At the moment as I said Phantom Zone is an abridged version of the Scott protocol. It does not even have publicly verified FHE scheme, so it does not give you all the three guarantees. The only guarantee that it gives you is that it will execute the function that you ask it to execute while private information can be coming from multiple people. It'll keep the information private, but you'll have to trust it for it. So you'll have to trust this particular server to always keep the information private, but you'll have to trust it for it. So you'll have to trust this particular server to always keep the information private and not send it to anyone else. Perfect. And we do have one last question. Oh, cool. More questions rolling in. Can obfuscating programs undermine open source transparency and make it harder to verify the absence of malicious code? I see. Make it harder to verify absence of malicious code? I see. Make it harder to verify absence of malicious code. Well, that is assuming that the entire program is obfuscated. When I say obfuscation, we require obfuscation for certain parts of the program, which can interact with a public program and a private program which is obfuscated. I understand that obfuscation can be used for many malicious purposes as well, like for example, you know, like, there are several reasons why people might be interested in obfuscation, but we can, as a community, make sure that there's interaction between the public interfaces and the private interfaces which are obfuscated. All right. And why do you call the publicly verifiable FHE circuit obfuscated? Doesn't the require solidity verifier or something which is public? No, I think once I give you obfuscated circuit, there are certain guarantees that you can learn from the obfuscated circuit itself, that it does not reveal anything, as long as you've done the obfuscation correctly. Alright, and do you have evidence that the conditional decryption functionality is possible using I.O.? Yes. 
There are theoretical results and we're trying to make it practical as well. All right. Can you give one example each on how I.O. can replace ZK, MPC, FHE? Okay. So for ZK, what you can do is like you can embed a secret key inside this off-secreted circuit, the God protocol, and a zero-knowledge proof is just a signature from this God protocol. Whatever secret exists inside this particular server or this God protocol, or this FHC circuit, off-secreted circuit, a signature by that thing becomes a zero-knowledge proof. So you do not require zero- zero knowledge on the client side anymore. For MPC, again, it's a globally mutually trusted third party. All of us encrypt our private inputs with the public key corresponding to the secret key that lives inside this off-site circuit. And we send our private inputs to this. It decrypts that, performs some function, and produces the output. So that's one way of replacing MPC, and the same applies for FG. Cool. We can stay here for maybe another 10 seconds if there are any new questions rolling in. All right, cool.", + "sources_streamethId": "67346ed49dbb7a90e1e1b1a2", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67346ed49dbb7a90e1e1b1a2.vtt", + "transcript_text": " All right. My name is Tidat. I'm a software engineer at Lattice. And yeah, I'm about to talk about bringing you on our journey, basically building the best infrastructure for autonomous worlds and mod apps. basically building the best infrastructure for autonomous worlds and Mudd apps. Okay, so I will start by talking a little bit, bringing a little bit of context for those who don't know what Mudd is and what are autonomous worlds. I will then kind of describe some design patterns that we learned about while we were essentially solving problems for Mudd apps.", "eventId": "devcon-7", - "slot_start": 1731567600000, - "slot_end": 1731569400000, + "slot_start": 1731484800000, + "slot_end": 1731486600000, "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1V86Kc6aOcbAUsOm8NBUDaQ00YrCn0XJN5ce8Lyt73WU", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/11DTfplHre4QguicqcET5ubMdfycNHdyjo8Imn5A0lWc", + "resources_slides": "https://drive.google.com/file/d/1Ku6W_lj1fSlmK07byI0Goh-r68o-uoLN/view", "speakers": [ - "janmajaya-mall" + "tdot" ] }, "vector": [ @@ -651595,9 +649740,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -652008,9 +650150,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -652170,6 +650309,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -652407,6 +650547,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -652428,7 +650569,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -652451,6 +650591,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -652540,6 +650681,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -652900,12 +651042,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 0, - 2, - 0, 2, 0, 0, @@ -652924,56 +651064,46 @@ }, { "session": { - "id": "scaling-autonomous-worlds-building-the-foundations-and-sewers-for-millions-of-inhabitants", - "sourceId": "QPAXL7", - "title": "Scaling autonomous worlds - building the foundations… and sewers for millions of inhabitants", - "description": "One tends to think of Ethereum scaling in financial terms—how many transactions per second? What’s the TVL? 
How much liquidity?\r\n\r\nBut in a possible future where Ethereum applications extend beyond finance, into areas like autonomous worlds, games, and social, what does scaling look like and what challenges await?\r\n\r\nJoin us as we explore challenges, solutions, and open questions in this space—how do we bring latency down despite seconds-long block time? Could we shard an app across multiple chains?", - "track": "Layer 2", - "type": "Talk", - "expertise": "Intermediate", + "id": "scaling-clean-air-now-and-the-future", + "sourceId": "RKA9MF", + "title": "Scaling Clean Air: Now and the Future", + "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Layer 2s", - "Cross-L2", - "Autonomous World", - "cross-chain", - "Autonomous World", - "Cross-L2", - "Layer 2s" - ], - "keywords": [ - "Cross-chain" - ], - "duration": 1310, + "tags": [], + "keywords": [], + "duration": 758, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "12064d7d2a4ccbfe9f6ef7d3ab1af74c6aa25dd4a215d0812f624bf47f56891f", + "sources_youtubeId": "q2YUVMRPQKw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67346ed49dbb7a90e1e1b1a2", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67346ed49dbb7a90e1e1b1a2.vtt", - "transcript_text": " All right. My name is Tidat. I'm a software engineer at Lattice. And yeah, I'm about to talk about bringing you on our journey, basically building the best infrastructure for autonomous worlds and mod apps. basically building the best infrastructure for autonomous worlds and Mudd apps. Okay, so I will start by talking a little bit, bringing a little bit of context for those who don't know what Mudd is and what are autonomous worlds. I will then kind of describe some design patterns that we learned about while we were essentially solving problems for Mudd apps.", + "sources_streamethId": "6735b4a09dbb7a90e12f4f30", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735b4a09dbb7a90e12f4f30.vtt", + "transcript_text": " I have some money to do stuff around air quality, indoor air quality, especially I'm an engineer by trade, so I write software. I wrote the software on the toy that you saw in the booth out there. And I got tagged in to give this presentation, so you're going to have to bear with me on that front. So here's a question. And we're going to do price is right rules. So closest without going over. You eat about 1.8 kilograms of food a day. You drink about 2 kilograms of water a day does anybody know how much air you consume a day hands somebody give me a hand yes 11 000 liters i want it by mass i heard 10 kilograms. Who was that? Yeah, all right. Toss them one. Yeah, let's say 12 kilograms a day. That's a lot of air, and there's stuff in that air. Stuff in the air. Bad air is a bad time. So this is from a study about decision-making versus carbon dioxide content. 
So, you know about hypoxia's effect on the brain: long before you ever get to anything like passing out, you start making really bad decisions. So if you wouldn't sign on a house drunk, you probably shouldn't sign on a house above a couple thousand ppm of CO2, or you will make terrible choices. The inverse effect is also true. Bad air is a bad time also because it leads to airborne infectious disease. We all know about COVID, but there's plenty of other stuff: tuberculosis, chickenpox, measles, all of that. There are great vaccines against some of it; some of it, nobody really knows. And you really don't want to catch it, even if you have a not particularly symptomatic case. There's a really interesting talk about that you can stick around for. Also, the economics of it are incredible. Clean-air-based interventions are an excellent way to reduce things like heart attacks. Children's test scores: air purifiers in classrooms are one of the most economically effective ways to boost test scores, compared to anything like teacher training or other interventions. You can find direct links between indoor air quality in education spaces, absenteeism, and eventual health and economic outcomes for the people in those spaces. So at Entropic Engineering and Open Aeros, we think this is pretty cheap and easy to fix, and you can have some really high-impact effects. Masks, especially with fit testing; this is another place where we received some funding from Balvi, to work on open-source condensation particle counters that can make it really easy to do this kind of work. Portable air cleaners; you can see one in this picture of our office. Windows; these ones don't open, but if you're in a place where it's thermodynamically acceptable, you can open your windows, and that's an excellent way to do some mitigation. Simple HVAC upgrades, like dropping in better filters and making sure you actually change your filters. And we also do some work with skin- and eye-safe 222-nanometer UV light, like this one here; there's also one on the display table out front. Unlike earlier 254-nanometer technologies, this is relatively easy to deploy because the threshold for skin and eye damage is much, much higher, so it's something you can retrofit into existing spaces a lot more easily. It's an excellent drop-in mitigation. Here's the real question. If this stuff is so cheap and easy (we've got filters in this room, we've got UV lights outside) why isn't it already fixed? Well, we think one of the big reasons it's not already fixed is that it's invisible. Literally. When we're talking about particulate in the air, we're talking about particles so small that measuring them requires innovative technologies: in the device we work with, they have to be grown physically large enough, via condensation, to become countable, because they're so small they don't meaningfully reflect enough light to count them even under laboratory conditions, let alone with your eyes. Think about when you see smog outside: that is the aggregate effect of looking through a bunch of particles in the air. When you're inside, you just can't see it. You can't tell what kind of particles they are or how big they are, and you also can't tell what the CO2 concentrations are like. So you can't tell the extent to which your judgment is impaired, and you can expect poor health outcomes like that.
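As an aside, a rough sanity check of the "about 12 kilograms of air a day" figure from the opening quiz. The numbers below (an 8 L/min resting minute ventilation and roughly 1.2 kg/m^3 air density) are assumed round values for illustration, not figures from the talk:

```python
# Order-of-magnitude check on daily air intake by mass.
litres_per_day = 8 * 60 * 24            # ~11,520 L at an assumed 8 L/min
kg_per_day = litres_per_day / 1000 * 1.2  # air density ~1.2 kg/m^3 at room conditions
print(round(kg_per_day, 1))             # ~13.8 kg: same ballpark as "about 12 kg"
```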
So what I'm here to talk about is U-crit Air, also known as the clean air Tamagotchi, which is something we've been working on for the last five or six weeks. This is a portable air quality device that includes a virtual pet. The idea is that in low-power mode there's an e-ink screen on top, which wakes and updates periodically. You've seen them on the Kindle, so that supports really low-power operation; we hope to get at least a week or two out of it. The hardware we have outside is pre-production hardware, but we are eventually hoping to sell these things, so take that with a grain of salt when you play with them. They are fun to play with, because on the bottom screen, when you wake it up into higher-power mode, you have a virtual pet character. And the whole pitch of this is that air quality, especially indoor air quality, is something that's really easy to ignore. You can sit there at work and think to yourself, I have kind of a headache whenever I sit in this conference room, but you've got other dials to pay attention to. And it builds up: every time you're exposed to higher CO2 levels, every time you're exposed to high particulate concentration. The chronic health effects of all of these are cumulative, which means you can ignore it and ignore it and ignore it until, 30 years down the road, it turns out you've had sequelae from multiple low-intensity viral infections, or you've developed chronic lung problems from exposure to particulate matter. So the concept here is a sort of gamification approach, where you and the virtual pet are exposed to the same air by virtue of it being with you. It has onboard sensors that can measure carbon dioxide concentration and particulate concentration, and a couple of other fun things. Except the virtual pet reacts in real time, much faster than you do. So what we're doing is hacking your judgment: you know intellectually that there are going to be long-term negative effects from the bad air quality you're exposed to, but if we make those long-term negative effects happen in the very short term to this little guy, especially if he's cute, it's a way to hack that decision-making process. So you can play with him, you can arrange furniture, you can buy him various clean air interventions like the ones we have here: you can buy him a HEPA filter, you can buy him UV lights, you can have him wear a mask, all of the fun stuff that we think are straightforward, relatively easy, and effective interventions, which you can also apply to your character. And the idea is: let's take this thing that on the human time scale, the human health time scale, is essentially invisible, and make it very visible, in a relatively fun way, so it makes a lot more sense to pay attention to it. So that's the concept. I'm not going to try to demo it during the talk, because I was very politely informed that they're going to drag me off stage with a shepherd's crook if I go over. You can catch me after the end of this track out front, and we'll probably do a little bit of trivia and give away the pre-production units we have. You can find more information about them at ucritter.com slash error, or there's that information outside? Yeah. So those are all pre-production, you can tell: 3D-printed case, assembled in-house. So they're a little bit of a precious commodity, but we're going to give them away. A fun little crash project.
Blew the in-seat power circuit breaker on the plane, which apparently they can restore. Got to do some laptop power management limiting to get back on the road building these things. But some other cool tricks that they have up their sleeve, compared to other commercial air quality sensors: they do barometric compensation for CO2. So if you live anywhere other than sort of median elevation, or if you're on a plane or climbing a mountain, the air quality sensor that you already have is almost certainly lying to you if it's not barometrically compensated. So this is a fun little trick that we do. This is me flying the last leg from Korea to here, where you can see that without barometric compensation, you're underestimating the carbon dioxide concentration in the cabin by a couple hundred ppm. So we think that's a fun little bit of special sauce that went into this project. They also have a barometric pressure sensor on board, so if you want to climb a mountain, you can track your altitude as you go. And the other trick that we think is really cool, and should become something that people expect in mitigated spaces, is on the topic of these air quality problems being invisible: if you buy something like these HEPA filters, how do you even tell if they work? Well, one way you can do it is to keep track of the carbon dioxide or particulate concentration in the room. For HEPA, you're looking at particulate, but for ventilation to outside air, like opening a window, you want to look at carbon dioxide. And you can do something: you can effect an intervention like opening a window, or you can effect an intervention like turning on a HEPA filter, and you can watch the rate at which these airborne contaminants drop. And with a little bit of math that is baked into the U-critters, you can calculate what the CO2 concentration change, or the particulate concentration change, tells you about the mitigation choices you've made in your space. That's usually measured in air changes per hour, where one air change per hour means that, volumetrically, all the air in the room would be cycled through once an hour. That's great for measuring CO2. So if you're going to effect some mitigation like turning on your HVAC, you can watch the change in CO2. You can see a drop. And if you have a U-critter with you, it's logging, so you can compute the effectiveness of that intervention. And if you're going to switch on HEPA filters, you can take your U-critter, look at the recorded particulate concentration information, and compute what's called an effective air changes per hour number. So I think that's the last cool trick for the U-critter right now. Oh, it also works as a Bluetooth sensor: you can talk to it over Bluetooth and pull out current air quality readings. But it has Wi-Fi hardware, and we're excited for the future. We're hoping to ship some updates that will let us do privacy-preserving aggregation of air quality statistics, sort of like existing air quality sensor networks, but extending to places that might not otherwise be reached, like the places you go in your daily commute that fixed sensors aren't appropriate for. This is open source work. We're still in pre-production, so I don't think the schematics are up there yet, but if you pester them in the Discord, maybe you'll get them to post them. So if you have feedback, we'd love to hear it. The website is out there. It's also in the firmware if you get one.
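The two calculations the speaker describes are simple enough to show concretely. Below is a minimal Python sketch, assuming a first-order linear pressure rescaling for the NDIR CO2 reading and the standard tracer-gas decay model for air changes per hour; the function names, calibration constants, the 420 ppm outdoor background, and the example numbers are illustrative assumptions, not the U-critter firmware.

```python
# Minimal sketch (not UCritter firmware) of the two calculations described
# above. Function names, calibration constants, and example numbers are
# illustrative assumptions.
import math

def pressure_corrected_co2(reported_ppm: float, pressure_hpa: float,
                           calibration_hpa: float = 1013.25) -> float:
    """First-order barometric correction for an NDIR CO2 reading.

    NDIR sensors respond to the number density of CO2 molecules, which at a
    fixed mixing ratio scales with pressure, so a sensor calibrated at sea
    level under-reads in an airplane cabin or at altitude. A linear rescaling
    is the simplest correction; real sensors publish device-specific curves.
    """
    return reported_ppm * calibration_hpa / pressure_hpa

def effective_ach(c_start: float, c_end: float, minutes: float,
                  background: float = 420.0) -> float:
    """Effective air changes per hour from a logged concentration decay.

    Tracer-gas decay model: C(t) - C_bg = (C_0 - C_bg) * exp(-ACH * t), with
    t in hours. Use outdoor CO2 (~420 ppm) as the background when judging
    ventilation, or ~0 for particle counts when judging a HEPA filter.
    """
    hours = minutes / 60.0
    return math.log((c_start - background) / (c_end - background)) / hours

# Example: CO2 falling from 1400 ppm to 800 ppm over 30 minutes after opening
# a window corresponds to roughly 1.9 effective air changes per hour.
print(effective_ach(1400, 800, 30))
```

The decay-rate framing is why the same device logic covers both interventions: only the choice of tracer (CO2 versus particle counts) and the assumed background change.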
Please reach out if you'd like to collaborate. I'd also like to say thanks to Balvi, thanks to the Ethereum world. I'm not a crypto person personally, but the work that we've been up to at Entropic working on the Clean Air Tamagotchi but also at OpenAeros and the other efforts wouldn't have been possible without you guys. So it's something we really appreciate. I think I'm going to wrap it there. Thank you so much, Lewis.", "eventId": "devcon-7", - "slot_start": 1731484800000, - "slot_end": 1731486600000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/11DTfplHre4QguicqcET5ubMdfycNHdyjo8Imn5A0lWc", - "resources_slides": null, + "slot_start": 1731570600000, + "slot_end": 1731571500000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1ZJZJ_2zvDgnrKFG8JEZo8VMp_Z1mb0btmMRtR2j0Vv0", + "resources_slides": "", "speakers": [ - "tdot" + "louis-goessling" ] }, "vector": [ 0, + 6, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -653783,7 +651913,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -653827,7 +651956,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -653917,7 +652045,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -654188,7 +652315,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -654283,7 +652409,6 @@ 2, 0, 0, - 0, 2, 0, 0, @@ -654302,42 +652427,53 @@ }, { "session": { - "id": "scaling-clean-air-now-and-the-future", - "sourceId": "RKA9MF", - "title": "Scaling Clean Air: Now and the Future", - "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "id": "scaling-community-lessons-from-building-base", + "sourceId": "P73W8S", + "title": "Scaling Community: Lessons from Building Base", + "description": "Drawing from experiences as a Base core contributor and Base community lead, this talk is about building scalable Ethereum communities. Learn strategies for engagement, growth, best practices, and key insights.", + "track": "Developer Experience", "type": "Lightning Talk", - "expertise": "", - "audience": "Engineering", + "expertise": "Intermediate", + "audience": "Community", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 758, + "tags": [ + "Security", + "community", + "Layer 1", + "Layer 2s", + "Values" + ], + "keywords": [ + "Community", + "Discord", + "Farcaster", + "Building Community", + "Community Management", + "Community Security" + ], + "duration": 574, "language": "en", - "sources_swarmHash": "12064d7d2a4ccbfe9f6ef7d3ab1af74c6aa25dd4a215d0812f624bf47f56891f", - "sources_youtubeId": "q2YUVMRPQKw", + "sources_swarmHash": "4e8c46194a8800b5909aeb01cc6c0324728e82519f7c0ad031cb1149411d564e", + "sources_youtubeId": "7T9YaSIAk2s", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735b4a09dbb7a90e12f4f30", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735b4a09dbb7a90e12f4f30.vtt", - "transcript_text": " I have some money to do stuff around air quality, indoor air quality, especially I'm an engineer by trade, so I write software. I wrote the software on the toy that you saw in the booth out there. And I got tagged in to give this presentation, so you're going to have to bear with me on that front. So here's a question. 
And we're going to do Price Is Right rules: closest without going over. You eat about 1.8 kilograms of food a day. You drink about 2 kilograms of water a day. Does anybody know how much air you consume a day? Hands, somebody give me a hand. Yes? 11,000 liters. I want it by mass. I heard 10 kilograms. Who was that? Yeah, all right. Toss them one. Yeah, let's say 12 kilograms a day. That's a lot of air, and there's stuff in that air. Stuff in the air. Bad air is a bad time. So this is from a study about decision-making versus carbon dioxide content. You know about hypoxia's effect on the brain: long before you ever get to anything like passing out, you start making really bad decisions. So if you wouldn't sign on a house drunk, you probably shouldn't sign on a house above a couple thousand ppm of CO2, or you will make terrible choices. The inverse effect is also true. Bad air is a bad time also because it leads to airborne infectious disease. We all know about COVID, but there's plenty of other stuff: tuberculosis, chickenpox, measles, all that. There are great vaccines against some of it; some of it, nobody really knows. And you really don't want to catch it even if you have a not particularly symptomatic case. There's a really interesting talk about that you can stick around for. Also, the economics of it are incredible. Clean air-based interventions are an excellent way to reduce things like heart attacks. Or children's test scores: air purifiers in classrooms are one of the most economically effective ways to boost test scores, compared to anything like teacher training or other interventions. You can find direct links between indoor air quality in education spaces, absenteeism, and eventual health and economic outcomes for the people in those spaces. So at Entropic Engineering and OpenAeros, we think this is pretty cheap and easy to fix. You can have some really high-impact effects. Masks, especially with fit testing; this is another place where we received some funding from Balvi, to work on open-source condensation particle counters that can make it really easy to do this kind of work. Portable air cleaners; you can see one in this picture of our office. Windows; these ones don't open, but if you're in a place where it's thermodynamically acceptable, you can open your windows, and that's an excellent way to do some mitigation of this kind of stuff. Simple HVAC upgrades, like dropping in better filters and making sure you actually change your filters. And we also do some work with skin- and eye-safe 222 nanometer UV light, like this one here; there's also one on the display table out front. Unlike earlier 254 nanometer technologies, this is relatively easy to deploy, because the threshold for skin and eye damage is much, much higher, so it's something that you can retrofit into existing spaces a lot more easily. It's an excellent drop-in mitigation. Here's the real question. If this stuff is so cheap and easy, and we've got filters in this room and UV lights outside, why isn't it already fixed? Well, we think one of the big reasons it's not already fixed is that it's invisible. Literally.
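The quiz arithmetic at the top of this passage checks out under ordinary assumptions (dry air near sea level, density about 1.2 kg per cubic meter):

```latex
% Back-of-the-envelope check of the quoted daily air intake, assuming dry
% air near sea level with density about 1.2 kg per cubic meter:
\[
  m_{\text{air}} \approx V \cdot \rho_{\text{air}}
  \approx 11\,000\ \text{L} \times 1.2\ \tfrac{\text{kg}}{\text{m}^3}
  = 11\ \text{m}^3 \times 1.2\ \tfrac{\text{kg}}{\text{m}^3}
  \approx 13\ \text{kg}.
\]
```

That lands within rounding of the "let's say 12 kilograms a day" the speaker settles on.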
When we're talking about particulate in the air, we're talking about particles that are so small that measuring them requires innovative technology: in the device we work with, they have to be grown physically large enough, via condensation, to become countable, because they're so small they don't meaningfully reflect enough light to be counted even under laboratory conditions, let alone with your eyes. Think about when you see smog outside: that is the aggregate effect of looking through a bunch of particles in the air. When you're inside, you just can't see it. And you can't tell what kind of particles they are or how big they are, and you also can't tell what CO2 concentrations are like. So you can't tell the extent to which your judgment is impaired, or whether you should expect poor health outcomes. So what I'm here to talk about is U-critter air, also known as the clean air Tamagotchi, which is something we've been working on for the last five or six weeks. This is a portable air quality device that includes a virtual pet. The idea is that in low power mode, there's an e-ink screen on top, which will wake and update periodically. You've seen them on the Kindle, so that supports really low power operation; we hope to get at least a week or two out of it. The hardware we have outside is pre-production hardware, but we are eventually hoping to sell these things, so take that with a grain of salt when you play with them. But they are fun to play with, because on the bottom screen, when you wake it up into higher power mode, you have a virtual pet character. And the whole pitch of this is that when you think about air quality, especially indoor air quality, it's something that's really easy to ignore. You can sit there at work and think to yourself, I have kind of a headache whenever I sit in this conference room, but I've got some deadlines to pay attention to. And that builds up: every time you're exposed to higher CO2 levels, every time you're exposed to high particulate concentration. The chronic health effects of all of these are cumulative, which means that you can ignore it and ignore it and ignore it until, 30 years down the road, it turns out that you've had sequelae from multiple low intensity viral infections, or you have developed chronic lung problems from exposure to particulate matter. So the concept that we have here is a sort of gamification approach, where you and the virtual pet are exposed to the same air by virtue of it being with you. It has onboard sensors that can measure carbon dioxide concentration and particulate concentration and a couple other fun things, except the virtual pet reacts in real time, much faster than you do. So what we're doing is sort of hacking your judgment: you know intellectually that there are going to be long-term negative effects from the bad air quality that you're exposed to, but if we make those long-term negative effects happen in the very short term to this little guy, especially if he's cute, it's sort of a way to hack that decision making process. So you can play with him, you can arrange furniture, you can buy him various clean air interventions like those that we have here: you can buy him a HEPA filter, you can buy him UV lights, you can have him wear a mask, all of that fun stuff that we think are sort of straightforward, relatively easy and effective interventions. You can also apply them to your own character.
And the idea is: let's take this thing that on the human time scale, and on the human health time scale, is essentially invisible, make it very visible, especially in a relatively fun way, and make it make a lot more sense to pay attention to. So that's the concept. I'm not going to try and demo it during the talk, because I was very politely informed that they're going to drag me off stage with a shepherd's crook if I go over. You can catch me after the end of this track out front, and we'll probably do a little bit of trivia and give away the units that we have that are pre-production. You can find more information about them at ucritter.com slash error, or there's that information outside. Yeah. So those are all pre-production, you can tell: 3D printed case, assembled in-house. So they're a little bit of a precious commodity, but we're going to give them away. Fun little bit of a crash project. Blew the in-seat power circuit breaker on the plane, which apparently they can restore. Got to do some laptop power management limiting to get back on the road building these things. But some other cool tricks that they have up their sleeve, compared to other commercial air quality sensors: they do barometric compensation for CO2. So if you live anywhere other than sort of median elevation, or if you're on a plane or climbing a mountain, the air quality sensor that you already have is almost certainly lying to you if it's not barometrically compensated. So this is a fun little trick that we do. This is me flying the last leg from Korea to here, where you can see that without barometric compensation, you're underestimating the carbon dioxide concentration in the cabin by a couple hundred ppm. So we think that's a fun little bit of special sauce that went into this project. They also have a barometric pressure sensor on board, so if you want to climb a mountain, you can track your altitude as you go. And the other trick that we think is really cool, and should become something that people expect in mitigated spaces, is on the topic of these air quality problems being invisible: if you buy something like these HEPA filters, how do you even tell if they work? Well, one way you can do it is to keep track of the carbon dioxide or particulate concentration in the room. For HEPA, you're looking at particulate, but for ventilation to outside air, like opening a window, you want to look at carbon dioxide. And you can do something: you can effect an intervention like opening a window, or you can effect an intervention like turning on a HEPA filter, and you can watch the rate at which these airborne contaminants drop. And with a little bit of math that is baked into the U-critters, you can calculate what the CO2 concentration change, or the particulate concentration change, tells you about the mitigation choices you've made in your space. That's usually measured in air changes per hour, where one air change per hour means that, volumetrically, all the air in the room would be cycled through once an hour. That's great for measuring CO2. So if you're going to effect some mitigation like turning on your HVAC, you can watch the change in CO2. You can see a drop. And if you have a U-critter with you, it's logging, so you can compute the effectiveness of that intervention.
And if you're going to switch on HEPA filters, you can take your U-critter and look at the recorded particulate concentration information and compute what's called an effective air changes per hour number. So I think that's the last cool trick for the U-critter right now. Oh, it also works as a Bluetooth sensor. You can talk to it over Bluetooth and pull out current air quality readings. But it has Wi-Fi hardware, and we're excited in the future. We're hoping to ship some updates that will let us do privacy-preserving aggregation of air quality statistics, sort of like existing air quality sensor networks, but that extend to places that might not otherwise be reached, like the places you go in your daily commute that fixed sensors aren't appropriate for. This is open source work. We're still in pre-production, so you can't ‑‑ I don't think the schematics are up there yet, but if you pester them in the Discord, maybe you'll get them to post them. So if you have feedback, we'd love to hear it. The website is out there. It's also in the firmware if you get one. Please reach out if you'd like to collaborate. I'd also like to say thanks to Balvi, thanks to the Ethereum world. I'm not a crypto person personally, but the work that we've been up to at Entropic working on the Clean Air Tamagotchi but also at OpenAeros and the other efforts wouldn't have been possible without you guys. So it's something we really appreciate. I think I'm going to wrap it there. Thank you so much, Lewis.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731570600000, - "slot_end": 1731571500000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1ZJZJ_2zvDgnrKFG8JEZo8VMp_Z1mb0btmMRtR2j0Vv0", - "resources_slides": null, + "slot_start": 1731401400000, + "slot_end": 1731402000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1Z6KNA8npIjlvXcTWwPrFhWHFQ9A2gd2wkiaNRb-bwuQ", + "resources_slides": "https://drive.google.com/file/d/1pHNzfZgE2qzPXp91mudAu0-VW7oMeqHJ/view", "speakers": [ - "louis-goessling" + "wbnns" ] }, "vector": [ 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -655082,6 +653218,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -655096,6 +653233,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -655146,6 +653284,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -655203,6 +653342,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -655254,6 +653394,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -655638,6 +653779,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -655646,12 +653788,6 @@ 0, 0, 0, - 0, - 0, - 0, - 2, - 0, - 0, 2, 0, 0, @@ -655660,66 +653796,62 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "scaling-community-lessons-from-building-base", - "sourceId": "P73W8S", - "title": "Scaling Community: Lessons from Building Base", - "description": "Drawing from experiences as a Base core contributor and Base community lead, this talk is about building scalable Ethereum communities. Learn strategies for engagement, growth, best practices, and key insights.", - "track": "Developer Experience", + "id": "scaling-crypto-theres-an-app-for-that-onboarding-millions-in-africa-with-minipay", + "sourceId": "EXCPST", + "title": "Scaling Crypto? There's an App for That. Onboarding Millions in Africa with MiniPay", + "description": "Post-EthCC, everyone’s talking about the industry’s influx of infra & lack of consumer apps. These conversations overlook the strides made in Africa with MiniPay, a self-custodial stablecoin wallet with 3M+ activated accounts since launching less than a year ago. 
In this panel, Rene, Yoseph & co-panelists will discuss building, scaling, & updating a truly user-friendly crypto wallet, introducing net new users to Web3 and dApps, & the power of ERC-20 stablecoins for payments in emerging markets.", + "track": "Real World Ethereum", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Community", + "expertise": "Beginner", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Security", - "community", - "Layer 1", - "Layer 2s", - "Values" + "Protocol Design", + "Scalability", + "UI/UX", + "Mobile", + "Protocol Design", + "Scalability", + "UI/UX" ], "keywords": [ - "Community", - "Discord", - "Farcaster", - "Building Community", - "Community Management", - "Community Security" + "payment", + "p2p finance", + "mobile" ], - "duration": 574, + "duration": 575, "language": "en", - "sources_swarmHash": "4e8c46194a8800b5909aeb01cc6c0324728e82519f7c0ad031cb1149411d564e", - "sources_youtubeId": "7T9YaSIAk2s", + "sources_swarmHash": "7851df1a8fb38bb024cefa2524794dd6ea145f15f791a2a89b592b81f5121777", + "sources_youtubeId": "cxrKuY7XQoc", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6735c0b19dbb7a90e1dd49c5", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735c0b19dbb7a90e1dd49c5.vtt", + "transcript_text": " All right. Hi, everyone. I'm Charles from Opera. I'm leading the product team for Minipay. And I'm here to tell you about how we're onboarding millions of users in Africa to crypto and to stable coins. Opera, as you may know, is a browser company. We have many apps, and we have over 100 million users on the African continent. Opera Mini is our most popular app. It's a browser that allows you to surf the web, but in a very efficient way that means that you don't have to buy as much data to browse the web, so people love it. And MiniPay is built right into Opera Mini and distributed through this channel. We've launched about a year ago to five African countries, and we've been getting tremendous traction. MiniPay is not only built inside Opera Mini, but it's also built on the best of Celo. And it's different in many ways to other crypto wallets. Minipay doesn't try to be a Swiss army knife that sees all your assets and all the chains and all the swaps. It's really focused around a payment use case using stable coins. And Celo is perfectly suited for that. It allows us to use phone numbers instead of wallet addresses. So similar as to mobile money systems already in market today. It's super fast and almost free. So a typical stablecoin transfer on Celo is 0.1 cent. So we don't need things like account abstraction to really get the user benefit of gas abstraction. It also has automatic key backup, so when users activate MiniPay, their keys are backed up, they don't lose them, which is a common thing in self-custody wallets. And it's a UI that really tries to speak to users with familiar terms and has this Web2 experience to it with the full power of Web3. And the last part, which very few wallets do, is really have a localized approach to on and off ramp by working with smaller partners to really make sure that it's possible to buy and sell stable coins for very low amounts using local payment methods. 
Zooming in on one of these differences: the phone number to wallet address system is actually built on Social Connect, which is a Celo protocol that allows us to map this in a privacy-preserving way, from number to address, but not the other way. And it really improves discoverability: you can see which of your friends are already on MiniPay, and it makes copy-pasting addresses a thing of the past. Another interesting feature of Opera MiniPay is that, although we've added support for more dollar stablecoins, we wanted to make it easy for users to move in and out of them if needed, by having this concept of swaps using pockets. So you can simply drag and swap from one pocket to the other, one to one. And we use the Mento protocol for this, which allows us to have super low slippage and excellent fees. As this is a developer conference, I'll mention that we also have mini apps, which are web apps that are integrated within MiniPay. We have over 10 of them now, also getting tremendous traction since launch, and this is something that you can do and build using existing web and Ethereum technologies; the Celo team is there to assist in making this a reality. I'm happy to share that since launch, we've activated over 4 million wallets, mapped to phone numbers, and during October, we've done over a million P2P transfers between those users. Thank you. You know, going past the tech and the numbers, we're starting now to tell a bit more of the stories that we're seeing in market using MiniPay. I'll go a bit quick now; I see time's running out. But this is a story of a freelancer getting paid in stables and being able to use them locally. This is another one in Kenya: being able to use USDT and pay local merchants for groceries, interacting directly with the local payment system, M-Pesa, so it requires no merchant integration. This is another one: someone lost their wallet in Kenya and was able to basically pay for everything during a week, thanks to MiniPay and this integration with the local payment system. MiniPay is now growing outside of Opera Mini, so I'm also glad to announce that we've now made the Android app available as a standalone app, so you can just go on the Google Play Store and try it out. It's enabled in over 60 countries, and we basically ramp up as we have good cash-in and cash-out in those countries. This is what it looks like, really trying to focus on the dollar use case and the payment use case. And we foresee that users in-country in Nigeria or Kenya will invite people from outside Nigeria to the standalone app, and you can imagine cross-border payments being powered through this system. Also happy to reveal that the iOS version is coming very soon. So if you are keen to be one of the first to try MiniPay on iOS, scan this QR code and sign up via the form, and we'll be sending invites to the TestFlight very shortly. And if you want to build on MiniPay, please reach out to us on X at MiniPay or at CeloDevs. And the Celo devs are organizing bounties during ETHGlobal. So if you're joining the hackathon, make sure you check it out. Thank you. Thank you, Charles. That was incredible, all that traction. Any questions from our audience? Any questions? Okay, well, a few. I see the gentleman in the red shirt first. So my understanding is that in Africa, there's not much of a savings culture, because many of the currencies are losing their value every year.
So there's more of a borrowing culture than a savings culture. With access to stable coins and mini-pay, are you seeing more of a savings culture develop? Are people using this for savings and accruing those savings over time? What sort of data are you seeing around that? So savings is certainly something that we hear could be useful in the context of mini-pay. This is not a use case we're really focusing on now. So for us right now, it's a marginal use case. It's really around money movement at this point. Another one over there. With using USD there, is that somehow in conflict with the local currencies? Is that something that you see as a risk? Like, for example, when integrating with tax payments, that should be happening, or just with local governments not being happy that their original money is not being used, or how is that in Africa? So, I mean, these economies already have a lot of demand for dollars. This is just a different channel to access them. And, you know, users can use Opera Mini and, sorry, MiniPay in a way that they can think in local currency, like they can denominate everything in local currency. So a lot of people are doing that to alleviate the kind of mental friction. One more over here. Good job. All right. What is a mini app? Is it like an iframe or? Yeah, it's an embedded web app. Yeah, it's not an iframe, but we just invoke your app. And each app is within a sandbox, so we only allow known blockchain interactions. Is the output dumping HTML? Yeah, this is existing web technology. This is existing Web3 API. It's not exotic anymore, but it's packaged in a way that users don't have to connect their wallets. It's integrated, and the signing process is very human-readable. Okay, cool. I see that there's a few people wanting to ask more questions, but the time is up for this session. Feel free to reach out to Charles for further discussion. Let's thank Charles for another day. Thank you, Charles.", "eventId": "devcon-7", - "slot_start": 1731401400000, - "slot_end": 1731402000000, + "slot_start": 1731574800000, + "slot_end": 1731575400000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Z6KNA8npIjlvXcTWwPrFhWHFQ9A2gd2wkiaNRb-bwuQ", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1lk319WDhop2qBsR_BdMLAl1tdzOwri17ao4IPguI7Ac", + "resources_slides": "https://drive.google.com/file/d/1IWeENduiUQrxullo7FQJbPPCzmc0EBA9/view", "speakers": [ - "wbnns" + "charles-hamel" ] }, "vector": [ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -656464,7 +654596,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -656479,13 +654610,13 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -656508,6 +654639,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -656520,6 +654652,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -656530,7 +654663,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -656588,7 +654720,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -656602,6 +654733,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -656640,9 +654772,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -657033,9 +655162,6 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, @@ -657044,52 +655170,46 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "scaling-crypto-theres-an-app-for-that-onboarding-millions-in-africa-with-minipay", - "sourceId": "EXCPST", - "title": "Scaling Crypto? There's an App for That. Onboarding Millions in Africa with MiniPay", - "description": "Post-EthCC, everyone’s talking about the industry’s influx of infra & lack of consumer apps. 
These conversations overlook the strides made in Africa with MiniPay, a self-custodial stablecoin wallet with 3M+ activated accounts since launching less than a year ago. In this panel, Rene, Yoseph & co-panelists will discuss building, scaling, & updating a truly user-friendly crypto wallet, introducing net new users to Web3 and dApps, & the power of ERC-20 stablecoins for payments in emerging markets.", - "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "scaling-ethereum-with-das-an-iterative-approach", + "sourceId": "JFWPRG", + "title": "Scaling Ethereum with DAS: an iterative approach", + "description": "In this time between the launch of 4844 and the possible launch of a first version of PeerDAS, we explore and explain the iterative approach that has been employed in the rollout of blobs and DAS to Ethereum, and discuss the past and future steps.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Protocol Design", - "Scalability", - "UI/UX", - "Mobile", - "Protocol Design", - "Scalability", - "UI/UX" + "Blobspace", + "Data Availability", + "Ethereum Roadmap", + "Scalability" ], "keywords": [ - "payment", - "p2p finance", - "mobile" + "PeerDAS" ], - "duration": 575, + "duration": 1522, "language": "en", - "sources_swarmHash": "7851df1a8fb38bb024cefa2524794dd6ea145f15f791a2a89b592b81f5121777", - "sources_youtubeId": "cxrKuY7XQoc", + "sources_swarmHash": "567a45d310dc81275e061b69797f55ce5386ac2d95acdaf5d71076c274539d71", + "sources_youtubeId": "toR2UKzE_zA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735c0b19dbb7a90e1dd49c5", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735c0b19dbb7a90e1dd49c5.vtt", - "transcript_text": " All right. Hi, everyone. I'm Charles from Opera. I'm leading the product team for Minipay. And I'm here to tell you about how we're onboarding millions of users in Africa to crypto and to stable coins. Opera, as you may know, is a browser company. We have many apps, and we have over 100 million users on the African continent. Opera Mini is our most popular app. It's a browser that allows you to surf the web, but in a very efficient way that means that you don't have to buy as much data to browse the web, so people love it. And MiniPay is built right into Opera Mini and distributed through this channel. We've launched about a year ago to five African countries, and we've been getting tremendous traction. MiniPay is not only built inside Opera Mini, but it's also built on the best of Celo. And it's different in many ways to other crypto wallets. Minipay doesn't try to be a Swiss army knife that sees all your assets and all the chains and all the swaps. It's really focused around a payment use case using stable coins. And Celo is perfectly suited for that. It allows us to use phone numbers instead of wallet addresses. So similar as to mobile money systems already in market today. It's super fast and almost free. So a typical stablecoin transfer on Celo is 0.1 cent. So we don't need things like account abstraction to really get the user benefit of gas abstraction. It also has automatic key backup, so when users activate MiniPay, their keys are backed up, they don't lose them, which is a common thing in self-custody wallets. 
And it's a UI that really tries to speak to users in familiar terms, and has this Web2 experience to it, with the full power of Web3. And the last part, which very few wallets do, is really having a localized approach to on- and off-ramp, working with smaller partners to really make sure that it's possible to buy and sell stablecoins for very low amounts using local payment methods. Zooming in on one of these differences: the phone number to wallet address system is actually built on Social Connect, which is a Celo protocol that allows us to map this in a privacy-preserving way, from number to address, but not the other way. And it really improves discoverability: you can see which of your friends are already on MiniPay, and it makes copy-pasting addresses a thing of the past. Another interesting feature of Opera MiniPay is that, although we've added support for more dollar stablecoins, we wanted to make it easy for users to move in and out of them if needed, by having this concept of swaps using pockets. So you can simply drag and swap from one pocket to the other, one to one. And we use the Mento protocol for this, which allows us to have super low slippage and excellent fees. As this is a developer conference, I'll mention that we also have mini apps, which are web apps that are integrated within MiniPay. We have over 10 of them now, also getting tremendous traction since launch, and this is something that you can do and build using existing web and Ethereum technologies; the Celo team is there to assist in making this a reality. I'm happy to share that since launch, we've activated over 4 million wallets, mapped to phone numbers, and during October, we've done over a million P2P transfers between those users. Thank you. You know, going past the tech and the numbers, we're starting now to tell a bit more of the stories that we're seeing in market using MiniPay. I'll go a bit quick now; I see time's running out. But this is a story of a freelancer getting paid in stables and being able to use them locally. This is another one in Kenya: being able to use USDT and pay local merchants for groceries, interacting directly with the local payment system, M-Pesa, so it requires no merchant integration. This is another one: someone lost their wallet in Kenya and was able to basically pay for everything during a week, thanks to MiniPay and this integration with the local payment system. MiniPay is now growing outside of Opera Mini, so I'm also glad to announce that we've now made the Android app available as a standalone app, so you can just go on the Google Play Store and try it out. It's enabled in over 60 countries, and we basically ramp up as we have good cash-in and cash-out in those countries. This is what it looks like, really trying to focus on the dollar use case and the payment use case. And we foresee that users in-country in Nigeria or Kenya will invite people from outside Nigeria to the standalone app, and you can imagine cross-border payments being powered through this system. Also happy to reveal that the iOS version is coming very soon. So if you are keen to be one of the first to try MiniPay on iOS, scan this QR code and sign up via the form, and we'll be sending invites to the TestFlight very shortly. And if you want to build on MiniPay, please reach out to us on X at MiniPay or at CeloDevs. And the Celo devs are organizing bounties during ETHGlobal.
So if you're joining the hackathon, make sure you check it out. Thank you. Thank you, Charles. That was incredible, all that traction. Any questions from our audience? Any questions? Okay, well, a few. I see the gentleman in the red shirt first. So my understanding is that in Africa, there's not much of a savings culture, because many of the currencies are losing their value every year. So there's more of a borrowing culture than a savings culture. With access to stablecoins and MiniPay, are you seeing more of a savings culture develop? Are people using this for savings and accruing those savings over time? What sort of data are you seeing around that? So, savings is certainly something that we hear could be useful in the context of MiniPay. This is not a use case we're really focusing on now. So for us right now, it's a marginal use case; it's really around money movement at this point. Another one over there. With using USDT, is that somehow in conflict with the local currencies? Is that something that you see as a risk? Like, for example, when integrating with tax payments that should be happening, or just with local governments not being happy that their original money is not being used, or how is that in Africa? So, I mean, these economies already have a lot of demand for dollars; this is just a different channel to access them. And, you know, users can use Opera Mini and, sorry, MiniPay, in a way that they can think in local currency, like they can denominate everything in local currency. So a lot of people are doing that to alleviate that kind of mental friction. One more over here. Good job. All right. What is a mini app? Is it like an iframe, or? Yeah, it's an embedded web app. It's not an iframe; we just invoke your app, and each app is within a sandbox, so we only allow known blockchain interactions. Is the output dumping HTML? Yeah, this is existing web technology, existing Web3 API. It's not exotic anymore, but it's packaged in a way that users don't have to connect their wallets. It's integrated, and the signing process is very human-readable. Okay, cool. I see that there are a few people wanting to ask more questions, but the time is up for this session. Feel free to reach out to Charles for further discussion. Let's thank Charles for another day.
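For readers curious about the one-way phone-number mapping described in this session, here is a deliberately simplified Python sketch of the idea. It is not the actual Social Connect construction: as I understand it, Celo derives a per-number pepper through a rate-limited service (ODIS) precisely so the hash cannot be brute-forced offline; a static pepper stands in for that step here, and all names and values are hypothetical.

```python
# Simplified one-way phone-number-to-address lookup, in the spirit of the
# mapping described above. NOT the real Social Connect protocol: Celo derives
# the pepper via a rate-limited service (ODIS) so the hash cannot be
# brute-forced offline; a static pepper stands in here. Names hypothetical.
import hashlib

REGISTRY: dict[str, str] = {}  # identifier -> wallet address (on-chain in reality)

def identifier(phone_e164: str, pepper: str) -> str:
    """Opaque identifier for a phone number; without the pepper, reversing
    it by enumerating the (small) phone-number space is impractical."""
    return hashlib.sha256(f"{phone_e164}:{pepper}".encode()).hexdigest()

def register(phone_e164: str, pepper: str, address: str) -> None:
    """Publish the mapping under the opaque identifier, never the raw number."""
    REGISTRY[identifier(phone_e164, pepper)] = address

def lookup(phone_e164: str, pepper: str) -> str | None:
    """Forward lookup works for anyone who can obtain the pepper; the reverse
    direction (address -> number) is not derivable from the registry alone."""
    return REGISTRY.get(identifier(phone_e164, pepper))

register("+254700000001", "pepper-from-odis", "0x1234abcd")
print(lookup("+254700000001", "pepper-from-odis"))  # -> 0x1234abcd
```

The asymmetry is the point: knowing a friend's number lets you discover their address, but the published mapping alone never exposes phone numbers.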
Thank you, Charles.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731574800000, - "slot_end": 1731575400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1lk319WDhop2qBsR_BdMLAl1tdzOwri17ao4IPguI7Ac", - "resources_slides": null, + "slot_start": 1731398400000, + "slot_end": 1731400200000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1AIOGsICQD3wWyrBZ5kDP7FX-hHDQ53lT_n8M7Jdl_kI", + "resources_slides": "https://drive.google.com/file/d/1DlRtJr-dFvuzL-C1inJa5SY6_xs7x-vB/view", "speakers": [ - "charles-hamel" + "francesco" ] }, "vector": [ @@ -657097,8 +655217,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -657673,9 +655791,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -657867,7 +655985,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -657890,7 +656007,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -657903,7 +656019,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -657913,8 +656028,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -658036,6 +656153,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -658404,7 +656522,7 @@ 0, 0, 0, - 0, + 2, 0, 0, 0, @@ -658415,9 +656533,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -658429,45 +656544,52 @@ }, { "session": { - "id": "scaling-ethereum-with-das-an-iterative-approach", - "sourceId": "JFWPRG", - "title": "Scaling Ethereum with DAS: an iterative approach", - "description": "In this time between the launch of 4844 and the possible launch of a first version of PeerDAS, we explore and explain the iterative approach that has been employed in the rollout of blobs and DAS to Ethereum, and discuss the past and future steps.", - "track": "Core Protocol", - "type": "Talk", + "id": "searcher-competition-in-block-building", + "sourceId": "MHRYV9", + "title": "Searcher Competition in Block Building", + "description": "We study the amount of MEV captured by validators, as a function of searcher competition. The core is a suitable solution concept in this context that makes robust predictions independent of implementation details or specific mechanisms chosen. The surplus share of validators is a function of searcher competition. Searchers can obtain at most the marginal value increase of the winning block relative to the best block that can be built without them. We validate the theory empirically.", + "track": "Cryptoeconomics", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Design", "featured": false, "doNotRecord": false, "tags": [ - "Blobspace", - "Data Availability", - "Ethereum Roadmap", - "Scalability" + "Core Protocol", + "Gaming", + "Mechanism design", + "MEV", + "theory", + "cooperative", + "Core Protocol", + "Mechanism design", + "MEV" ], "keywords": [ - "PeerDAS" + "Cooperative", + "Game", + "Theory;" ], - "duration": 1522, + "duration": 599, "language": "en", - "sources_swarmHash": "567a45d310dc81275e061b69797f55ce5386ac2d95acdaf5d71076c274539d71", - "sources_youtubeId": "toR2UKzE_zA", + "sources_swarmHash": "2549f66fc5a9634575f6d89f41afb1dc348f3a5b89c4267992a25578642d3491", + "sources_youtubeId": "T_I7HYBIxZQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "67383c451b0f83434d2a7a78", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67383c451b0f83434d2a7a78.vtt", + "transcript_text": " Hello everyone, welcome to the presentation. Today I will talk about searcher competition in block building. 
So this is the paper title, and it was co-authored with Christoph Schlegel and Danning Sui from Flashbots and Benny Sudakov from ETH Zurich; I'm a researcher at Offchain Labs. So the motivation was MEV, and we know that there are many forms of MEV extraction; I'm listing only the most popular ones here: CEX-DEX arbitrage, backrunning, sandwiches, and liquidations. And in each form, there are also many different specializations that you can take on as a searcher. And here, the main point is that there are many different types, so searchers differ in their specializations. So it creates some kind of heterogeneity among searchers. So we tried to model this with the tools of game theory. And for that, we need to identify who the players are. The main player is the validator, or proposer, that proposes the next block. And then there are these specialized arbitrageurs that in the Ethereum setting are called searchers. In the current market, there are also builders that aggregate searchers and also public mempool transactions, but we are ignoring them for this simplified game, because for us, the fundamental players are searchers and the proposer. We denote the set of searchers by S; we can identify them by their addresses. Then searchers submit their bundles of transactions to include in the block. Typically now they send them to a block builder, but in principle they could have sent them to a proposer. Among these bundles there are some conflicts, so you cannot build the block that is the union of all bundles; you can build many different blocks. For a built block, a searcher generates some value, and the validator or the proposer also generates a value. So, these are the smallest building blocks of our game. So, we are abstracting away from all particular mechanisms, how they interact, and that includes builders, and we only try to understand who will get how much among these players, depending on their bundles. So for this we are using tools from cooperative game theory, and for that we need to define the value of a coalition; the value of a coalition is the best block that the searchers in this coalition build. If there is no proposer in the coalition, then the value is zero. So the proposer, or validator, can block, or veto, any block; the searchers need to agree with the proposer. And this already gives a coalitional transferable utility game. And in these games, the most natural solution, we think, is the core. And let me define what the core is. It's very simple and intuitive. The core solution gives payoffs to all players so that no coalition of players prefers to deviate from their allocation and create their own block together. Because if they can, they will. And so we need to specify the payments to all searchers and the validator in the global game. So there are nice properties that the core has. First of all, it's always non-empty; you always have one solution. In particular, you can give all the value to the validator. But, of course, this would be very unfair to searchers. And there are other core solutions too, often, but not always. So, for example, there are cores such that searchers capture all the value. And if we have additive value over the bundles, then we actually have a nice characterization of the core. So to make it more interesting, we look at the stochastic setting, where we have a number of opportunities denoted by M and a number of searchers denoted by N.
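Since the talk leans on the core as its solution concept, a compact formalization of what was just described may help. This is my restatement in the talk's notation, not a quote from the paper:

```latex
% S = set of searchers, v = proposer/validator. Writing B(C) for the blocks
% buildable from the bundles of the searchers in C, the coalition value is
\[
  w(C) \;=\;
  \begin{cases}
    \max_{B \in \mathcal{B}(C \setminus \{v\})} \operatorname{val}(B) & \text{if } v \in C,\\
    0 & \text{otherwise,}
  \end{cases}
\]
% since the proposer can veto any block. A payoff vector x is in the core
% if it is efficient and no coalition prefers to break away:
\[
  \sum_{i \in S \cup \{v\}} x_i = w(S \cup \{v\}),
  \qquad
  \sum_{i \in C} x_i \;\ge\; w(C) \quad \text{for all } C \subseteq S \cup \{v\}.
\]
% Non-emptiness is immediate: giving the whole value to the validator and
% zero to every searcher satisfies all inequalities. Combining efficiency
% with the constraint for the coalition of everyone except searcher i gives
% the marginal-value cap mentioned in the session description:
\[
  x_i \;\le\; w(S \cup \{v\}) \;-\; w\big((S \setminus \{i\}) \cup \{v\}\big).
\]
```

On the stochastic result quoted just below: the garbled "this tool log n divided by n" is evidently a transcription of "two log n over n", so the claim reads as follows: if each of the n searchers finds each of the m opportunities independently with probability p of at least 2 log n / n, and m is at most n, then with high probability the validator captures essentially all of the value.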
And each searcher generates some value for each opportunity, so we have this matrix. And by P we denote the probability that each searcher finds each opportunity. Then we have very nice simple result that as soon as probability slightly high so it's larger than this tool log n divided by n and there are not too many opportunities so m is less than n and with high probability the validator captures everything and this can be in particular empirically checked in the data. So thank you for your attention. Happy to answer your questions. Thank you very much. So I posted one question. I wonder if you know how many search providers are currently in ethereum ecosystem so currently today i don't know but it's in the order of hundreds hundreds yes okay and we have one more question how do you know that the core is non-empty and this is usually requires strong assumptions so we know because if you give all the value to validator, it satisfies all these inequalities that core requires. It's very easy to check and strong assumption is that what you may refer as strong assumption is that validator can block any or veto any coalition. So that has a veto power and that's what that's why core is non-empty. And in particular we give exact example of a core. Okay, thank you very much. Please give a round of applause to our speaker. So we're gonna to have, I think, a break and the next session will start at 1.50. Thank you. Terima kasih telah menonton! Kampung Kampung Kampung Thank you.", "eventId": "devcon-7", - "slot_start": 1731398400000, - "slot_end": 1731400200000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1AIOGsICQD3wWyrBZ5kDP7FX-hHDQ53lT_n8M7Jdl_kI", - "resources_slides": null, + "slot_start": 1731648600000, + "slot_end": 1731649200000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1oRDP1vAH4P88oiBLEXOsJco7KgtJbQmYvKAeAkMug6Y", + "resources_slides": "https://drive.google.com/file/d/182J1xnWr7i92dwkT_5wohH-6ATft1TNi/view", "speakers": [ - "francesco" + "akaki-mamageishvili" ] }, "vector": [ - 0, - 0, 0, 0, 6, @@ -658911,6 +657033,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -659047,12 +657170,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -659223,10 +657340,12 @@ 0, 0, 0, + 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -659238,6 +657357,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -659284,10 +657404,8 @@ 0, 0, 0, - 2, 0, 0, - 2, 0, 0, 0, @@ -659331,6 +657449,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -659357,7 +657476,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -659409,7 +657527,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -659665,6 +657782,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -659691,6 +657809,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -659785,7 +657904,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -659796,65 +657914,52 @@ 0, 0, 0, + 2, 0, 0 ] }, { "session": { - "id": "searcher-competition-in-block-building", - "sourceId": "MHRYV9", - "title": "Searcher Competition in Block Building", - "description": "We study the amount of MEV captured by validators, as a function of searcher competition. The core is a suitable solution concept in this context that makes robust predictions independent of implementation details or specific mechanisms chosen. The surplus share of validators is a function of searcher competition. Searchers can obtain at most the marginal value increase of the winning block relative to the best block that can be built without them. 
We validate the theory empirically.", - "track": "Cryptoeconomics", + "id": "secondhand-liberalism-a-story-of-microdosing-internet-freedom", + "sourceId": "TB8DG7", + "title": "Secondhand Liberalism: A Story of Microdosing Internet Freedom", + "description": "Liberalism isn't the default. For those growing up in non-Western societies, liberalism is often \"secondhand\"—it's imbued in Western cultural products and present in software and the internet which in turn service liberal ideals. What if it's no longer the case?", + "track": "Real World Ethereum", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Design", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Core Protocol", - "Gaming", - "Mechanism design", - "MEV", - "theory", - "cooperative", - "Core Protocol", - "Mechanism design", - "MEV" - ], - "keywords": [ - "Cooperative", - "Game", - "Theory;" - ], - "duration": 599, + "tags": [], + "keywords": [], + "duration": 543, "language": "en", - "sources_swarmHash": "2549f66fc5a9634575f6d89f41afb1dc348f3a5b89c4267992a25578642d3491", - "sources_youtubeId": "T_I7HYBIxZQ", + "sources_swarmHash": "f8394bbfe10b82dd021769ebea02f6110bf67078d9fd2f93fefe403b06ac7639", + "sources_youtubeId": "QWr8sgwt21Y", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67383c451b0f83434d2a7a78", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67383c451b0f83434d2a7a78.vtt", - "transcript_text": " Hello everyone, welcome to the presentation. Today I will talk about searcher competition in block building. So this is the paper title and it was co-authored with Christoph Schlegel and Danik Sui from Flashpots and Benny Sudakov from ETH Zurich and I'm a researcher at off-chain labs. So motivation was MEV and we know that there are many forms of MEV extraction and I'm listing only the most popular ones here. So there's this sex-sex arbitrage, background, sandwiches, and liquidation. And in each form, there are also many different specializations that you can take as a searcher. And here, the main point is that there are many different types, so searchers are different in their specializations. So it creates some kind of heterogeneity among searchers. So we tried to model this with the tools of game theory. And for that, we need to identify who are the players. And the main player is validator or a proposer that proposes the next block. And then there are these specialized arbitrageurs that in the Ethereum setting are called searchers. And in current market, there are also builders that aggregate searchers and also mempool, public mempool transactions. But we are ignoring it for this simplified game because for us, the fundamental players are searchers and the proposer. We denote the set of searchers by S that we can identify by their addresses. Then searchers submit their bundles of transactions to include in the block. Typically now they send it to block builder, but in principle they could have sent it to a proposer then from these bundles there are some in these bundles there are some conflicts so you cannot build the block that is the union of all bundles so you can build many different blocks. For a build block, searcher generates some value, and validator or the proposer also generates a value. So, these are the smallest building blocks of our game. 
So, we are abstracting away from all particular mechanisms, how they interact, and that includes builders and we only try to understand who will get how much among these players depending on their bundles. So for this we are using tools from cooperative game theory and for that we need to define a value of a coalition and value of a coalition is the best block that the searchers in this coalition build. If there is no proposer in the coalition, then the value is zero. So proposer or validator can block any, or veto any block. So you need to agree with the proposer, the searchers. And this already gives a coalitional transferable utility game. And in these games, the most natural solution, we think, is the core. And let me define what is core. It's very simple and intuitive. The core solution gives payoffs to all players so that no coalition of players prefer to deviate from their location and create their own block together. Because if they can, they will. And so we need to specify the payments to all searchers and the validator of the global game. So there are nice properties that Core has. First of all, it's always non-empty. You always have one solution. In particular, you can give all the value to the validator. But, of course, this would be very unfair to searchers. And there are other core solutions too, often, but not always. So, for example, there are cores such that searchers capture all the value. And if we have additive value over the bundles, then we actually have nice characterization of the core. So to make it more interesting, we look at the stochastic setting where we have a number of opportunities denoted by M and number of searchers denoted by N. And each searcher generates some value for each opportunity, so we have this matrix. And by P we denote the probability that each searcher finds each opportunity. Then we have very nice simple result that as soon as probability slightly high so it's larger than this tool log n divided by n and there are not too many opportunities so m is less than n and with high probability the validator captures everything and this can be in particular empirically checked in the data. So thank you for your attention. Happy to answer your questions. Thank you very much. So I posted one question. I wonder if you know how many search providers are currently in ethereum ecosystem so currently today i don't know but it's in the order of hundreds hundreds yes okay and we have one more question how do you know that the core is non-empty and this is usually requires strong assumptions so we know because if you give all the value to validator, it satisfies all these inequalities that core requires. It's very easy to check and strong assumption is that what you may refer as strong assumption is that validator can block any or veto any coalition. So that has a veto power and that's what that's why core is non-empty. And in particular we give exact example of a core. Okay, thank you very much. Please give a round of applause to our speaker. So we're gonna to have, I think, a break and the next session will start at 1.50. Thank you. Terima kasih telah menonton! Kampung Kampung Kampung Thank you.", + "sources_streamethId": "673700f51b0f83434db18b94", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673700f51b0f83434db18b94.vtt", + "transcript_text": " All right hello everyone how's everyone going how's everyone doing good Good? Okay. Let's cheer up. 
Okay, today I'm going to talk about a title called Secondhand Liberalism, a story of microdosing internet freedom. It has little to do with Ethereum, but has a lot to do with Ethereum at the same time. And this lightning talk is also going to contribute to a larger panel at 2 p.m. in the same room called From Western Liberalism to World Liberalism. Okay. So when I hear the word liberalism, it really hits differently when I speak in my native language, in Chinese. Liberalism translates as . The literal meaning is freedom principles or freedom doctrines. This translation actually encapsulates a very brief but remarkable period of China's internet history which I was fortunate enough to live through. Do note, the story I'm going to tell you in the next five minutes is a very simplified version of the history of the Chinese internet. To acknowledge the nuance and the complexity, due to the restriction of time, I'm going to tell a very, very nutshell version of it. So I want to take you back to the early 2000s. It is a time when posting a meme like this, Winnie the Pooh, on the Chinese internet was still legal. And it is unthinkable right now, because as soon as you post anything of Winnie the Pooh on Weibo, the Chinese Twitter, the post will very likely be deleted in a few seconds. So during this period, China experienced what I called second-hand liberalism. What I mean by that is China didn't have democracy or elections at the time, and there wasn't this freedom of speech, that press freedom. Picture millions of Chinese citizens gathering in those digital town squares; I'm talking about sites and forums like Tianya, Bullog, Weibo, and Renren. They were not just social media sites, they were intellectual playgrounds. Chinese netizens were diving into discussions about liberal democracy, about human rights, about social justice, and this is unthinkable today. And growing up in China, I never actually learned about liberal ideologies from my textbooks. But I absorbed liberalism online. It was everywhere. It was tangible in the cyberspace, but it was invisible in practice. The internet at the time buzzed with heated debates. We're talking about government corruption. We're talking about labor rights. We're talking about environmental issues. We're talking about the forced relocation before Beijing's Olympics. And we're also talking about the controversy regarding the Wenchuan earthquake. Those are all red flags today. When censorship began tightening, the Chinese netizens began posting puzzles and codes. They developed a very sophisticated system of communicating with each other and understanding in this hidden language. It was about building a community with shared hidden meanings. If you're in crypto, you're going to be really familiar with this. Building a community is all about inventing those metaphors, inventing those images and shared memes so you guys can understand each other. And this was the Chinese internet in the early 2000s. This brief period, I'm talking about from the 2000s to the early 2010s, fostered what scholar Yang Guobin called the contentious public sphere. It is an archipelago of critical discussions about political awakening and about intellectual exploration.
These spaces encouraged what researcher Angela Wu called reading dispossession for changing political understanding, and thus creating solidarity, very briefly. President Bill Clinton said on March 8th, 2000: China is trying to crack down on the internet. That's ridiculous, because cracking down on the internet is like trying to nail Jell-O to the wall. It's impossible. And President Clinton said, good luck, Chinese censors. It's impossible to censor the internet. However, guess what? It was President Clinton's naivety, of something a scholar might call techno-solutionism. It turns out you can nail Jell-O to the wall. Right? There is something called the Great Firewall today. And the technology that, as some would imagine, would inevitably bring China into liberation proved to be just another infrastructure that could be controlled and monitored. This transformation has been very stark. The golden age of Chinese internet freedom only lasted, I would calculate, for 15 years. Today, the Chinese internet is, as you can tell from the chart in the first picture. And the Great Firewall has been nearly impenetrable. This vibrant intellectual discussion, this vibrant discussion about civil rights and all the issues in the society, has largely disappeared behind the layers of surveillance and censorship. And the brief window that I lived through when I was a teenager, when I was a child in China, has permanently closed. The term liberalism, freedom principle, has become pretty much stigmatized in China. If you ask a Chinese citizen on the street, what do you think about , what do you think about liberalism? People will be like, this is Western, evil Western infiltration. This is a Western ideology we do not like. Liberalism is hypocritical and it lost its meaning. So this is the story, and I want us to think about this story not just as a cautionary tale about technology and control; it really reminds us that liberalism, we're talking about freedom of expression and freedom of choice, is very precious and precarious. So the internet and the technology underneath are not inherently liberating, as the Chinese internet today is highly constructed and a lot of censorship mechanisms are artificial features. So to those of you who are building technology, the story of the death of China's free internet is really sad. It really reminds us that technological progression doesn't bring liberation automatically, but rather technology is a result of ideology-based choices made by governments, corporations, organizations and eventually individuals. Thank you very much. Please give a hand to Afra.
We are already slightly over time.", "eventId": "devcon-7", - "slot_start": 1731648600000, - "slot_end": 1731649200000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1oRDP1vAH4P88oiBLEXOsJco7KgtJbQmYvKAeAkMug6Y", - "resources_slides": null, + "slot_start": 1731650400000, + "slot_end": 1731651000000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1l0E0bgrVT-d4Vqx2zZeLBbCopXsf2glznve9weGlx2U", + "resources_slides": "https://drive.google.com/file/d/1tqYaGOQ-8lCOmEupzwnwcnX7cbCICLwP/view", "speakers": [ - "akaki-mamageishvili" + "afra-zhao-wang" ] }, "vector": [ 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -660292,7 +658397,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -660430,6 +658534,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -660601,12 +658706,10 @@ 0, 0, 0, - 6, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -660618,7 +658721,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -660710,7 +658812,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -661044,7 +659145,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -661071,7 +659171,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -661162,10 +659261,12 @@ 0, 0, 0, - 2, 0, 0, + 2, + 0, 0, + 2, 0, 0, 0, @@ -661177,43 +659278,38 @@ 0, 0, 0, - 2, 0, 0 ] }, { "session": { - "id": "secondhand-liberalism-a-story-of-microdosing-internet-freedom", - "sourceId": "TB8DG7", - "title": "Secondhand Liberalism: A Story of Microdosing Internet Freedom", - "description": "Liberalism isn't the default. For those growing up in non-Western societies, liberalism is often \"secondhand\"—it's imbued in Western cultural products and present in software and the internet which in turn service liberal ideals. What if it's no longer the case?", - "track": "Real World Ethereum", - "type": "Lightning Talk", + "id": "securing-ethereum", + "sourceId": "9FQPCQ", + "title": "Securing Ethereum", + "description": "Discussion from security leaders in the audit, crowd-sec and bug bounty space on the importance of Ethereum security, current trends and predictions for the space in the coming years.", + "track": "[CLS] ETH Escape - Speed Hacking Challenge", + "type": "Panel", "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], "keywords": [], - "duration": 543, + "tags": [], "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673700f51b0f83434db18b94", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673700f51b0f83434db18b94.vtt", - "transcript_text": " All right hello everyone how's everyone going how's everyone doing good Good? Okay. Let's cheer up. Okay, today I'm going to talk about a title called Secondhand Liberalism, a story of microdosing internet freedom. It has little things to do with Ethereum, but has a lot of things to do with Ethereum at the same time. And this lighting talk also is going to contribute to a larger panel at 2 p.m. in the same room called From Western Liberalism to World Liberalism. Okay. So when I hear the word liberalism, it really hits differently when I speak in my native language in Chinese. Liberalism translate as . The literal meaning is freedom principles or freedom doctrines. This translation actually encapsulates a very brief but remarkable period of China's internet history which I was fortunate enough to live through. To notice, the story in the next five minutes I'm going to tell you is a very simplified version of the history of Chinese internet. 
To acknowledge the nuance and the complexity due to the restriction of time, I'm going to tell a very, very nutshell version of it. So I want to take you back to the early 2000s. It is a time when posting a meme like this, Winnie the Pooh, on the Chinese internet was still legal. And it is unthinkable right now, because as soon as you post anything of Winnie the Pooh on Weibo, the Chinese Twitter, the post will very likely to be deleted in a few seconds. So during this period, China experienced what I called second-hand liberalism. What I mean by that is China didn't have democracy or election at the time, and there wasn't this freedom of speech to be free, that breast freedom. If you picture millions of Chinese citizens gathering in those digital town squares, I'm talking about sites and forums like Tianya, Bullog, Weibo, and Renren. They were not just social media sites, but they were intellectual playgrounds. Chinese netizens were diving into discussions about liberal democracy, about human rights, about social justices, and this is non-thinkable today. And growing up in China, I never actually learned about liberal ideologies from my textbooks. But I absorbed liberalism online. It was everywhere. It was tangible in the cyberspace, but it was invisible in practice. The internet at the time buzzed with hate today. We're talking about government corruption. We're talking about labor rights. We're talking about government corruption. We're talking about labor rights. We're talking about environmental issues. We're talking about the forced relocation before Beijing's Olympic. And we're also talking about the controversy regarding the Wenchuan earthquake. Those are all red flags today. When censorship began tightening, the Chinese netizens began posting puzzles and codes. They developed a very sophisticated system of communicating with each other and understanding in this hidden language. It was about building a community with shared hidden meanings. If you're in crypto, you're going to be really familiar with this. Building a community with a shared hidden meanings. If you're in crypto, you're gonna be really familiar with this. Building a community is all about inventing those metaphors, inventing those images and shared memes so you guys can understand each other. And this was Chinese internet in the early 2000s. This brief period, I'm talking about from 2000s and early 2010s, fostered what scholar Yang Guobing called contentious public sphere. It is archipelago of critical discussions about political awakening and about intellectual exploration. These spaces encouraged what researcher Angela Wu called reading dispossession for changing political understanding, and thus creating solidarity very briefly. President Bill Clinton said in 2000, March 8th, saying, China is trying to crack down the internet. That's ridiculous because cracking down the internet is like trying to the wall. It's impossible. And President Clinton said, good luck, Chinese censors. It's impossible to censor the internet. However, guess what? It was President Clinton's naivety of something a scholar might call techno-solutionism. It turns out you can nail a jail to the wall. Right? There is something called the Great Firewall today. And the technology, as someone would imagine, that would inevitably bring China into liberation, proven to be just another infrastructure, could be controlled and can be monitored. This transformation has been very stark. 
The golden age of the Chinese internet freedom only lasted, I would calculate, for 15 years. Today, the Chinese internet is, as you can tell from the chart in the first picture. And the Great Firewall has been nearly impenetrable. This vibrant intellectual discussion, this vibrant discussion about civil rights and all the issues in the society have largely disappeared behind the layers of surveillance and censorship. And the brief window that I lived through when I was a teenager, when I was a child in China, has permanently closed. The term liberalism, freedom principle, has become pretty much stigmatized in China. If you ask a Chinese citizen on the street, what do you think about , what do you think about liberalism? People will be like, this is Western, evil Western infiltration. This is a Western ideology we do not like. Liberalization is hypocritical and it lost its meaning. So this story is like this, and I want us to think about this story. Not just like a cautionary tale about technology and control, but it really reminds us that liberalism, we're talking about freedom of expression and freedom of choice, is very precious and precarious. So the internet of the technology and underneath are not inherently liberating, as the Chinese internet today is highly constructed and a lot of censorship mechanisms are artificial features. So to those of you who are building technology, the story of the death of the China's free internet is really sad. It really reminds us that technological progression doesn't bring liberation automatically, but but rather technology is a result of ideology based choices made by governments, corporations, organizations and eventually individuals. Thank you very much. Please give a hand to Afra. We are already slightly over time.", - "eventId": "devcon-7", - "slot_start": 1731650400000, - "slot_end": 1731651000000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1l0E0bgrVT-d4Vqx2zZeLBbCopXsf2glznve9weGlx2U", - "resources_slides": null, "speakers": [ - "afra-zhao-wang" - ] + "michael-lewellen", + "neville-grech", + "pietro-carta", + "michael-okeeffe", + "luna-tong" + ], + "eventId": "devcon-7", + "slot_start": 1731573000000, + "slot_end": 1731576300000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1m_-I_-ifsaORsMAY0_k78On1s9BICet-47EDiFzpEAM", + "resources_slides": "" }, "vector": [ 0, @@ -661222,8 +659318,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -661237,6 +659331,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -661538,6 +659633,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -661799,6 +659895,9 @@ 0, 0, 6, + 6, + 6, + 6, 0, 0, 0, @@ -662523,14 +660622,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -662552,31 +660643,41 @@ }, { "session": { - "id": "securing-ethereum", - "sourceId": "9FQPCQ", - "title": "Securing Ethereum", - "description": "Discussion from security leaders in the audit, crowd-sec and bug bounty space on the importance of Ethereum security, current trends and predictions for the space in the coming years.", - "track": "[CLS] ETH Escape - Speed Hacking Challenge", - "type": "Panel", - "expertise": "", + "id": "securing-grandines-performance", + "sourceId": "GGWXYQ", + "title": "Securing Grandine's Performance", + "description": "Our project focuses on improving Grandine’s performance and stability through targeted benchmarking and profiling. 
By conducting a comparative analysis with Lighthouse, we aim to identify architectural optimizations, especially those related to parallelization. Establishing baseline metrics is key to this approach, as it allows us to focus on refining critical areas within Grandine for optimal, efficient performance, thereby supporting the robustness of the Ethereum network.", + "track": "[CLS] EPF Day", + "type": "Lightning Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, + "tags": [ + "Consensus", + "Consensus Mechanisms", + "Core Protocol", + "Cryptography", + "Security" + ], "keywords": [], - "tags": [], + "duration": 813, "language": "en", - "speakers": [ - "michael-lewellen", - "neville-grech", - "pietro-carta", - "michael-okeeffe", - "luna-tong" - ], + "sources_swarmHash": "3c895500f7d570e1fc742742e5d135753d9fe1464ebae8ef9cf72bb0ac6582c2", + "sources_youtubeId": "78N_LYGSZ3Q", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673459489dbb7a90e1204ed1", "eventId": "devcon-7", - "slot_start": 1731573000000, - "slot_end": 1731576300000, + "slot_start": 1731482100000, + "slot_end": 1731483000000, "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1m_-I_-ifsaORsMAY0_k78On1s9BICet-47EDiFzpEAM" + "resources_presentation": "https://docs.google.com/presentation/d/1prZ931qBVTXdBa8oGWfuFhX5yIKVdrAsZ9rAg99ejog", + "resources_slides": "https://drive.google.com/file/d/1ewIaM4llzZdfdKOAYb_BEGlRfJl63Uhl/view", + "speakers": [ + "mercy-boma-naps-nkari", + "zarathustra" + ] }, "vector": [ 0, @@ -662594,10 +660695,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -662901,10 +660998,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -663162,11 +661255,6 @@ 0, 0, 0, - 6, - 6, - 6, - 6, - 0, 0, 0, 0, @@ -663178,6 +661266,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -663338,10 +661428,12 @@ 0, 0, 0, + 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -663349,11 +661441,13 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -663528,6 +661622,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -663897,6 +661992,7 @@ 2, 0, 0, + 0, 2, 0, 0, @@ -663915,43 +662011,50 @@ }, { "session": { - "id": "securing-grandines-performance", - "sourceId": "GGWXYQ", - "title": "Securing Grandine's Performance", - "description": "Our project focuses on improving Grandine’s performance and stability through targeted benchmarking and profiling. By conducting a comparative analysis with Lighthouse, we aim to identify architectural optimizations, especially those related to parallelization. Establishing baseline metrics is key to this approach, as it allows us to focus on refining critical areas within Grandine for optimal, efficient performance, thereby supporting the robustness of the Ethereum network.", - "track": "[CLS] EPF Day", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "security-frameworks-by-seal", + "sourceId": "A7TNUF", + "title": "Security Frameworks by SEAL", + "description": "Comprised of dedicated security specialists, SEAL aims to spread awareness and educate the community about Web3 security best practices and pitfalls. We address various challenges, compile accessible resources, and create new content. Open to all backgrounds, our guidelines provide comprehensive security frameworks for Web3 projects, offering best practices and practical solutions throughout their lifecycle. 
We aim to make Web3 a safer space for developers and users alike.", + "track": "Security", + "type": "Talk", + "expertise": "Beginner", + "audience": "Product", "featured": false, "doNotRecord": false, + "keywords": [ + "Best practices", + "Guidelines", + "Frameworks." + ], "tags": [ - "Consensus", - "Consensus Mechanisms", - "Core Protocol", - "Cryptography", + "Security", + "Hacks", + "Public good", + "framework", + "Hacks", + "Public good", "Security" ], - "keywords": [], - "duration": 813, "language": "en", - "sources_swarmHash": "3c895500f7d570e1fc742742e5d135753d9fe1464ebae8ef9cf72bb0ac6582c2", - "sources_youtubeId": "78N_LYGSZ3Q", + "sources_swarmHash": "41a583919688b5e2f6ad7b3fa35a6f8a8fa49a66f9c7cd395a60a280ae8a22ae", + "sources_youtubeId": "XTrR7aQLeWQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673459489dbb7a90e1204ed1", - "eventId": "devcon-7", - "slot_start": 1731482100000, - "slot_end": 1731483000000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1prZ931qBVTXdBa8oGWfuFhX5yIKVdrAsZ9rAg99ejog", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "mercy-boma-naps-nkari", - "zarathustra" - ] + "matta-the-red-guild" + ], + "eventId": "devcon-7", + "slot_start": 1731576600000, + "slot_end": 1731578400000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1HmUewjGmXzH3e1271bv_rXsd73TpbSS90ZBFslgi4ic", + "resources_slides": "https://drive.google.com/file/d/1vvOCM-7zuriJcDo3qpapOcyrj3Hd46xk/view" }, "vector": [ + 6, 0, 0, 0, @@ -663967,9 +662070,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -664233,6 +662333,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -664539,8 +662640,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -664708,7 +662807,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -664716,15 +662814,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -664817,6 +662911,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -664871,6 +662966,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -664897,7 +662993,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -664961,6 +663056,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -665266,7 +663362,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -665276,6 +663371,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -665288,45 +663385,47 @@ }, { "session": { - "id": "security-frameworks-by-seal", - "sourceId": "A7TNUF", - "title": "Security Frameworks by SEAL", - "description": "Comprised of dedicated security specialists, SEAL aims to spread awareness and educate the community about Web3 security best practices and pitfalls. We address various challenges, compile accessible resources, and create new content. Open to all backgrounds, our guidelines provide comprehensive security frameworks for Web3 projects, offering best practices and practical solutions throughout their lifecycle. We aim to make Web3 a safer space for developers and users alike.", - "track": "Security", + "id": "security-of-fiat-shamir-transformation", + "sourceId": "VMNCS8", + "title": "Security of Fiat-Shamir transformation", + "description": "Fiat-Shamir transformation underlies virtually every SNARK used in the Ethereum ecosystem as it makes interactive proofs non-interactive. 
In this talk, we discuss the security issues if the transformation is used incorrectly (e.g., parallel repetition of a ZKP defined over a small field; such protocols became very popular thanks to their efficiency), provide examples, show the security loss that the transformation brings, and the concrete security of ZKP. Finally, we discuss best practices for k", + "track": "Applied Cryptography", "type": "Talk", - "expertise": "Beginner", - "audience": "Product", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, - "keywords": [ - "Best practices", - "Guidelines", - "Frameworks." - ], "tags": [ + "Fiat-Shamir heuristic", + "STARK", "Security", - "Hacks", - "Public good", - "framework", - "Hacks", - "Public good", - "Security" + "iop", + "Fiat-Shamir heuristic", + "Security", + "STARK" ], - "language": "en", - "speakers": [ - "matta-the-red-guild" + "keywords": [ + "small fields", + "IOP" ], + "duration": 1593, + "language": "en", + "sources_swarmHash": "48a411b091a8c6046acad9b1ee6abd821654d0b9005b8e2e4fffe3fe33eac9c6", + "sources_youtubeId": "VIMnaOUvw08", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731576600000, - "slot_end": 1731578400000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1HmUewjGmXzH3e1271bv_rXsd73TpbSS90ZBFslgi4ic" + "slot_start": 1731482400000, + "slot_end": 1731484200000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1qlPnS97cEpEKuQEuS06efm97LnehdTDo-7FRoyWVIHY", + "resources_slides": "https://drive.google.com/file/d/1hVGyOb5GLWW87XGWxh0ptWmu-zAZOWJS/view", + "speakers": [ + "michal-zajac" + ] }, "vector": [ - 6, - 0, - 0, - 0, 0, 0, 0, @@ -665337,6 +663436,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -665603,7 +663703,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -665915,6 +664014,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -666183,15 +664283,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -666238,40 +664329,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -666589,6 +664646,54 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -666630,26 +664735,19 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -666659,47 +664757,50 @@ }, { "session": { - "id": "security-of-fiat-shamir-transformation", - "sourceId": "VMNCS8", - "title": "Security of Fiat-Shamir transformation", - "description": "Fiat-Shamir transformation underlies virtually every SNARK used in the Ethereum ecosystem as it makes interactive proofs non-interactive. In this talk, we discuss the security issues if the transformation is used incorrectly (e.g., parallel repetition of a ZKP defined over a small field; such protocols became very popular thanks to their efficiency), provide examples, show the security loss that the transformation brings, and the concrete security of ZKP. 
Finally, we discuss best practices for k", - "track": "Applied Cryptography", - "type": "Talk", + "id": "security-through-obscurity-using-microdots-to-store-secrets", + "sourceId": "UHQDPU", + "title": "Security through obscurity. Using microdots to store secrets.", + "description": "Key custody remains a tricky problem to solve. Most of the focus around improving the security of key custody revolve around software based approaches like secret sharing. However, physical approaches are also possible. \r\n\r\nThis talk discusses on how to secure secrets using microdots and how microdots may be fabricated at home with legally accessible tools.\r\n\r\nMicrodots is a technique which allows one to shrink documents down. This allows one to embed secrets in documents in plain sight.", + "track": "Security", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Lobby", "featured": false, "doNotRecord": false, "tags": [ - "Fiat-Shamir heuristic", - "STARK", - "Security", - "iop", - "Fiat-Shamir heuristic", + "Digital Sovereignty", + "Cryptography", "Security", - "STARK" + "Hardware wallets", + "Custody", + "Cryptography", + "Custody", + "Digital Sovereignty", + "Hardware wallets", + "Security" ], "keywords": [ - "small fields", - "IOP" + "None" ], - "duration": 1593, + "duration": 579, "language": "en", - "sources_swarmHash": "48a411b091a8c6046acad9b1ee6abd821654d0b9005b8e2e4fffe3fe33eac9c6", - "sources_youtubeId": "VIMnaOUvw08", + "sources_swarmHash": "70b7a1a2acf3ec307ad982db5ea9e354b109ab2b5981ba87ee71c5967e486a52", + "sources_youtubeId": "3mXa1oeHzzA", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731482400000, - "slot_end": 1731484200000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1qlPnS97cEpEKuQEuS06efm97LnehdTDo-7FRoyWVIHY", - "resources_slides": null, + "slot_start": 1731406200000, + "slot_end": 1731406800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1zGqyVZiy__TgQYZes9fefN5S6uBUQLT9Yl6wbxjJ-2M", + "resources_slides": "https://drive.google.com/file/d/1NvOTq-c11nvheTGJj72lD0LAQJCrQhZP/view", "speakers": [ - "michal-zajac" + "jseam" ] }, "vector": [ + 6, 0, 0, 0, @@ -666710,7 +664811,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -667448,9 +665548,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -667464,6 +665561,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -667500,6 +665598,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -667662,8 +665761,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -667788,6 +665885,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -667925,9 +666023,6 @@ 0, 0, 2, - 2, - 0, - 0, 0, 0, 0, @@ -668019,7 +666114,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -668031,55 +666125,55 @@ 0, 0, 0, - 0 + 0, + 2 ] }, { "session": { - "id": "security-through-obscurity-using-microdots-to-store-secrets", - "sourceId": "UHQDPU", - "title": "Security through obscurity. Using microdots to store secrets.", - "description": "Key custody remains a tricky problem to solve. Most of the focus around improving the security of key custody revolve around software based approaches like secret sharing. However, physical approaches are also possible. \r\n\r\nThis talk discusses on how to secure secrets using microdots and how microdots may be fabricated at home with legally accessible tools.\r\n\r\nMicrodots is a technique which allows one to shrink documents down. 
This allows one to embed secrets in documents in plain sight.", - "track": "Security", + "id": "semaphore-v4", + "sourceId": "ZU9D8U", + "title": "Semaphore V4", + "description": "Semaphore is a protocol enabling individuals to prove group membership and send messages (such as votes or endorsements) anonymously. The latest version enhances efficiency and simplifies the use of libraries and contracts. This presentation will cover the new features, project vision, and the importance and challanges of zero-knowledge technologies.", + "track": "Applied Cryptography", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Lobby", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Digital Sovereignty", - "Cryptography", - "Security", - "Hardware wallets", - "Custody", - "Cryptography", - "Custody", - "Digital Sovereignty", - "Hardware wallets", - "Security" + "Privacy", + "Zero-Knowledge", + "User Experience", + "proof-of", + "membership", + "Privacy", + "User Experience", + "Zero-Knowledge" ], "keywords": [ - "None" + "semaphore", + "anonymity sets", + "proof of membership" ], - "duration": 579, + "duration": 1035, "language": "en", - "sources_swarmHash": "70b7a1a2acf3ec307ad982db5ea9e354b109ab2b5981ba87ee71c5967e486a52", - "sources_youtubeId": "3mXa1oeHzzA", + "sources_swarmHash": "619dc838e91326f82a78ebd1207f07fa45e9941e162c7999de38f6d08fee6691", + "sources_youtubeId": "OErC2MyIKjY", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731406200000, - "slot_end": 1731406800000, + "slot_start": 1731397200000, + "slot_end": 1731397800000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1zGqyVZiy__TgQYZes9fefN5S6uBUQLT9Yl6wbxjJ-2M", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/12uKp51aS4tQMokLfQJRDQlh518PRLNinkH3148Cq9Do", + "resources_slides": "https://drive.google.com/file/d/1Nj0EM_QtP6Fbu2pGvbgXkl-l2cmQ1nSH/view", "speakers": [ - "jseam" + "cedoor" ] }, "vector": [ - 6, 0, 0, 0, @@ -668090,6 +666184,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -668830,10 +666925,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -668847,6 +666938,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -668880,7 +666972,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -668947,6 +667038,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -669168,7 +667260,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -669200,6 +667291,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -669391,12 +667483,11 @@ 0, 0, 0, - 0, - 0, 2, 0, 0, 0, + 2, 0, 0, 0, @@ -669409,52 +667500,42 @@ 0, 0, 0, - 0, - 2 + 0 ] }, { "session": { - "id": "semaphore-v4", - "sourceId": "ZU9D8U", - "title": "Semaphore V4", - "description": "Semaphore is a protocol enabling individuals to prove group membership and send messages (such as votes or endorsements) anonymously. The latest version enhances efficiency and simplifies the use of libraries and contracts. This presentation will cover the new features, project vision, and the importance and challanges of zero-knowledge technologies.", - "track": "Applied Cryptography", + "id": "shadow-network-simulations", + "sourceId": "H7HCJJ", + "title": "Shadow Network Simulations", + "description": "In my EPF project, I implemented Ethshadow, a configuration generator for simulating Ethereum networks using Shadow, and used it to research improvements to the current state of PeerDAS and to estimate the effects of IDONTWANT on node bandwidth. 
In this presentation, I will present my findings and make a case for testing using Ethshadow.", + "track": "[CLS] EPF Day", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Privacy", - "Zero-Knowledge", - "User Experience", - "proof-of", - "membership", - "Privacy", - "User Experience", - "Zero-Knowledge" - ], - "keywords": [ - "semaphore", - "anonymity sets", - "proof of membership" + "Core Protocol", + "Layer 1", + "Testing" ], - "duration": 1035, + "keywords": [], + "duration": 936, "language": "en", - "sources_swarmHash": "619dc838e91326f82a78ebd1207f07fa45e9941e162c7999de38f6d08fee6691", - "sources_youtubeId": "OErC2MyIKjY", + "sources_swarmHash": "02ccab671e4eb30a5b598c337aa17ffcc9a69a77816b79dcc9bd7f7834af3df6", + "sources_youtubeId": "uVvbuK0dpeQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "67347cd49dbb7a90e16882fa", "eventId": "devcon-7", - "slot_start": 1731397200000, - "slot_end": 1731397800000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/12uKp51aS4tQMokLfQJRDQlh518PRLNinkH3148Cq9Do", - "resources_slides": null, + "slot_start": 1731485700000, + "slot_end": 1731486600000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/13dCJ8eFHfsvUgtv1Dz5mrPCKUF6Y5dXPwWu0wN0ixkY", + "resources_slides": "https://drive.google.com/file/d/1_R2ZrugYt8vE__LbiUbCccitwR7Dk9zp/view", "speakers": [ - "cedoor" + "daniel-knopik" ] }, "vector": [ @@ -669468,12 +667549,12 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -670225,7 +668306,8 @@ 0, 0, 0, - 6, + 2, + 0, 0, 0, 0, @@ -670325,7 +668407,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -670450,6 +668531,23 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -670579,7 +668677,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -670685,27 +668782,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -670776,8 +668852,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -670794,37 +668870,44 @@ }, { "session": { - "id": "shadow-network-simulations", - "sourceId": "H7HCJJ", - "title": "Shadow Network Simulations", - "description": "In my EPF project, I implemented Ethshadow, a configuration generator for simulating Ethereum networks using Shadow, and used it to research improvements to the current state of PeerDAS and to estimate the effects of IDONTWANT on node bandwidth. In this presentation, I will present my findings and make a case for testing using Ethshadow.", - "track": "[CLS] EPF Day", - "type": "Lightning Talk", + "id": "simulating-an-ethereum-network-at-scale", + "sourceId": "FAZBAD", + "title": "Simulating an Ethereum network at scale", + "description": "Previously, when Ethereum client developers wanted to test their ideas on the network layer, they either had to use a simulation tool that could be used only with some programming language or had to do network emulation instead, which requires a cluster of computers to do it at scale rather than running it on a laptop-size machine. 
This talk will tell you how to simulate an Ethereum network with 100+ nodes on a laptop-sized machine with production Ethereum clients.", + "track": "Core Protocol", + "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Core Protocol", "Layer 1", - "Testing" + "simulation", + "Layer", + "1" ], - "keywords": [], - "duration": 936, + "keywords": [ + "Networking", + "Simulation" + ], + "duration": 1401, "language": "en", - "sources_swarmHash": "02ccab671e4eb30a5b598c337aa17ffcc9a69a77816b79dcc9bd7f7834af3df6", - "sources_youtubeId": "uVvbuK0dpeQ", + "sources_swarmHash": "803d858f15851efaa0200588d31df2fa0570a608f38b4923e8617c3cea7d94c9", + "sources_youtubeId": "g-VE038cW1M", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67347cd49dbb7a90e16882fa", + "sources_streamethId": "6736e0e71b0f83434d9fea11", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731485700000, - "slot_end": 1731486600000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/13dCJ8eFHfsvUgtv1Dz5mrPCKUF6Y5dXPwWu0wN0ixkY", - "resources_slides": null, + "slot_start": 1731564600000, + "slot_end": 1731566400000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1x5qwU96CuNwokAG1SeZ9BSYZKjgzyrpzL5MwVOtxJWQ", + "resources_slides": "https://drive.google.com/file/d/1kDxXu8LLYnFQuK8zwMi5UTLaZjC_P4nx/view", "speakers": [ - "daniel-knopik" + "daniel-knopik", + "pop" ] }, "vector": [ @@ -670832,6 +668915,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -670843,8 +668927,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -671421,9 +669503,7 @@ 0, 0, 6, - 0, - 0, - 0, + 6, 0, 0, 0, @@ -671598,9 +669678,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -671660,6 +669737,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -671823,7 +669901,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -672058,6 +670135,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -672146,7 +670224,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -672159,57 +670236,51 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "simulating-an-ethereum-network-at-scale", - "sourceId": "FAZBAD", - "title": "Simulating an Ethereum network at scale", - "description": "Previously, when Ethereum client developers wanted to test their ideas on the network layer, they either had to use a simulation tool that could be used only with some programming language or had to do network emulation instead, which requires a cluster of computers to do it at scale rather than running it on a laptop-size machine. This talk will tell you how to simulate an Ethereum network with 100+ nodes on a laptop-sized machine with production Ethereum clients.", - "track": "Core Protocol", + "id": "simulating-economic-systems-of-an-autonomous-world", + "sourceId": "KWKW3W", + "title": "Simulating Economic Systems of an Autonomous World", + "description": "This presentation reviews the basics of token systems design and their onchain game applications. This will be specifically tailored to onchain complicated economic systems and simulating them in interactive notebooks for real-time graphing; aiding in parameter tweaking and finding gaps in systems designs. 
The goal of this talk will be to begin to bridge the gap between complex token systems designers and onchain game designers.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, - "tags": [ - "Layer 1", - "simulation", - "Layer", - "1" - ], "keywords": [ - "Networking", - "Simulation" + "Token Engineering", + "Simulations", + "Complex Systems" + ], + "tags": [ + "Autonomous World", + "Gaming", + "Protocol Design" ], - "duration": 1401, "language": "en", "sources_swarmHash": "", "sources_youtubeId": "", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736e0e71b0f83434d9fea11", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731564600000, - "slot_end": 1731566400000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1x5qwU96CuNwokAG1SeZ9BSYZKjgzyrpzL5MwVOtxJWQ", - "resources_slides": null, "speakers": [ - "daniel-knopik", - "pop" - ] + "nico-rodriguez" + ], + "eventId": "devcon-7", + "slot_start": 1731577800000, + "slot_end": 1731579300000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1JGirNWdZq9HEHUw7sdVF-0QUGOk9fJFHX5UmLIB_6hk", + "resources_slides": "https://drive.google.com/file/d/1WbHadz-O2MJY3w-YoLENCtorEaisyeZ6/view" }, "vector": [ - 0, - 0, - 0, - 0, - 6, 0, 0, 0, @@ -672222,6 +670293,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -672365,6 +670437,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -672797,8 +670870,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -672971,7 +671042,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -673005,6 +671075,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -673034,7 +671105,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -673069,6 +671139,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -673433,7 +671505,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -673523,8 +671594,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -673541,35 +671612,26 @@ }, { "session": { - "id": "simulating-economic-systems-of-an-autonomous-world", - "sourceId": "KWKW3W", - "title": "Simulating Economic Systems of an Autonomous World", - "description": "This presentation reviews the basics of token systems design and their onchain game applications. This will be specifically tailored to onchain complicated economic systems and simulating them in interactive notebooks for real-time graphing; aiding in parameter tweaking and finding gaps in systems designs. The goal of this talk will be to begin to bridge the gap between complex token systems designers and onchain game designers.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Research", + "id": "singer-sing-writer-hour-with-adegbengaoggunbdeje", + "sourceId": "R9KTR7", + "title": "Singer sing writer hour with adegbengaoggunbdeje", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Token Engineering", - "Simulations", - "Complex Systems" - ], - "tags": [ - "Autonomous World", - "Gaming", - "Protocol Design" - ], + "keywords": [], + "tags": [], "language": "en", - "speakers": [ - "nico-rodriguez" - ], + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731577800000, - "slot_end": 1731579300000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1JGirNWdZq9HEHUw7sdVF-0QUGOk9fJFHX5UmLIB_6hk" + "slot_start": 1731470400000, + "slot_end": 1731474000000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/188EWHuoqMHZmI_lZQs8v-nCOf8dWQUXTZ39BGcW23wE", + "resources_slides": "" }, "vector": [ 0, @@ -673581,10 +671643,10 @@ 0, 0, 0, + 6, 0, 0, 0, - 6, 0, 0, 0, @@ -673728,7 +671790,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -674369,7 +672430,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -674433,8 +672493,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -674889,8 +672947,6 @@ 2, 0, 0, - 0, - 0, 2, 0, 0, @@ -674903,37 +672959,59 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "singer-sing-writer-hour-with-adegbengaoggunbdeje", - "sourceId": "R9KTR7", - "title": "Singer sing writer hour with adegbengaoggunbdeje", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", - "audience": "Engineering", + "id": "single-slot-finality-and-the-future-of-staking", + "sourceId": "LZCP8E", + "title": "Single Slot Finality and the future of staking", + "description": "Discussing the evolution of the thinking around future upgrades to the Ethereum consensus protocol (single slot finality project) in relationship to the future of staking. 
For example discussing things like https://ethresear.ch/t/orbit-ssf-solo-staking-friendly-validator-set-management-for-ssf/19928/3", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "keywords": [ + "Economic", + "security" + ], + "tags": [ + "Core Protocol", + "Ethereum Roadmap", + "Home staking", + "Single-slot Finality", + "Consensus Mechanisms", + "Security", + "economy", + "Consensus Mechanisms", + "Core Protocol", + "Ethereum Roadmap", + "Home staking", + "Single-slot Finality" + ], "language": "en", - "speakers": [], + "sources_swarmHash": "82fbf9013dca892d6a0f02a4a78bd16cd2cd1448127fd267782bdeb1e9cbf5e9", + "sources_youtubeId": "6VEEAemYaeI", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "francesco" + ], "eventId": "devcon-7", - "slot_start": 1731470400000, - "slot_end": 1731474000000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/188EWHuoqMHZmI_lZQs8v-nCOf8dWQUXTZ39BGcW23wE" + "slot_start": 1731573600000, + "slot_end": 1731575400000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1198JUW8nHiS-gIHBkbDTKrorHlxq2jJXKTiMaVCMvcI", + "resources_slides": "https://drive.google.com/file/d/1634mOw_plgIJ5zzOzx_9Wq1b_ypPpluz/view" }, "vector": [ - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -675514,6 +673592,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -675681,6 +673760,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -675699,6 +673779,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -675756,6 +673837,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -675764,6 +673846,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -675869,7 +673952,9 @@ 0, 0, 0, + 2, 0, + 2, 0, 0, 0, @@ -676058,6 +674143,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -676235,23 +674321,15 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, 2, 0, 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -676265,49 +674343,52 @@ }, { "session": { - "id": "single-slot-finality-and-the-future-of-staking", - "sourceId": "LZCP8E", - "title": "Single Slot Finality and the future of staking", - "description": "Discussing the evolution of the thinking around future upgrades to the Ethereum consensus protocol (single slot finality project) in relationship to the future of staking. For example discussing things like https://ethresear.ch/t/orbit-ssf-solo-staking-friendly-validator-set-management-for-ssf/19928/3", - "track": "Core Protocol", + "id": "slangs-query-api-a-better-way-to-analyse-solidity-code", + "sourceId": "8PYLB7", + "title": "Slang’s Query API: a better way to analyse Solidity code", + "description": "Slang is Nomic Foundation’s modular set of Solidity compiler APIs. 
This presentation will review Slang’s query engine approach to analysing Solidity code, and explain why it makes building tools that support multiple Solidity versions significantly easier than existing solutions, leading overall to higher quality tools.", "track": "Developer Experience", "type": "Talk", "expertise": "Expert", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "Developer Infrastructure", "Tooling", "Languages", "compilers", "Developer Infrastructure", "Languages", "Tooling" ], "keywords": [ "Parsing", "Compiling" ], "duration": 1573, "language": "en", "sources_swarmHash": "43fe979794664aaea8f19c8d9b6da6366feea49e50b444c8a89c69179314f148", "sources_youtubeId": "ScMhFA5Jnhk", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736e51074749a4b8997dc40", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736e51074749a4b8997dc40.vtt", "transcript_text": " Okay, good day. My name is Tony. I work at the Nomic Foundation in Applied Research. I was previously a co-lead on the Slang project, and it's in this capacity that I'm going to speak to you today. I have a fantastic job working with excellent people at Nomic. Nomic is a non-profit and our number one core value is kindness, and we live this every day. And we are hiring. There's meant to be a QR code at the end of my slides but I'm afraid it didn't make it. And I want to give heartfelt thanks to the co-founders of Nomic, Pato and Fran, for creating and maintaining such an amazing place to work. So today we're going to explore the Slang query API, which is a new feature which makes working with Solidity code surprisingly straightforward. Now this is not a tutorial, so some of the details have been omitted. You can't copy and paste any of the code that I'm going to be providing. It's just so that you can, well, I'm hoping to inspire you, so that you go and find out some more details. Okay, so 20 minutes is a bit of a sprint, so I'm going to speed up. Let's go. Okay, so what is Slang? So Slang's our Solidity compiler library. It's basically a developer tool that enables you to write better tools. It parses and analyzes Solidity code from version 0.4.11 to the latest 0.8, which is basically all of the Solidity code that is live. Our focus is on correctness and convenience. We're about to release version 1, which covers the typical front-end features of a compiler, and our main objective, as I said, is to enable you to develop better tools. Slang has a radical open to clarity of meta architecture. Slang's an error-correcting compiler, so it always produces output, even if you've got source code errors. A key feature of Slang, which is what we're here to talk about, is its query API for code analysis. So compiler front ends fall into two main categories. There are those that produce what's called concrete syntax trees, and then produce an abstract syntax tree from that. And there are those that produce abstract syntax trees directly. So what's the difference between concrete syntax and abstract syntax?
Well, concrete syntax, as you can see on this slide, is a complete representation of the source code. It includes every character, every bit of white space, every comment. It's like having, as I say, like having the full book, like a PDF. Whereas an abstract syntax tree is a simplified tree representation. So it doesn't include white space and equal signs and punctuation. It's just got the essential details. It's like if you were reading the plot of a book but you didn't actually have the text. So here's an example of a concrete syntax tree. So we've got a simple variable declaration here and every single element of the original source is represented. Nothing is removed or simplified. Now, this is different from an AST, which will eliminate some of these details. So if we look at what we've got here, it's a simple variable declaration. Now, it consists of a type name, which is uint, and then some whitespace, an identifier, some whitespace, punctuation, the equal sign, some whitespace, a number literal, one, and then the semicolon. Now, what's crucial to understand is that we're preserving every single character. In fact, even if there's an error in the source code, you'll find it in the concrete syntax tree. So why is this important? Well, when we're building developer tools or transforming code, we often need the complete information so we can round trip. If we were just concerned with the meaning of the code, then we'd use an abstract syntax tree. But for many tools, like formatters, linters, or refactoring tools, we often want the full syntactic details. You don't want to lose comments when you're refactoring, and sometimes you want to know if you've left two blank lines and you want to preserve that. This level of detail is great, but it's also challenging. It's quite difficult to process this. On the one hand we have complete information, but working with detailed trees can be complex, and that's the challenge that the query API solves. So from now on I'll omit trivia nodes and whitespace from the examples. They're still there, but I don't need to show them. So let's look at a slightly more complex example. This definition shows how different elements nest within each other. And once again, notice how everything is preserved here. We've got a function definition, we've got the keyword function, the identifier, we've got a parameter list, which in this case is empty, but we still preserve it. And then nested in that is a block and a return statement and so on. Now, this level of detail allows us to maintain complete source fidelity. As I said, if we're going to round trip, we can track precise code locations. So if you want to do error reporting, it handles formatting preservation. And when you're building development tools, this detailed structure is essential for a certain class of development tools. So to emphasize this point, I know I'm being repetitive here, I really want to make this point though: why do we need a concrete syntax tree? Well, we really want to in a lot of tools, for example in Prettier, which is now using Slang by the way, using this technology, Prettier Solidity, we want to make sure that we can preserve every single character, especially comments. For example, you might have a tool that wants to deal with comments, wants to encode validation information in a special comment format. Not like the documentation comment, but your own special comment format, so you want to preserve those comments.
An AST on the other hand is focusing on the essential structure, which is good for semantic processing; it doesn't contain the formatting information, simplifies expressions and omits all comments and white space. So now that we understand what a CST is for, let's look at how you might traditionally process a CST. So this is the kind of code that you often see a lot of. You know what it's like to traverse a tree, especially a very deep and complex tree, which is what you typically get out of a compiler. And it's not pretty. What we're trying to do is actually quite simple. We just want to find all the variable declarations in some Solidity code, but look at what we have to do. So first of all we're writing a recursive function. We need recursion because we don't know how deep in the tree it is. Could be at the contract, inside a function, inside a block, anywhere. And then when we find each node we've got to manually check its kind, we've got to search through the children, we've got to handle all the edge cases, and we've got to manage the recursion stack, especially if you're threading state. And this is a simple example. In real-life code, you'd have to handle error cases, deal with unexpected node types, which is what you can get if you've got a source code error, for example. Manage your state during traversal, and if you've written a visitor pattern, you know that it's a pain in the ass to manage your state. You've got to handle parent references, and you've got to manage the position in the source code. Now, the problems with this approach are numerous. First, it's incredibly verbose. The amount of code you need to write to do even simple analysis is substantial, and the more code you write, the more you have to maintain, and the more places there are for bugs to hide. Second, it's error-prone, not only because of the volume of the code, but also it's quite tricky to write these when you're dealing with complicated trees. For example, in Solidity, variable declarations can appear in for loop initialisers. I don't know if this code would handle that. It's a question for the reader. It's hard to maintain. When the language evolves, and as you know, Solidity evolves significantly in all sorts of interesting ways, like in one of the 0.5s where the exponentiation operator changed its associativity. So when the language evolves, you need to update your traversal code, and that's because the traversal code is mixed with the analysis code. So what you want to do for analysis is mixed in with the mechanics of traversal. To make changes, you're often going to break things. Finally, and more importantly, this forces you to think about how you want to do things rather than what you want to do. And the intent of this code, finding variable declarations, is buried in here. You could look at a big bit of traversal code and you wouldn't know what it was meant to be doing. We need something better. So we'll have a look at some specific challenges with manual traversal. So recursion management. You've got to handle deep nesting efficiently, avoiding stack overflow. You've got to make sure you don't have any infinite loops. You've got to maintain your context, state management, and you've got to know when to stop recursing. Once again, if you use a visitor pattern where you've got external code driving your visitor, you often want to stop, or you don't want to go into the children of this node. How do you do that? That means you end up with a more complicated API. State tracking is quite complex.
You need to keep track of the parent node when you're walking a tree. You need to maintain scope information in the case of a compiler, build up your composite results, handle cross-node relationships, i.e. you want to link this node which you visited 30 minutes ago with this other node which you're visiting now. Now error handling just multiplies the complexity and edge cases keep appearing. For example, parenthesised expressions, nested structures, optional elements and language specific quirks of which there are plenty in Solidity. And finally you've got a big maintenance budget. Now I know I'm repeating this point but this is significant as you'll see when we get to queries, which I think is the next slide. So this is where queries come in. So let's look at how we solve some of those same problems using queries. So this is our first example. We want to find all the unit variable, all the uint variable declarations. It's as simple as saying, in variable declarations where the type name is uint and it's got an identifier, find that and return it. No traversal. No edge cases. Hardly any maintenance. No efficiency problems. So the second example shows something a bit more powerful. In this case we want to find immediately nested function declarations, i.e. a function inside another function. So here we're saying in every contract definition there are some contract members. In the contract members there's a contract member. For every function definition in there find the block and look for a statement in there that is itself a function definition, bind that to a variable called nested and return it. So why is this so powerful? Well first of all it's declarative, where it's saying what we want, not how we want to do it. It makes our code easier to understand and maintain, and when you look at this two months later it's obvious what it's doing. Whereas I'd suggest that you'd have a couple of pages of code traversing your tree if you were using manual traversal. And it wouldn't be obvious what it was actually meant to do. So it's structure aware. So the query understands Solidity's syntax structure. It knows about scope and it knows about nesting. And it also knows where type name, for example, in the first case here, appears in the tree. These patterns are composable. So we can build complex pattern matching from simple pattern matching. So if we wanted to find uint variables within these nested function definitions it would be as simple as putting that, appending that first query below the second query. But most importantly this is focusing once again on what we're trying to achieve. We can think about code patterns and structure design without getting bogged down in the implementation details. So let's have a look at some more advanced query patterns. This one isn't that advanced, but for example in an unchecked block you want to find all function calls. It's easy as that and it's obvious what it does. You could write this on one line. The second example is a bit more complicated but here we're looking for variable declarations, state variable declarations that have a type that is either a mapping or an array. And think about what this would take if you're doing manual traversal. You'd need scope tracking, type checking logic, complex conditional logic, and you'd need to carefully handle the nested structure here. So what are the use cases for this? Some use cases, because I'm hoping that people come up with far more use cases than we know. We want emergent benefits from this. 
Well, if you wanted to have custom coding standards, you want to enforce custom coding standards. You've got project-specific restrictions that you want to check. Or you want to do version compatibility checks, so you want to make sure that you don't use certain features of solidity. Style checking. Well, if you've got specific naming patterns you want to enforce specific structural conventions and you can have context aware rules now you might be thinking well can't I use a linter to do this well yes but this is meant to write the linter this is designed for writing linters so that's why these are the use cases. This is not an end user tool. This is a tool for developers to write tools. So you can do pattern detection. Anti-patterns, for example. Optimisation opportunities, which may not be immediately obvious if you've got a big library of things. Detecting complex structural patterns that you might want to simplify or mark. So once again, this is a kind of an ESLinter equivalent. Code transformation. Automated refactoring. Refactoring is transformations that preserve the semantics. Code modernisation. It's very easy to write patterns that then transform into more modern code. Automated modifications, i.e. things that aren't refactorings that actually change the code. And formatting. And as I said, Prettier Solidity is already using slang. One important point I want to make here is that we support WASM and we support specifically the component spec and wit. So for those who know what that means, this means that all of this technology works in the browser. It's also available as a Rust API or as TypeScript, which is our first target because most people are using TypeScript, which is our first target, because most people are using TypeScript. So, some more applications, code transfer, I've already done that. Documentation generation, it's easy to generate automatic documents by extracting function signatures, doing structure analysis, checking for particular usage patterns and documenting them, and processing comments. As I mentioned before, you might have validation information in a certain specialised comment format. Now, you can also use this technology in combination with AI tooling, for example, to produce diagrams from your code. You can use this by encoding the slang API as a rag. So what are the key benefits? Well, structure-aware queries. Your queries naturally align with the structure of the code that you're trying to match. You don't have to think about trees and nodes and traversal. Complete syntax preservation. You never lose information. Perform transformations without losing the formatting. And you can round trip from a CST back to source. Efficient pattern matching. So we can spend all the effort required to make this efficient. If you know anything about the state of the art in terms of tree pattern matching, and in fact we're not a search engine, we are a unifier. So we use a prologue slash datalog based mechanism which returns you all the potential matching results, not just the first one. You can avoid unnecessary traversals. We can cache results. We can index the syntax tree. Make it very efficient. It would take a lot of effort to get the same result if you were doing it yourself. Composable syntax rules, you can combine simple rules to make bigger ones, you can have a big library of these patterns and you can reuse them over your code, share queries between your projects. You've got maintainable analysis code. 
So the queries express your intent, not how you want to do it, but what you want to do. Your code is shorter, it's easier to understand. Any changes to the language, we take care of that. Slang takes care of that, so you don't have to. Less code means fewer bugs. And these benefits compound. It's not one plus one plus one. It's the whole is far greater than the sum of the parts. So what impact does this have on your development? Well, before using this query API, you'd spend significant time writing and debugging the traversal code. You'd struggle with maintaining the analysis logic, and you'd face challenges when adding new features. After it, you end up with clear, focused code. It's easy to maintain. You've got a robust implementation, and it's highly extensible. And that is it. Any questions? Fantastic. Thank you, Anthony. All right, let's get with the questions. So, folks, remember, you can scan that QR code, add questions to that list, smash that upvote button so that the most interesting question gets asked. Let's go with the first one. What are our advantages to using slang compared to Semgrep? Well, I must admit I'm not familiar with Semgrep, but I know the general concept. The specific thing here that this is a programmatic tool that you use to build other tools. Now, of course, you can combine things like Semgrep, I imagine, but I don't know if that is something that you would include in a tool for analysing lots of different versions of solidity. That makes sense. Thank you. How does slang differ from a fine-tuned LLM? Could slang's modularity allow to use LLMs in the future? Oh, this is... I'm in applied research, and this is something that is very actively under research. I'm not making any commitment, but if you've used fine-tuned LLMs, and I've used them a lot, for example, a lot of the code that I write is actually written by Claude. A lot of this presentation was written by Claude. Amazing tool, but you always need a human in the loop. a lot of this presentation was written by Claude. Amazing tool, but you always need a human in the loop. So, yes, you can do this using LLMs, but the thing with LLMs is that they are a human augmentation tool, whereas the approach that we're talking about here is exact and precise. If you've used LLMs to do this, you know that you spend a lot of time on prompt engineering, and then you cross your fingers, and something comes back, and it doesn't quite work, and you prompt it again, and so on and so forth. So, yes, this is certainly something that could be included in the future. There's a chance, but maybe not now. I mean, not immediately. Certainly not now. Okay. Are the limitations of a tree visitor-based approach not solved by using an API with CFG and a terminated representation like Slither provides? Well, if by CFG, well, you're either meaning two things. You're either meaning the control flow graph, which is typically something you'd find in an abstract syntax tree, or if you mean control flow as exposed by rust for example and this is something where you reify your control flow. So you return a token in your visitor which says do you want to continue, do you want to go down the trees. Now this tree visitor, that are, these solution, these slither may well solve the same kind of problem. We have a different set of constraints. For example, our query language is intended to be extended with semantic predicates and with the ability to do arbitrary recursion by skipping over arbitrary subtrees. So there's a lot that we want to do there. 
So I wouldn't say that in Slither you can or cannot do that. I'm not familiar enough with Slither. Perfect, thank you. Is there a question you'd like Moose in there? Well will this work with different EVM versions? Yes, it will, because we're dealing with source. We're well before EVM. Slang is a compiler, and it'll produce bytecode. So that's a question about the compiler, not about querying. Have we published the grammar for slang? Yes, we have. Slang is actually a declarative meta project. So you can use it for any programming language, not just Solidity. So we have a very good open source, we have a very good declarative meta project, so you can use it for any programming language, not just solidity. We have published that. Everything we do is open source. Nomic, foundation on GitHub, all of our development is in the open. You can reuse all of this. Awesome. How important is it to maintain code with different Solidity versions? Why not just use one version? Because if you want to be able to provide tools that analyse a large number of contracts, which may be an earlier version of Solidity than the latest one, for example, 0411. There's a contract on Mainnet that is 0411. You want to get the source to that. You want to provide analysis tools for it. That's why we want to support all of these versions. Now we don't go back to 001 or whatever the first version was. So we're pragmatic about it. But these are the versions that are live, and our goal is to support all the versions that are live. How do you determine the cut-off version? What's pragmatic? Analysis of Mainnet and all the contracts that are live. Okay, is it the number of contracts? Is it the value stored in those? Is it the amount of transactions? No, the earliest. I didn't do this analysis, so I must admit I don't know what process we used to determine that, but it is the case that the earliest contract in use, apparently, is 0411. Fantastic, thank you. What's the best practice for using slang to analyze a large number of programs? Where should the data be stored and in what format? Imagine this is something you run locally, correct? Yes, it is. Perfect. Thank you. All right. What update in Solidity was the hardest to adapt for? You mentioned quirks and weird updates in Solidity. Which one was the hardest? None of them was particularly hard. The volume of quirks is what is a challenge. I mean, we've got hundreds of edge cases. We had to analyse Sol-C because that's the only definition of the language and go through the code and then test it and then go to Sanctuary and look at massive numbers of contracts, run our compiler over them or run our parser over them. What breaks? What doesn't? We've got extensive test cases. So that was the challenge. It's the number, not any particular difficulty. Fantastic. Thank you. Do you dream in bytecode? No, I don't dream in... I dream compilers, actually. All right. Anthony, thank you a lot for your time and all the best to you. Thank you a lot for your time and all the best to you. Thank you. People, our next session will start in a few minutes. I am off for today. Thank you for those who spent a bit of time with me this morning. 
And I'll see you soon.", "eventId": "devcon-7", - "slot_start": 1731573600000, - "slot_end": 1731575400000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1198JUW8nHiS-gIHBkbDTKrorHlxq2jJXKTiMaVCMvcI" + "slot_start": 1731648600000, + "slot_end": 1731650400000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1y7kvxWFxGZ-TBTEld48n6Dz0MGYoIGHria1lhFAdTZo", + "resources_slides": "https://drive.google.com/file/d/1VukfZOfvmTCPi9Zgla8IjhhjDFLyuxPp/view", + "speakers": [ + "antony-blakey" + ] }, "vector": [ 0, 0, 0, - 0, 6, 0, 0, @@ -676885,7 +674966,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -676899,6 +674979,8 @@ 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -677055,7 +675137,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -677107,6 +675188,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -677132,7 +675214,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -677141,8 +675222,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -677152,6 +675231,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -677247,9 +675327,7 @@ 0, 0, 0, - 2, 0, - 2, 0, 0, 0, @@ -677284,6 +675362,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -677439,7 +675518,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -677618,12 +675696,11 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 2, + 2, + 0, 0, 0, 0, @@ -677640,58 +675717,53 @@ }, { "session": { - "id": "slangs-query-api-a-better-way-to-analyse-solidity-code", - "sourceId": "8PYLB7", - "title": "Slang’s Query API: a better way to analyse Solidity code", - "description": "Slang is Nomic Foundation’s modular set of Solidity compiler APIs. This presentation will review Slang’s query engine approach to analysing Solidity code, and explain why it makes building tools that support multiple Solidity versions significantly easier than existing solutions, leading overall to higher quality tools.", - "track": "Developer Experience", + "id": "smart-accounts-need-smart-sessions", + "sourceId": "SJDY99", + "title": "Smart Accounts need Smart Sessions", + "description": "The world of dapps is evolving and wallets are becoming smarter. This is powered by developments in Smart Accounts which unlock more user-friendly experiences. Learn about how WalletConnect is introducing Smart Sessions and walkthrough all the standards (EIPs, ERCs and CAIPs) that will make the future of wallet UX possible.", + "track": "Usability", "type": "Talk", - "expertise": "Expert", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Developer Infrastructure", - "Tooling", - "Languages", - "compilers", - "Developer Infrastructure", - "Languages", - "Tooling" + "interoperability" ], "keywords": [ - "Parsing", - "Compiling" + "standards", + "wallets", + "interoperability" ], - "duration": 1573, + "duration": 1802, "language": "en", - "sources_swarmHash": "43fe979794664aaea8f19c8d9b6da6366feea49e50b444c8a89c69179314f148", - "sources_youtubeId": "ScMhFA5Jnhk", + "sources_swarmHash": "06f9344ba6e1d54564b078134d5ad55ec3e142a2bb173b240d8df7aa64772788", + "sources_youtubeId": "GeYbDsOW4hQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736e51074749a4b8997dc40", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736e51074749a4b8997dc40.vtt", - "transcript_text": " Okay, good day. My name is Tony. I work at the NOMIC Foundation in Applied Research. I was previously a co-lead on the SLANG project and it's in this capacity that I'm going to speak to you today. I have a fantastic job working with excellent people at NOMIC. 
Nomic is a non-profit, and our number one core value is kindness, and we live this every day. And we are hiring. There's meant to be a QR code at the end of my slides, but I'm afraid it didn't make it. And I want to give heartfelt thanks to the co-founders of Nomic, Pato and Fran, for creating and maintaining such an amazing place to work. So today we're going to explore the Slang query API, which is a new feature that makes working with Solidity code surprisingly straightforward. Now, this is not a tutorial, so some of the details have been omitted. You can't copy and paste any of the code that I'm going to be providing. It's just so that, well, I'm hoping to inspire you, so that you go and find out some more details. Okay, so 20 minutes is a bit of a sprint, so I'm going to speed up. Let's go. Okay, so what is Slang? So Slang's our Solidity compiler library. It's basically a developer tool that enables you to write better tools. It parses and analyzes Solidity code from version 0.4.11 to the latest 0.8, which is basically all of the Solidity code that is live. Our focus is on correctness and convenience. We're about to release version 1, which covers the typical front-end features of a compiler, and our main objective, as I said, is to enable you to develop better tools. Slang has a radically open, declarative meta-architecture. Slang's an error-correcting compiler, so it always produces output, even if you've got source code errors. A key feature of Slang, which is what we're here to talk about, is its query API for code analysis. So compiler front ends fall into two main categories. There are those that produce what's called concrete syntax trees, and then produce an abstract syntax tree from that. And there are those that produce abstract syntax trees directly. So what's the difference between concrete syntax and abstract syntax? Well, concrete syntax, as you can see on this slide, is a complete representation of the source code. It includes every character, every bit of whitespace, every comment. It's like having, as I say, the full book, like a PDF. Whereas an abstract syntax tree is a simplified tree representation. So it doesn't include whitespace and equal signs and punctuation. It's just got the essential details. It's like if you were reading the plot of a book but you didn't actually have the text. So here's an example of a concrete syntax tree. So we've got a simple variable declaration here, and every single element of the original source is represented. Nothing is removed or simplified. Now, this is different from an AST, which will eliminate some of these details. So if we look at what we've got here, it's a simple variable declaration. Now, it consists of a type name, which is uint, then some whitespace, an identifier, some whitespace, punctuation (the equal sign), some whitespace, a number literal, one, and then the semicolon. Now, what's crucial to understand is that we're preserving every single character. In fact, even if there's an error in the source code, you'll find it in the concrete syntax tree. So why is this important? Well, when we're building developer tools or transforming code, we often need the complete information so we can round-trip. If we were just concerned with the meaning of the code, then we'd use an abstract syntax tree. But for many tools, like formatters, linters, or refactoring tools, we often want the full syntactic details. 
You don't want to lose comments when you're refactoring, and sometimes you want to know if you've left two blank lines and you want to preserve that. This level of detail is great, but it's also challenging. It's quite difficult to process this. On the one hand we have complete information, but working with detailed trees can be complex, and that's the challenge that the query API solves. So from now on I'll omit trivia nodes and whitespace from the examples. They're still there, but I don't need to show them. So let's look at a slightly more complex example. This definition shows how different elements nest within each other. And once again, notice how everything is preserved here. We've got a function definition, we've got the keyword function, the identifier, we've got a parameter list, which in this case is empty, but we still preserve it. And then nested in that is a block and a return statement and so on. Now, this level of detail allows us to maintain complete source fidelity, as I said, if we're going to round-trip. We can track precise code locations, so if you want to do error reporting. It handles formatting preservation. And when you're building development tools, this detailed structure is essential for a certain class of development tools. So to emphasize this point, and I know I'm being repetitive here, but I really want to make this point: why do we need a concrete syntax tree? Well, in a lot of tools, for example in Prettier Solidity, which is now using Slang by the way, using this technology, we want to make sure that we can preserve every single character, especially comments. For example, you might have a tool that wants to deal with comments, wants to encode validation information in a special comment format. Not like the documentation comment, but your own special comment format, so you want to preserve those comments. An AST, on the other hand, focuses on the essential structure, which is good for semantic processing. It doesn't contain the formatting information, it simplifies expressions, and it omits all comments and whitespace. So now that we understand what a CST is for, let's look at how you might traditionally process a CST. So this is the kind of code that you often see a lot of. You know what it's like to traverse a tree, especially a very deep and complex tree, which is what you typically get out of a compiler. And it's not pretty. What we're trying to do is actually quite simple. We just want to find all the variable declarations in some Solidity code, but look at what we have to do. So first of all we're writing a recursive function. We need recursion because we don't know how deep in the tree it is. Could be at the contract, inside a function, inside a block, anywhere. And then when we find each node we've got to manually check its kind, we've got to search through the children, we've got to handle all the edge cases, and we've got to manage the recursion stack, especially if you're threading state. And this is a simple example. In real-life code, you'd have to handle error cases, deal with unexpected node types, which is what you can get if you've got a source code error, for example. Manage your state during traversal, and if you've written a visitor pattern, you know that it's a pain in the ass to manage your state. You've got to handle parent references, and you've got to manage the position in the source code. Now, the problems with this approach are numerous. First, it's incredibly verbose. 
The amount of code you need to write to do even simple analysis is substantial, and the more code you write, the more you have to maintain, and the more places there are for bugs to hide. Second, it's error-prone, not only because of the volume of the code, but also because it's quite tricky to write these when you're dealing with complicated trees. For example, in Solidity, variable declarations can appear in for-loop initialisers. I don't know if this code would handle that. It's a question for the reader. It's hard to maintain. When the language evolves, and as you know, Solidity evolves significantly in all sorts of interesting ways, like in one of the 0.5s where the exponentiation operator changed its associativity. So when the language evolves, you need to update your traversal code, and that's because the traversal code is mixed with the analysis code. So what you want to do for analysis is mixed in with the mechanics of traversal. To make changes, you're often going to break things. Finally, and more importantly, this forces you to think about how you want to do things rather than what you want to do. And the intent of this code, finding variable declarations, is buried in here. You could look at a big bit of traversal code and you wouldn't know what it was meant to be doing. We need something better. So we'll have a look at some specific challenges with manual traversal. So recursion management. You've got to handle deep nesting efficiently, avoiding stack overflow. You've got to make sure you don't have any infinite loops. You've got to maintain your context, state management, and you've got to know when to stop recursing. Once again, if you use a visitor pattern where you've got external code driving your visitor, you often want to stop, or you don't want to go into the children of this node. How do you do that? That means you end up with a more complicated API. State tracking is quite complex. You need to keep track of the parent node when you're walking a tree. You need to maintain scope information in the case of a compiler, build up your composite results, handle cross-node relationships, i.e. you want to link this node which you visited 30 minutes ago with this other node which you're visiting now. Now error handling just multiplies the complexity, and edge cases keep appearing. For example, parenthesised expressions, nested structures, optional elements and language-specific quirks, of which there are plenty in Solidity. And finally you've got a big maintenance budget. Now I know I'm repeating this point, but this is significant, as you'll see when we get to queries, which I think is the next slide. So this is where queries come in. So let's look at how we solve some of those same problems using queries. So this is our first example. We want to find all the uint variable declarations. It's as simple as saying: in variable declarations where the type name is uint and it's got an identifier, find that and return it. No traversal. No edge cases. Hardly any maintenance. No efficiency problems. So the second example shows something a bit more powerful. In this case we want to find immediately nested function declarations, i.e. a function inside another function. So here we're saying in every contract definition there are some contract members. In the contract members there's a contract member. 
For every function definition in there, find the block and look for a statement in there that is itself a function definition, bind that to a variable called nested, and return it. So why is this so powerful? Well, first of all it's declarative: it's saying what we want, not how we want to do it. It makes our code easier to understand and maintain, and when you look at this two months later it's obvious what it's doing. Whereas I'd suggest that you'd have a couple of pages of code traversing your tree if you were using manual traversal. And it wouldn't be obvious what it was actually meant to do. So it's structure-aware. So the query understands Solidity's syntax structure. It knows about scope and it knows about nesting. And it also knows where type name, for example, in the first case here, appears in the tree. These patterns are composable. So we can build complex pattern matching from simple pattern matching. So if we wanted to find uint variables within these nested function definitions, it would be as simple as appending that first query below the second query. But most importantly, this is focusing once again on what we're trying to achieve. We can think about code patterns and structure design without getting bogged down in the implementation details. So let's have a look at some more advanced query patterns. This one isn't that advanced, but for example in an unchecked block you want to find all function calls. It's as easy as that, and it's obvious what it does. You could write this on one line. The second example is a bit more complicated, but here we're looking for state variable declarations that have a type that is either a mapping or an array. And think about what this would take if you're doing manual traversal. You'd need scope tracking, type checking logic, complex conditional logic, and you'd need to carefully handle the nested structure here. So what are the use cases for this? Some use cases, because I'm hoping that people come up with far more use cases than we know. We want emergent benefits from this. Well, say you want to enforce custom coding standards. You've got project-specific restrictions that you want to check. Or you want to do version compatibility checks, so you want to make sure that you don't use certain features of Solidity. Style checking. Well, if you've got specific naming patterns you want to enforce, specific structural conventions, and you can have context-aware rules. Now you might be thinking, well, can't I use a linter to do this? Well, yes, but this is meant to write the linter. This is designed for writing linters, so that's why these are the use cases. This is not an end-user tool. This is a tool for developers to write tools. So you can do pattern detection. Anti-patterns, for example. Optimisation opportunities, which may not be immediately obvious if you've got a big library of things. Detecting complex structural patterns that you might want to simplify or mark. So once again, this is a kind of an ESLint equivalent. Code transformation. Automated refactoring. Refactoring is transformations that preserve the semantics. Code modernisation. It's very easy to write patterns that then transform into more modern code. Automated modifications, i.e. things that aren't refactorings, that actually change the code. And formatting. And as I said, Prettier Solidity is already using Slang. 
One important point I want to make here is that we support WASM, and we support specifically the component spec and WIT. So for those who know what that means, this means that all of this technology works in the browser. It's also available as a Rust API or as TypeScript, which is our first target, because most people are using TypeScript. So, some more applications. Code transformation, I've already covered that. Documentation generation: it's easy to generate automatic documents by extracting function signatures, doing structure analysis, checking for particular usage patterns and documenting them, and processing comments. As I mentioned before, you might have validation information in a certain specialised comment format. Now, you can also use this technology in combination with AI tooling, for example, to produce diagrams from your code. You can use this by encoding the Slang API as a RAG. So what are the key benefits? Well, structure-aware queries. Your queries naturally align with the structure of the code that you're trying to match. You don't have to think about trees and nodes and traversal. Complete syntax preservation. You never lose information. Perform transformations without losing the formatting. And you can round-trip from a CST back to source. Efficient pattern matching. So we can spend all the effort required to make this efficient, if you know anything about the state of the art in terms of tree pattern matching. And in fact we're not a search engine, we are a unifier. So we use a Prolog/Datalog-based mechanism which returns you all the potential matching results, not just the first one. You can avoid unnecessary traversals. We can cache results. We can index the syntax tree. Make it very efficient. It would take a lot of effort to get the same result if you were doing it yourself. Composable syntax rules: you can combine simple rules to make bigger ones, you can have a big library of these patterns and you can reuse them over your code, share queries between your projects. You've got maintainable analysis code. So the queries express your intent: not how you want to do it, but what you want to do. Your code is shorter, it's easier to understand. Any changes to the language, Slang takes care of that, so you don't have to. Less code means fewer bugs. And these benefits compound. It's not one plus one plus one. The whole is far greater than the sum of the parts. So what impact does this have on your development? Well, before using this query API, you'd spend significant time writing and debugging the traversal code. You'd struggle with maintaining the analysis logic, and you'd face challenges when adding new features. After it, you end up with clear, focused code. It's easy to maintain. You've got a robust implementation, and it's highly extensible. And that is it. Any questions? Fantastic. Thank you, Anthony. All right, let's get on with the questions. So, folks, remember, you can scan that QR code, add questions to that list, smash that upvote button so that the most interesting question gets asked. Let's go with the first one. What are the advantages of using Slang compared to Semgrep? Well, I must admit I'm not familiar with Semgrep, but I know the general concept. The specific thing here is that this is a programmatic tool that you use to build other tools. 
Now, of course, you can combine things like Semgrep, I imagine, but I don't know if that is something that you would include in a tool for analysing lots of different versions of Solidity. That makes sense. Thank you. How does Slang differ from a fine-tuned LLM? Could Slang's modularity allow the use of LLMs in the future? Oh, this is... I'm in applied research, and this is something that is very actively under research. I'm not making any commitment, but if you've used fine-tuned LLMs, and I've used them a lot, for example, a lot of the code that I write is actually written by Claude. A lot of this presentation was written by Claude. Amazing tool, but you always need a human in the loop. So, yes, you can do this using LLMs, but the thing with LLMs is that they are a human augmentation tool, whereas the approach that we're talking about here is exact and precise. If you've used LLMs to do this, you know that you spend a lot of time on prompt engineering, and then you cross your fingers, and something comes back, and it doesn't quite work, and you prompt it again, and so on and so forth. So, yes, this is certainly something that could be included in the future. There's a chance, but maybe not now. I mean, not immediately. Certainly not now. Okay. Are the limitations of a tree visitor-based approach not solved by using an API with a CFG and an intermediate representation like Slither provides? Well, by CFG you could mean one of two things. You're either meaning the control flow graph, which is typically something you'd find with an abstract syntax tree, or you mean control flow as exposed by Rust, for example, where you reify your control flow, so you return a token in your visitor which says: do you want to continue, do you want to go down the tree. Now, these tree-visitor solutions, like Slither, may well solve the same kind of problem. We have a different set of constraints. For example, our query language is intended to be extended with semantic predicates and with the ability to do arbitrary recursion by skipping over arbitrary subtrees. So there's a lot that we want to do there. So I wouldn't say that in Slither you can or cannot do that. I'm not familiar enough with Slither. Perfect, thank you. Is there a question you'd like most in there? Well, will this work with different EVM versions? Yes, it will, because we're dealing with source. We're well before the EVM. Slang is a compiler, and it'll produce bytecode. So that's a question about the compiler, not about querying. Have we published the grammar for Slang? Yes, we have. Slang is actually a declarative meta-project, so you can use it for any programming language, not just Solidity. We have published that. Everything we do is open source. Nomic Foundation on GitHub, all of our development is in the open. You can reuse all of this. Awesome. How important is it to maintain code with different Solidity versions? Why not just use one version? Because you want to be able to provide tools that analyse a large number of contracts, which may be in an earlier version of Solidity than the latest one, for example, 0.4.11. There's a contract on Mainnet that is 0.4.11. You want to get the source to that. You want to provide analysis tools for it. 
That's why we want to support all of these versions. Now we don't go back to 001 or whatever the first version was. So we're pragmatic about it. But these are the versions that are live, and our goal is to support all the versions that are live. How do you determine the cut-off version? What's pragmatic? Analysis of Mainnet and all the contracts that are live. Okay, is it the number of contracts? Is it the value stored in those? Is it the amount of transactions? No, the earliest. I didn't do this analysis, so I must admit I don't know what process we used to determine that, but it is the case that the earliest contract in use, apparently, is 0411. Fantastic, thank you. What's the best practice for using slang to analyze a large number of programs? Where should the data be stored and in what format? Imagine this is something you run locally, correct? Yes, it is. Perfect. Thank you. All right. What update in Solidity was the hardest to adapt for? You mentioned quirks and weird updates in Solidity. Which one was the hardest? None of them was particularly hard. The volume of quirks is what is a challenge. I mean, we've got hundreds of edge cases. We had to analyse Sol-C because that's the only definition of the language and go through the code and then test it and then go to Sanctuary and look at massive numbers of contracts, run our compiler over them or run our parser over them. What breaks? What doesn't? We've got extensive test cases. So that was the challenge. It's the number, not any particular difficulty. Fantastic. Thank you. Do you dream in bytecode? No, I don't dream in... I dream compilers, actually. All right. Anthony, thank you a lot for your time and all the best to you. Thank you a lot for your time and all the best to you. Thank you. People, our next session will start in a few minutes. I am off for today. Thank you for those who spent a bit of time with me this morning. And I'll see you soon.", + "sources_streamethId": "67356fb59dbb7a90e189a824", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673572269dbb7a90e19a37f9.vtt", + "transcript_text": " In numbers, the total size of the game is about a total of 150 bytes. That's the entire state of the game. The front end is built in React and Phaser. The source code for the back end is about 500 or so lines of C++, not including boilerplate. That might be something like half of that. And it took about a month for a team of devs and collaborators from Gauss Labs and Xerox PARC and PSE to build this together. So why am I showing you this? Why am I showing you this incredibly simple game that a kid could have probably built as their first programming project? Well, essentially, the interesting thing about this game is that the back end of the game is running entirely inside of fully homomorphic encryption. And to our knowledge, this is the first time a multi-user application with a back end that is running inside of FHE has actually been built. So that means that all of the state of this game is encrypted using FHE, and all player actions and everything happening inside of the game is encrypted using FHE, and all player actions and everything happening inside of the game is happening inside of FHE. Thank you. So one question you might ask is why bother running this inside of FHE, right? FHE is super expensive. Well, one thread that we've been pulling at in the last year is this idea that technologies like FHE can enable us to run what we think about as hallucinated servers. 
So let me describe what I mean by that. Today, if a group of us wanted to come together and build some sort of application that all of us might use, like let's say a social network for DevCon attendees, the way we would do that today is that we would have someone rent out an AWS server, write some sort of back-end code, and then deploy that back-end code to the web server, and then each of us would, using our computer or our browser or client or whatever else, talk to this back-end server, making API requests to update and retrieve the state of the application. Now, using technologies like programmable cryptography, another way we can imagine doing this sort of thing opens up. So we can imagine, in a world with programmable cryptography, that rather than there existing a specific physical server with a physical footprint that's running all of the computations of the application, instead every participant of the application might store something like a cryptographic shard of the overall state. And using technologies like multi-party computation or fully homomorphic encryption, we could cryptographically simulate the execution of this virtual machine, using things like ZK proofs or FHE to advance the state of this arbitrary computation one step at a time, ensuring its consistency, ensuring that everybody is only, you know, having access to the data that they're supposed to be able to, while doing so in a decentralized multi-party way, without needing to rely on a physical server anywhere that actually is the source of truth for the system. So, you know, this sort of opens up a lot of interesting questions. What if our digital services ran as these distributed hallucinated computations between just the relevant parties? We could imagine having this abstraction for a server where instead of, you know, servers run by Zuck, we have a server made of math that's perfectly secure, privacy-preserving, verifiable, interoperable with every other service built in this way, etc. Of course, right now we're very early on our journey. So in terms of the game in numbers, in order to run this extraordinarily simple game, 150 bytes of state with four frogs on a 32 by 32 grid, we are using nine machines to coordinate a variety of different MPCs together. So we have four MacBooks downstairs, and we also have five 192-core AWS machines in the cloud, costing us about $200 an hour to run this game. Every binary gate involved in the execution of any operation takes about 10 milliseconds to evaluate, which is about a 1-billion-times overhead on top of ordinary computation. And for every bit of plaintext state in this game, this bit will blow up to about 3,000 bits. Actually, I think this might be 3,000 bytes of ciphertext. I need to check on that, but it's a huge overhead. 
So the way that I think about what's going on is it's sort of like we've built almost this particle accelerator and spent enormous amount of resources just so that we can suspend in the middle of the cryptographic ether for a brief instant something that looks like the Higgs boson, and we can sort of hold that", "eventId": "devcon-7", - "slot_start": 1731648600000, - "slot_end": 1731650400000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1y7kvxWFxGZ-TBTEld48n6Dz0MGYoIGHria1lhFAdTZo", - "resources_slides": null, + "slot_start": 1731552300000, + "slot_end": 1731554100000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1Xn-t83UrHqZiD2z9Y1uuRL-w6SCGvLF-dX6-cK0TwYM", + "resources_slides": "https://drive.google.com/file/d/1TDKtJEBB4kjXWteZ20IloEGdAG-cuZEE/view", "speakers": [ - "antony-blakey" + "pedro-gomes" ] }, "vector": [ 0, 0, 0, - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -677870,6 +675942,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -678277,7 +676350,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -678455,7 +676527,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -678488,7 +676559,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -678531,8 +676601,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -678662,8 +676730,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -678825,6 +676891,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -678997,11 +677064,11 @@ 0, 0, 0, + 2, 0, 0, 0, 2, - 2, 0, 0, 0, @@ -679019,40 +677086,48 @@ }, { "session": { - "id": "small-brain-games-mud-day-demo", - "sourceId": "9ZBKKS", - "title": "Small Brain Games - MUD Day Demo", - "description": "This is a project demo for MUD Day CLS: onchain games and non-financial applications. \r\n\r\nFor the past 1.5 years, I've been building fully onchain games–games where the entire state is onchain for some reason (have launched 7!). In this demo, I will showcase some of these games that I have built.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "smart-contracts-with-privacy-case-study-buying-renewable-power", + "sourceId": "F9PWUP", + "title": "Smart Contracts with Privacy - Case Study - Buying Renewable Power", + "description": "Getting the world’s industries to switch to renewable power is immensely important for our planet’s future, but renewable power purchasing agreements turn out to be complicated to manage and administer. Buyers and sellers must interact indirectly through the electricity market and agreements contain complex rules. Keeping track of these is complicated and expensive - UNLESS you have a blockchain-based smart contract. 
This is how we did it, using ZK for privacy, on chain!", + "track": "Real World Ethereum", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Business", "featured": false, "doNotRecord": false, - "keywords": [], "tags": [ - "Gaming", - "Autonomous World", - "Autonomous World", - "Gaming" + "Privacy", + "Zero-Knowledge", + "Use Cases", + "enterprise", + "Privacy", + "Use Cases", + "Zero-Knowledge" ], - "language": "en", - "speakers": [ - "small-brain" + "keywords": [ + "Enterprise" ], + "duration": 1440, + "language": "en", + "sources_swarmHash": "3d1b5977c282fddf02aa30d70116459a362882bb2c1df1029d7cba048a07cf9b", + "sources_youtubeId": "mFX6m60ceIY", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67349b569dbb7a90e11ec407", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731557700000, - "slot_end": 1731558000000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1rEXXVcN2oqvYGgP1WxdgoBQUTVgnEnjAZjAEYHOPJv8" + "slot_start": 1731493800000, + "slot_end": 1731495600000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1iPCFSCb5vpiqtzwoYxszBwbVcjQ5iI86jv7FH1Uo3E8", + "resources_slides": "https://drive.google.com/file/d/1Y_rxgbY335V1M40cjFY71QkPdI8XdPf0/view", + "speakers": [ + "paul-brody" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -679189,36 +677264,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -679677,6 +677722,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -679842,6 +677888,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -679909,37 +677956,6 @@ 0, 0, 2, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -679976,6 +677992,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -680336,6 +678353,63 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -680369,6 +678443,7 @@ 0, 0, 0, + 0, 2, 0, 0, @@ -680378,46 +678453,54 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "smart-accounts-need-smart-sessions", - "sourceId": "SJDY99", - "title": "Smart Accounts need Smart Sessions", - "description": "The world of dapps is evolving and wallets are becoming smarter. This is powered by developments in Smart Accounts which unlock more user-friendly experiences. Learn about how WalletConnect is introducing Smart Sessions and walkthrough all the standards (EIPs, ERCs and CAIPs) that will make the future of wallet UX possible.", - "track": "Usability", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "solarpunk-vs-lunarpunk-the-evolution-and-integration-of-these-movements", + "sourceId": "SFY3FB", + "title": "Solarpunk vs. Lunarpunk: The Evolution and Integration of these Movements", + "description": "In this talk, I will explore how the ideals of solarpunk and lunarpunk can be integrated to address privacy, inclusivity, and justice. 
We will explain how combining the strengths of both movements we can potentially create a cohesive vision for a sustainable, equitable, and free future.", + "track": "Cypherpunk & Privacy", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "interoperability" + "Coordination", + "Anonymity", + "Solarpunk", + "Ethereum for Good", + "Social", + "culture", + "Anonymity", + "Coordination", + "Ethereum for Good", + "Social", + "Solarpunk" ], "keywords": [ - "standards", - "wallets", - "interoperability" + "Lunarpunk", + "Culture" ], - "duration": 1802, + "duration": 567, "language": "en", - "sources_swarmHash": "06f9344ba6e1d54564b078134d5ad55ec3e142a2bb173b240d8df7aa64772788", - "sources_youtubeId": "GeYbDsOW4hQ", + "sources_swarmHash": "ec3e42c5c5bdbed8c1e3b858f53d8832afe881cea558c321fb3a1c657e542700", + "sources_youtubeId": "2SYWYVJonuk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67356fb59dbb7a90e189a824", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673572269dbb7a90e19a37f9.vtt", - "transcript_text": " In numbers, the total size of the game is about a total of 150 bytes. That's the entire state of the game. The front end is built in React and Phaser. The source code for the back end is about 500 or so lines of C++, not including boilerplate. That might be something like half of that. And it took about a month for a team of devs and collaborators from Gauss Labs and Xerox PARC and PSE to build this together. So why am I showing you this? Why am I showing you this incredibly simple game that a kid could have probably built as their first programming project? Well, essentially, the interesting thing about this game is that the back end of the game is running entirely inside of fully homomorphic encryption. And to our knowledge, this is the first time a multi-user application with a back end that is running inside of FHE has actually been built. So that means that all of the state of this game is encrypted using FHE, and all player actions and everything happening inside of the game is encrypted using FHE, and all player actions and everything happening inside of the game is happening inside of FHE. Thank you. So one question you might ask is why bother running this inside of FHE, right? FHE is super expensive. Well, one thread that we've been pulling at in the last year is this idea that technologies like FHE can enable us to run what we think about as hallucinated servers. So let me describe what I mean by that. Today, if a group of us wanted to come together and to build some sort of application that all of us might use, like let's say a social network for DevCon attendees, the way we would do that today is that we would have someone rent out an AWS server, write some sort of back-end code, and then deploy that back-end code to the web server, and then each of us would, using our computer or using our browser or client or whatever else, talk to this back-end server, making API requests to update and retrieve the state of the application. making API requests to update and retrieve the state of the application. Now using technologies like programmable cryptography, another way we can imagine doing this sort of thing opens up. 
So we can imagine, in a world with programmable cryptography, that rather than there existing a specific physical server with a physical footprint that's running all of the computations of the application, instead every participant of the application might store something like a cryptographic shard of the overall state. And using technologies like multi-party computation or fully homomorphic encryption, we could cryptographically simulate the execution of this virtual machine, using things like ZK proofs or FHE to advance the state of this arbitrary computation one step at a time, ensuring its consistency, ensuring that everybody is only, you know, having access to the data that they're supposed to be able to, while doing so in a decentralized multi-party way, without needing to rely on a physical server anywhere that actually is the source of truth for the system. So, you know, this sort of opens up a lot of interesting questions. What if our digital services ran as these distributed hallucinated computations between just the relevant parties? We could imagine having this abstraction for a server where instead of, you know, servers run by Zuck, we have a server made of math that's perfectly secure, privacy-preserving, verifiable, interoperable with every other service built in this way, etc. Of course, right now we're very early on our journey. So in terms of the game in numbers, in order to run this extraordinarily simple game, 150 bytes of state with four frogs on a 32 by 32 grid, we are using nine machines to coordinate a variety of different MPCs together. So we have four MacBooks downstairs, and we also have five 192-core AWS machines in the cloud, costing us about $200 an hour to run this game. Every binary gate involved in the execution of any operation takes about 10 milliseconds to evaluate, which is about a 1-billion-times overhead on top of ordinary computation. And for every bit of plaintext state in this game, this bit will blow up to about 3,000 bits. Actually, I think this might be 3,000 bytes of ciphertext. I need to check on that, but it's a huge overhead. 
So the way that I think about what's going on is it's sort of like we've built almost this particle accelerator and spent enormous amount of resources just so that we can suspend in the middle of the cryptographic ether for a brief instant something that looks like the Higgs boson, and we can sort of hold that", + "sources_streamethId": "6734a1589dbb7a90e1486e16", "eventId": "devcon-7", - "slot_start": 1731552300000, - "slot_end": 1731554100000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1Xn-t83UrHqZiD2z9Y1uuRL-w6SCGvLF-dX6-cK0TwYM", - "resources_slides": null, + "slot_start": 1731496800000, + "slot_end": 1731497400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1Zg48147sw4ud8uPsdsYKyuXSSdSVDoJZ0LSxumOJZ4o", + "resources_slides": "https://drive.google.com/file/d/1s9s8u7UDCQiJ9PxVxcUCmqjI_7EbqWPF/view", "speakers": [ - "pedro-gomes" + "manualzuru" ] }, "vector": [ @@ -680426,9 +678509,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -680610,12 +678690,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -681025,6 +679099,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -681203,6 +679278,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -681301,6 +679377,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -681329,6 +679407,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -681353,6 +679432,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -681407,6 +679487,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -681561,7 +679642,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -681739,12 +679819,10 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, + 2, 0, 0, 0, @@ -681757,51 +679835,46 @@ }, { "session": { - "id": "smart-contracts-with-privacy-case-study-buying-renewable-power", - "sourceId": "F9PWUP", - "title": "Smart Contracts with Privacy - Case Study - Buying Renewable Power", - "description": "Getting the world’s industries to switch to renewable power is immensely important for our planet’s future, but renewable power purchasing agreements turn out to be complicated to manage and administer. Buyers and sellers must interact indirectly through the electricity market and agreements contain complex rules. Keeping track of these is complicated and expensive - UNLESS you have a blockchain-based smart contract. This is how we did it, using ZK for privacy, on chain!", - "track": "Real World Ethereum", + "id": "solidity-inline-assembly-for-developer-experience", + "sourceId": "F7XJZW", + "title": "Solidity Inline-Assembly for Developer Experience", + "description": "We demonstrate how inline-assembly is used at Solady to improve the account abstraction developer experience, write concise code, and create novel features.\r\n\r\nSolady is a Solidity library (MIT-licensed). 
\r\n\r\nSome of our biggest users include Coinbase, Optimism, Uniswap.", + "track": "Developer Experience", "type": "Talk", "expertise": "Intermediate", "audience": "Developper", "featured": false, "doNotRecord": false, "tags": [ "Gas", "Account Abstraction", "solidity", "Account Abstraction", "Gas" ], "keywords": [ "Solidity" ], "duration": 1000, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "82e9412440d2f77b0682445fbd724edc9ece1726af1fa89fec760eb0ec69b406", + "sources_youtubeId": "CUHov__69b0", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735ca1e9dbb7a90e1955648", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ca1e9dbb7a90e1955648.vtt", "transcript_text": " Intro Alright, I've been around for quite a while. In my free time I maintain this library called Solady, which is a Solidity library written mostly in inline assembly. So today I'm going to talk about how we use inline assembly to make things easier for users of the library. Alright, so to start, some time ago I wrote this tweet saying that beginners use assembly to save gas, because that is the most obvious thing. Intermediate users use assembly to avoid the Spurious Dragon limit, because assembly makes your bytecode smaller. Like, even without EOF, if you use Solady, you can probably see something like a 20-30% reduction in bytecode. Then advanced users use assembly to save time. Once you are used to writing inline assembly, you just write the optimal Yul code directly. So inline assembly and Yul, we can use them interchangeably in this context. Okay, so today I will explain a bit of the second and the third, the second and the third. Alright, so some examples to start with. The first is the safe transfer lib. I will also talk about the libs that have been built at Solady. So at Solady, we have this library for proxies. And our proxies are mostly, I would say most if not all of our proxies are, like, written directly in bytecode. Then we'll also cover some higher-level things, for example LibSort, and how using inline assembly can help in testing. Okay, so for example, we use this: say you want to send ether to someone in an auction contract, but what if the recipient is a contract that reverts or doesn't implement the receive function? So in Solady, we have this function called forceSafeTransferETH, which uses SELFDESTRUCT to send the ether to the person. So as a user, you don't need to care about any of this. You just use the function, and you can be quite sure that the ether will be sent to the person. And why is this useful? We know that wrapped ETH is not at the same address on every L2. So if you want to deploy your contract to the same CREATE2 address and you use wrapped ETH, it's a bit of a problem. 
So instead, you can use this function, and you can save yourself some brain space and time. In LibClone, we have this function called deployERC1967, which uses a minimal upgradeable bytecode proxy. It's based on this ERC-7760. These proxies have been written for more than a month. But recently we wanted to consolidate all of this into an ERC so that block explorers can easily implement auto-verification. Because this ERC has been around, like, these proxies have been around for a while, it has also gotten quite some usage. So on Base, it is the proxy of choice used for the Coinbase Smart Wallet, and for Polygon, I think they use it for something else too. So there are more than 100,000 instances of this proxy in use out there. And the advantage is that it's fast, it's small, and in some cases it auto-verifies. So it saves your time. You don't need to fiddle with some API tools or whatever. It just verifies itself. LibSort also has some higher-level functions that can be used for testing purposes. Like, we have set operations on sorted arrays. You can use them for invariant tests. And for fuzz tests, we know that if you write tests in Foundry, you have to declare the random variables in the function arguments, and there's a limited amount of stuff you can do there. And you need to think up interesting variables, which I feel is very time-consuming. So, I used a lot of black magic to write methods that allow you to generate pseudo-random numbers on the fly anywhere inside your code. So you can do something like quick fuzzing wherever it's needed. And if you want to know how to integrate it into your project, you can follow the link below. Okay, so these are some case studies that you might want to check out. One thing that I find quite interesting is the dynamic array lib. For example in Solidity, if you declare a memory array, you know you cannot do append or push-back, but this dynamic array lib has that function. Okay, so why is inline assembly good for library authors like me? First, it enables some techniques, like it helps me do some code golfing so that library users don't need to worry that my library causes them to have stack-too-deep. You can also do things that are not achievable in normal Solidity. And if you know how to nudge the compiler in the right direction, you can get some extra compile-time wins at no cost. So, for example, avoiding stack-too-deep: we have this function called lnWad in FixedPointMathLib. If you use it on its own, sometimes the compiler tries to inline it and that causes stack-too-deep. But if you write this function in inline assembly, you help the compiler inline it in a way that avoids stack-too-deep, even without IR. So in Solady, we are very particular that it must work in as many situations as possible. So whether you use via-IR or whether you choose not to use via-IR, we want to make sure it works. You don't need to worry that our library is the culprit for stack-too-deep. 
That's why we are very particular about all these small details. Okay, next, inline assembly also allows us to do cool math. You don't need to understand all of it, but there are some opcodes, like the byte opcode and other branchless opcodes, that allow us to make the math faster, for example in this log2 method. And the benefit of having branchless functions, which can be done with inline assembly, is that when the Solidity compiler tries to optimize, it will start checking whether your function has any branches. If it has branches, then the function might not be inlinable with the via-IR pipeline, and if you use the legacy pipeline, it definitely will not be inlined. So by writing things in inline assembly, we help the compiler generate more efficient code by telling the compiler that this function can be inlined. Using this trick, we can also achieve other techniques: like, if you want to compare a short string with another short string, traditionally you would use something like keccak256, but if you know inline assembly, you can do a kind of black magic that tells the compiler that, instead of doing useless memory allocations, it should just use the optimal opcodes. So all of this, even though it looks like a lot of opcodes, in the end, once you hit compile, it neatly collapses into just a few opcodes. And this is only possible if that whole chunk of code is branchless. Alright, so the thing about Solidity is that it is a very beautiful language. You can write all the low-level stuff and users don't need to worry about the details. So all the scary black magic is abstracted away from the users. Users only need to care about what the high-level API is and what all the different functions are used for. So you can write clean, effective, high-level Solidity that stays readable, without going down the rabbit hole of inline assembly. There are some guiding goals, like the North Star of Solady. We want to make sure Solidity is the best contract language by shipping super-optimized utilities that push the Solidity compiler, now and going forward. So the idea is, people say we want to write our language in Rust because it has a wider library ecosystem, we want to write in C++ because there's a wider library. But in Solidity, we have this exotic math function called LambertW0 which is only available in Python, maybe some math languages. You can't find it in any other major language. So that is Solady's goal: if it's possible within Turing completeness, we will build it. We also want to scale Ethereum through app-layer optimization and also make beautiful code. We also have plans for EOF. Although, I mean, it's quite a big change, I see its importance, because EOF code is actually more optimal for things like SP1, so eventually, I think quite soon, Solady will have EOF. It won't be a breaking change. So we'll have a source directory, and then we'll have an EOF directory. So you can use either one. 
We won't force you to use either EOF or the legacy. We'll give you an option. Okay, so here are some links. You can visit solady.org, which is a shortcut to Solady. Or you can visit my GitHub or my X. Sometimes I post, like, small Solady lessons on my X. Okay, I think that's all for my talk. Thank you. Thank you, Vectorized, for this talk. I guess we all now have a better idea of how to leverage inline assembly in our code. In the meantime, we have plenty of time for questions. So if you have any, make sure to please send them through this QR code. I'll be starting with the first one on the leaderboard. Did assembly ever bite you in the butt, where vanilla Solidity would have been safer? Okay, when I was starting out, like maybe two years ago, I think the SafeTransferLib forgot to clean some upper bits, because I directly ported it from Solmate. But then someone spotted that bug. So these days, I am super paranoid about unclean, dirty upper bits. So it becomes like a second nature. So I think recently it should be quite safe. All right, next one. When to use let and when to use mstore? Okay, let is the way to declare a variable in assembly. If, say, you somehow cannot force the compiler to avoid stack-too-deep, then you might need to use mstore to use the scratch space for temporary variables. But you at most can only use two parts of the scratch space, like two slots, because the other slots are for other purposes, for important stuff. Next one, why do you prefer Solidity inline assembly over other low-level languages like Huff? Okay, so the thing about... I like Huff, but the benefit of Solidity is that it allows you to recycle your efforts. And also, it allows you to generate more compact bytecode. For example, in Huff, the inline is really inline. Whereas in Solidity, if your function is a giant chunk of bytecode and it's used in, like, 30 different places, you might not actually want to inline it every single time. Next one: at L2s, calldata with the calldata fallback function, I guess from LibZip, has greater gas versus non-compressed calldata, but has slightly cheaper gas at L1. Is that correct? What happens under the hood? Okay, this depends on your L2. Some L2s already forward the gas savings of compressed calldata to the users. So if you use the calldata fallback on Optimism, you will most probably not be able to save any calldata money. So if you're deployed on an OP Stack chain, just don't use that. When using assembly-based libraries, is there a higher likelihood that it is less compatible with future Solidity versions? I would say that it's actually less likely, because, for example, if you look at transient storage, Solidity implemented it at the assembly level first, and then later it became, like, a higher-level thing. So the one thing you need to worry about is that Solidity might change in ways that the high-level parts interpret the memory slightly differently. So you have to keep up with the changelog. Vectorized also has a nice friend, a nice hat, and a panda on his hat. Do you want to show everyone? Yeah, I guess that helps you as well in coding. Next one: I guess, how saturated are Solady's features? 
How many more libraries and functions do you plan to add? Okay, I think right now we have mostly saturated. We have a lot of work to do to port to EOF. Especially the lib clone is going to be... But the Solidity team has actually much more stuff to do compared to me. Another one about Sol-AD. Will the new Solidity compiler in Rust impact Sol-AD? I think this new Solidity compiler called Solr, they try to become like a feature parity with the official Solidity compiler. Like for example, in C++ you have Clang, you have GCC. I think we might go that route down the road. And if you are just writing in a high-level language, you don't need to care whether you use Clang or GCC. All right, that wraps up for our Q&A session. Thank you very much for the questions and give a big round of applause for Vectorize.", "eventId": "devcon-7", - "slot_start": 1731493800000, - "slot_end": 1731495600000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1iPCFSCb5vpiqtzwoYxszBwbVcjQ5iI86jv7FH1Uo3E8", - "resources_slides": null, + "slot_start": 1731576600000, + "slot_end": 1731578400000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1ww4IN7FSAReDpOBeMK96jT38LWmsqkRdbQBoBnUIH-k", + "resources_slides": "https://drive.google.com/file/d/1FlbxteD9YFQooQvu_TjjjOA7Ug97kf-k/view", "speakers": [ - "paul-brody" + "vectorized" ] }, "vector": [ - 0, - 0, - 0, 0, 0, 0, @@ -682394,12 +680467,11 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -682562,7 +680634,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -682603,6 +680674,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -682629,7 +680701,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -682666,7 +680737,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -682780,6 +680850,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -682916,6 +680987,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -683028,7 +681100,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -683120,12 +681191,12 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -683135,57 +681206,54 @@ }, { "session": { - "id": "solarpunk-vs-lunarpunk-the-evolution-and-integration-of-these-movements", - "sourceId": "SFY3FB", - "title": "Solarpunk vs. Lunarpunk: The Evolution and Integration of these Movements", - "description": "In this talk, I will explore how the ideals of solarpunk and lunarpunk can be integrated to address privacy, inclusivity, and justice. 
We will explain how combining the strengths of both movements we can potentially create a cohesive vision for a sustainable, equitable, and free future.", - "track": "Cypherpunk & Privacy", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Community", + "id": "solidity-then-now-and-the-future", + "sourceId": "HZ3DEF", + "title": "Solidity: Then, Now, & the Future!", + "description": "In this talk, I will be presenting the prospect of Q1 2025 release of the Solidity language compiler including the following sections:\r\n\r\n- Latest features and developments\r\n- via-ir: what's happening and what's next\r\n- Experimental Solidity: The future of the language\r\n- Timeline & roadmap", + "track": "Developer Experience", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Coordination", - "Anonymity", - "Solarpunk", - "Ethereum for Good", - "Social", - "culture", - "Anonymity", - "Coordination", - "Ethereum for Good", - "Social", - "Solarpunk" + "Tooling", + "Languages", + "solidity", + "Languages", + "Tooling" ], "keywords": [ - "Lunarpunk", - "Culture" + "Smart Contract Development", + "Solidity" ], - "duration": 567, + "duration": 1612, "language": "en", - "sources_swarmHash": "ec3e42c5c5bdbed8c1e3b858f53d8832afe881cea558c321fb3a1c657e542700", - "sources_youtubeId": "2SYWYVJonuk", + "sources_swarmHash": "f0d38870de6eb47e00161622e7047dec080197153c6387f0b560a0d6c505b0fa", + "sources_youtubeId": "56JNxjPH-QY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734a1589dbb7a90e1486e16", + "sources_streamethId": "6735c6dc9dbb7a90e139e25d", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731496800000, - "slot_end": 1731497400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Zg48147sw4ud8uPsdsYKyuXSSdSVDoJZ0LSxumOJZ4o", - "resources_slides": null, + "slot_start": 1731574800000, + "slot_end": 1731576600000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1GmwHGEiPwMU4yfyA7ipBeOYh8M7CK0BgtepZdbx3JFA", + "resources_slides": "https://drive.google.com/file/d/1V_p-KHAYMNNyteGUsy4WU3jpZfxabh-I/view", "speakers": [ - "manualzuru" + "vishwa-mehta" ] }, "vector": [ 0, 0, 0, + 6, + 0, 0, 0, - 6, 0, 0, 0, @@ -683948,6 +682016,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -683957,7 +682026,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -684024,6 +682092,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -684056,8 +682125,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -684086,7 +682153,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -684111,8 +682177,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -684166,8 +682230,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -684297,6 +682359,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -684493,6 +682556,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -684503,8 +682567,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -684516,49 +682578,53 @@ }, { "session": { - "id": "solidity-inline-assembly-for-developer-experience", - "sourceId": "F7XJZW", - "title": "Solidity Inline-Assembly for Developer Experience", - "description": "We demonstrate how inline-assembly is used at Solady to improve the account abstraction developer experience, write concise code, and create novel features.\r\n\r\nSolady is a Solidity library (MIT-licensed). 
\r\n\r\nSome of our biggest users include Coinbase, Optimism, Uniswap.", "track": "Developer Experience", "type": "Talk", "expertise": "Intermediate", "audience": "Developper", "featured": false, "doNotRecord": false, "tags": [ "Gas", "Account Abstraction", "solidity", "Account Abstraction", "Gas" ], "keywords": [ "Solidity" ], "duration": 1000, "language": "en", "sources_swarmHash": "82e9412440d2f77b0682445fbd724edc9ece1726af1fa89fec760eb0ec69b406", "sources_youtubeId": "CUHov__69b0", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735ca1e9dbb7a90e1955648", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ca1e9dbb7a90e1955648.vtt", "transcript_text": " Intro Alright, I've been around for quite a while. In my free time I maintain this library called Solady, which is a Solidity library written mostly in inline assembly. So today I'm going to talk about how we use inline assembly to make things easier for users of the library. Alright, so to start, some time ago I wrote this tweet saying that beginners use assembly to save gas, because that is the most obvious thing. Intermediate users use assembly to avoid the Spurious Dragon limit, because assembly makes your bytecode smaller. Like, even without EOF, if you use Solady, you can probably see something like a 20-30% reduction in bytecode. Then advanced users use assembly to save time. Once you are used to writing inline assembly, you just write the optimal Yul code directly. So inline assembly and Yul, we can use them interchangeably in this context. Okay, so today I will explain a bit of the second and the third, the second and the third. Alright, so some examples to start with. The first is the safe transfer lib. I will also talk about the libs that have been built at Solady. So at Solady, we have this library for proxies. And our proxies are mostly, I would say most if not all of our proxies are, like, written directly in bytecode. Then we'll also cover some higher-level things, for example LibSort, and how using inline assembly can help in testing. 
Okay, so for example, we use this: say you want to send ether to someone in an auction contract, but what if the recipient is a contract that reverts or doesn't implement the receive function? So in Solady, we have this function called forceSafeTransferETH, which uses SELFDESTRUCT to send the ether to the person. So as a user, you don't need to care about any of this. You just use the function, and you can be quite sure that the ether will be sent to the person. And why is this useful? We know that wrapped ETH is not at the same address on every L2. So if you want to deploy your contract to the same CREATE2 address and you use wrapped ETH, it's a bit of a problem. So instead, you can use this function, and you can save yourself some brain space and time. In LibClone, we have this function called deployERC1967, which uses a minimal upgradeable bytecode proxy. It's based on this ERC-7760. These proxies have been written for more than a month. But recently we wanted to consolidate all of this into an ERC so that block explorers can easily implement auto-verification. Because this ERC has been around, like, these proxies have been around for a while, it has also gotten quite some usage. So on Base, it is the proxy of choice used for the Coinbase Smart Wallet, and for Polygon, I think they use it for something else too. So there are more than 100,000 instances of this proxy in use out there. And the advantage is that it's fast, it's small, and in some cases it auto-verifies. So it saves your time. You don't need to fiddle with some API tools or whatever. It just verifies itself. LibSort also has some higher-level functions that can be used for testing purposes. Like, we have set operations on sorted arrays. You can use them for invariant tests. And for fuzz tests, we know that if you write tests in Foundry, you have to declare the random variables in the function arguments, and there's a limited amount of stuff you can do there. And you need to think up interesting variables, which I feel is very time-consuming. So, I used a lot of black magic to write methods that allow you to generate pseudo-random numbers on the fly anywhere inside your code. So you can do something like quick fuzzing wherever it's needed. And if you want to know how to integrate it into your project, you can follow the link below. Okay, so these are some case studies that you might want to check out. One thing that I find quite interesting is the dynamic array lib. For example in Solidity, if you declare a memory array, you know you cannot do append or push-back, but this dynamic array lib has that function. Okay, so why is inline assembly good for library authors like me? First, it enables some techniques, like it helps me do some code golfing so that library users don't need to worry that my library causes them to have stack-too-deep. You can also do things that are not achievable in normal Solidity. And if you know how to nudge the compiler in the right direction, you can get some extra compile-time wins at no cost. 
So, for example, avoiding stack-too-deep: we have this function called lnWad in FixedPointMathLib. If you use it on its own, sometimes the compiler tries to inline it and that causes stack-too-deep. But if you write this function in inline assembly, you help the compiler inline it in a way that avoids stack-too-deep, even without IR. So in Solady, we are very particular that it must work in as many situations as possible. So whether you use via-IR or whether you choose not to use via-IR, we want to make sure it works. You don't need to worry that our library is the culprit for stack-too-deep. That's why we are very particular about all these small details. Okay, next, inline assembly also allows us to do cool math. You don't need to understand all of it, but there are some opcodes, like the byte opcode and other branchless opcodes, that allow us to make the math faster, for example in this log2 method. And the benefit of having branchless functions, which can be done with inline assembly, is that when the Solidity compiler tries to optimize, it will start checking whether your function has any branches. If it has branches, then the function might not be inlinable with the via-IR pipeline, and if you use the legacy pipeline, it definitely will not be inlined. So by writing things in inline assembly, we help the compiler generate more efficient code by telling the compiler that this function can be inlined. Using this trick, we can also achieve other techniques: like, if you want to compare a short string with another short string, traditionally you would use something like keccak256, but if you know inline assembly, you can do a kind of black magic that tells the compiler that, instead of doing useless memory allocations, it should just use the optimal opcodes. So all of this, even though it looks like a lot of opcodes, in the end, once you hit compile, it neatly collapses into just a few opcodes. And this is only possible if that whole chunk of code is branchless. Alright, so the thing about Solidity is that it is a very beautiful language. You can write all the low-level stuff and users don't need to worry about the details. So all the scary black magic is abstracted away from the users. Users only need to care about what the high-level API is and what all the different functions are used for. So you can write clean, effective, high-level Solidity that stays readable, without going down the rabbit hole of inline assembly. There are some guiding goals, like the North Star of Solady. We want to make sure Solidity is the best contract language by shipping super-optimized utilities that push the Solidity compiler, now and going forward. So the idea is, people say we want to write our language in Rust because it has a wider library ecosystem. We want to write in C++ because there's a wider library. But in Solidity, we have this exotic math function called LambertW0 which is only available in Python, maybe some math languages. 
You can't find it in any other major language. So that is Solady's goal: if it's possible within Turing completeness, we will build it. We also want to scale Ethereum through app-layer optimization and also make beautiful code. We also have plans for EOF. Although, I mean, it's quite a big change, I see its importance, because EOF code is actually more optimal for things like SP1, so eventually, I think quite soon, Solady will have EOF. It won't be a breaking change. So we'll have a source directory, and then we'll have an EOF directory. So you can use either one. We won't force you to use either EOF or the legacy. We'll give you an option. Okay, so here are some links. You can visit solady.org, which is a shortcut to Solady. Or you can visit my GitHub or my X. Sometimes I post, like, small Solady lessons on my X. Okay, I think that's all for my talk. Thank you. Thank you, Vectorized, for this talk. I guess we all now have a better idea of how to leverage inline assembly in our code. In the meantime, we have plenty of time for questions. So if you have any, make sure to please send them through this QR code. I'll be starting with the first one on the leaderboard. Did assembly ever bite you in the butt, where vanilla Solidity would have been safer? Okay, when I was starting out, like maybe two years ago, I think the SafeTransferLib forgot to clean some upper bits, because I directly ported it from Solmate. But then someone spotted that bug. So these days, I am super paranoid about unclean, dirty upper bits. So it becomes like a second nature. So I think recently it should be quite safe. All right, next one. When to use let and when to use mstore? Okay, let is the way to declare a variable in assembly. If, say, you somehow cannot force the compiler to avoid stack-too-deep, then you might need to use mstore to use the scratch space for temporary variables. But you at most can only use two parts of the scratch space, like two slots, because the other slots are for other purposes, for important stuff. Next one, why do you prefer Solidity inline assembly over other low-level languages like Huff? Okay, so the thing about... I like Huff, but the benefit of Solidity is that it allows you to recycle your efforts. And also, it allows you to generate more compact bytecode. For example, in Huff, the inline is really inline. Whereas in Solidity, if your function is a giant chunk of bytecode and it's used in, like, 30 different places, you might not actually want to inline it every single time. Next one: at L2s, calldata with the calldata fallback function, I guess from LibZip, has greater gas versus non-compressed calldata, but has slightly cheaper gas at L1. Is that correct? What happens under the hood? Okay, this depends on your L2. Some L2s already forward the gas savings of compressed calldata to the users. So if you use the calldata fallback on Optimism, you will most probably not be able to save any calldata money. So if you're deployed on an OP Stack chain, just don't use that. 
When using assembly-based libraries, is there a higher likelihood that it is less compatible with future Solidity versions? I would? Saya akan mengatakan bahawa sebenarnya itu kurang kemungkinan kerana, contohnya jika anda melihat penyimpanan transien, Solidity mengimplementasikannya pada tahap pembinaan pertama, kemudian kemudian ia menjadi seperti perkara yang lebih tinggi. Jadi satu perkara yang perlu anda risaukan adalah bahawa Solidity mungkin ber in ways that the high-level parts interpret the memory slightly differently. So that you have to keep up with the change log. Victoria also has a nice friend, a nice hat, and a panda on his hat. Do you want to show everyone? Yeah, I guess that helps you as well in coding. Next one, I guess how Sol-AD's features saturated? How many more libraries and functions do you plan to add? Okay, I think right now we have mostly saturated. We have a lot of work to do to port to EOF. Especially the lib clone is going to be... But the Solidity team has actually much more stuff to do compared to me. Another one about Sol-AD. Will the new Solidity compiler in Rust impact Sol-AD? I think this new Solidity compiler called Solr, they try to become like a feature parity with the official Solidity compiler. Like for example, in C++ you have Clang, you have GCC. I think we might go that route down the road. And if you are just writing in a high-level language, you don't need to care whether you use Clang or GCC. All right, that wraps up for our Q&A session. Thank you very much for the questions and give a big round of applause for Vectorize.", + "sources_streamethId": "6736d5be74749a4b892d57fc", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d5be74749a4b892d57fc.vtt", + "transcript_text": " Hello, hello. Yeah, I guess it's my turn. Yeah, surprise title. Actually, I'm going to talk about why Dark Forest is even more dangerous to solo stakers with an attack that strikes at the heart of the Ethereum validation. So together, actually, we're going to play as an attacker. And we're going to find out a validator or a solo staker that actually uses MEV boost, which is next to their consensus client. In order to find them, we actually are going to do the exercise together. But once we find them, what we're going to do is we're going to disrupt the block production of the solo staker and stop their rewards and manipulate the Randall value which is responsible for assigning new block proposers. So my name is Q, I'm the decentralized technology architect at Hopper. At Hopper we build privacy preserving infrastructure. So we'll actually use a sandbox environment for demo purpose, but there's nothing actually preventing this attack happening in real life right now. Our sandbox actually has three consensus clients with a total of 192 validators in a kurtosis network with also one MEV relay and one block builder. For now, those validators are just a list of anonymous public keys, but not for long. So while this test has been running, let me explain how we actually conduct this attack. So first, we actually need to de-anonymize validators to identify our target. So validators actually need to attest block productions, and those attestations are propagated into the peer-to-peer network in GossipSub. We can actually just silently observe those attestations to create a probabilistic correlation between the validator's public key to the IP address of the consensus layer client. 
And we actually have this... oh, the test has actually been running. We just let it run for a little while. A block has been proposed. Just leave it there. So now is the moment to actually explain the other information that we need to conduct this attack. We actually need a little bit of help from the trusted MEV relay. Maybe we're just observing the relay, or we're colluding with the MEV relay, or we're just the relay ourselves. MEV-Boost users actually need to trust the relay to deliver, well, basically trust them on delivering the correct content of the block on time. However, they need to trust more than that. So that's the moment we look into the database of the relay and look at one of the new data tables that we created, which is called metadata. Here, we collect all the metadata of each HTTP request from connected validators and block builders. So it actually gives us a strong link between the validator public key and the MEV-Boost IP address, which sits right next to the consensus layer client. And this is just because validators have to, they must register with every relay they're connected to. Okay, here, now we have the exercise going on. As an attacker, we just have a dashboard with all the information we need. Right here are the attestations that we have, with the occurrence count of the attestations that we accumulate; like, the more attestations we have, the stronger the link we build between the public keys and the peer IDs. And then we know the IP address from the peer ID pretty easily, and then plug the information of the MEV-Boost IP address on top of that. And the next question is who to attack. So here we actually just pick slots 246 and 247, but we actually have a wider choice, because the proposers are announced two epochs ahead of time, right? To launch this attack, we use traditional DDoS techniques, including the ICMP flood and SYN flood. But just to make our attack a bit more clear, we actually use a memory stress test directly on this instance to obtain the same result. And here we can see that blocks 246 and 247 are skipped. And now it's time to check the RANDAO value of slots 246 and 247. They're actually the same. So congratulations, fellow attackers. We managed to stop the block production and also control the RANDAO value. Yep. So if I can skip to the last slide. Yeah, so here we just demoed the attack as an attacker. I welcome everybody who is interested in this topic to further discuss the implications and improvements on how to avoid this attack. We're going to have a report released soon on the Ethereum Research forum, as well as a discussion that's going to be held at 1.30 later at the blue discussion corner. To close my talk, I would like to give a heartfelt thank you to the Ethereum Foundation, which gave us a grant to conduct this research. Thank you very much. Thank you very much. Thank you very much, Q, and sorry for missing your name. Now, are there any questions? Come on, don't be shy. So just to clarify, we were able to skip the solo staker's slot here? Is that the attack, or was it just the RANDAO manipulation? So we actually managed to make the solo staker unable to propose at the slot that it was given. So by doing so, we indirectly manipulate the RANDAO. And, like, what do you think are the financial implications of this? Would the big staking pools be doing this to maximize their rewards? Actually, good question. 
Because, well, one of the further discussions that we had around this experiment is that we believe that the current definition of MEV, where we always look at just one block, whatever transactions get included into one block, this narrow definition, which is also the definition written on, like, the Ethereum Foundation's page, is a bit too narrow. So by kicking out the production of one block, we can effectively have this skipped-slot MEV. Just an example: some people would like to have this multi-block MEV, right? This is one attack you can use to achieve this multi-block MEV. And also, it just basically, it also creates a threat to the resilience of the network, right? If you can easily identify a block proposer and then just kick them out at the place where they're supposed to produce a block, then does it mean that, especially for solo stakers who have very poor, let's say, who don't really have access to strong network protection, does it mean that that percentage of the Ethereum network's solo stakers is at risk? Last question, please. How difficult is it to obtain the IP address of a certain validator, and what is the role of MEV-Boost in this kind of process? Yeah, thanks. It is very easy, actually. Right now, you can just modify a little bit of your, I don't know, Lighthouse client. Then you can collect attestations by just dumping them into a database. Very simple data analytics, and then you can have this correlation. So the longer you run that, the stronger the link that you have. Even though there are some technical details about how we're actually going to tackle the aggregated attestations. But yeah, you know, you're going to figure it out. And we also know the pattern of GossipSub. So combining these two, the analytics is actually pretty accurate. And the role of the MEV relay is that, because solo stakers, let's say they already invested into hardware, right, and also put in some stake to run their own staking setup, their goal, one of their main goals, is to make sure that they have economic returns. And very likely they're going to introduce whatever helps them build the most profitable block. And here right now we have, thanks to your research, Tony, is that we know that more than 90% of the validators are using a MEV relay or the MEV-Boost architecture. 
And Relay, which is an obvious, very obvious single point of failure, no, let's say a single entity, like a central entity", "eventId": "devcon-7", - "slot_start": 1731576600000, - "slot_end": 1731578400000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1ww4IN7FSAReDpOBeMK96jT38LWmsqkRdbQBoBnUIH-k", - "resources_slides": null, + "slot_start": 1731639900000, + "slot_end": 1731640500000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1d-GmGcNLmt1uMkzzdpBPgSsDGcejG31g_wfOtXcVIvg", + "resources_slides": "https://drive.google.com/file/d/1zQOsDWjaz5U_rR_njTavH_W8LwNu-EDH/view", "speakers": [ - "vectorized" + "qianchen-q-yu" ] }, "vector": [ 0, 0, 0, + 0, 6, 0, 0, @@ -685303,7 +683369,9 @@ 0, 0, 0, + 6, 0, + 6, 0, 0, 0, @@ -685358,13 +683426,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -685424,6 +683485,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -685436,6 +683498,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -685534,7 +683597,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -685672,7 +683734,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -685786,6 +683847,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -685878,13 +683940,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, - 0, - 0, 0, 0, 0 @@ -685892,44 +683952,49 @@ }, { "session": { - "id": "solidity-then-now-and-the-future", - "sourceId": "HZ3DEF", - "title": "Solidity: Then, Now, & the Future!", - "description": "In this talk, I will be presenting the prospect of Q1 2025 release of the Solidity language compiler including the following sections:\r\n\r\n- Latest features and developments\r\n- via-ir: what's happening and what's next\r\n- Experimental Solidity: The future of the language\r\n- Timeline & roadmap", + "id": "solving-multichain-ux-lessons-from-cosmos-for-the-rollup-ecosystem", + "sourceId": "QKRCF7", + "title": "Solving Multichain UX: Lessons from Cosmos for the Rollup Ecosystem", + "description": "This talk addresses how we tackled challenges in the Cosmos ecosystem like liquidity fragmentation, multi-chain accounts, and cross-chain contract standards, and how these solutions can be used to improve cross-chain UX in the rollup ecosystem. 
\r\n\r\nIf time allows, we'll also dig into designing flexible and scalable abstractions for rapid deployment of integrations (bridges, dexs, wallets) across not just many chains, but many diverse tech stacks.", "track": "Developer Experience", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Developper", "featured": false, "doNotRecord": false, "tags": [ - "Tooling", - "Languages", - "solidity", - "Languages", - "Tooling" + "Fragmentation", + "UI/UX", + "Account Abstraction", + "defi", + "cross-chain", + "aggregation", + "Account Abstraction", + "Fragmentation", + "UI/UX" ], "keywords": [ - "Smart Contract Development", - "Solidity" + "DeFi", + "Cross-chain", + "Aggregation" ], - "duration": 1612, + "duration": 1470, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "bfcbec6589bfc1a512d43c1f597aa804af938b8641ac913c6cd2aa96b53a8edb", + "sources_youtubeId": "2J2XDbN8Q6M", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735c6dc9dbb7a90e139e25d", + "sources_streamethId": "6735d1009dbb7a90e1500f8e", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731574800000, - "slot_end": 1731576600000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1GmwHGEiPwMU4yfyA7ipBeOYh8M7CK0BgtepZdbx3JFA", - "resources_slides": null, + "slot_start": 1731577800000, + "slot_end": 1731579600000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/10vnF2ObOK5u8Z8XcfbB0o6Q0DIS1LwGHZA_ieNhsIXg", + "resources_slides": "https://drive.google.com/file/d/188K8egWhH8b_3TPWp4zFMKL7Qu9OfUQt/view", "speakers": [ - "vishwa-mehta" + "nicolas-lara" ] }, "vector": [ @@ -686531,16 +684596,8 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, + 6, 0, 0, 0, @@ -686705,7 +684762,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -686733,6 +684789,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -686740,6 +684797,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -686747,6 +684805,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -686781,7 +684840,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -686879,6 +684937,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -687049,7 +685108,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -687158,6 +685216,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -687166,6 +685225,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -687251,7 +685311,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -687260,6 +685319,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -687269,46 +685329,48 @@ }, { "session": { - "id": "solo-staking-in-the-dark-forest-a-survival-guide", - "sourceId": "REJ3SW", - "title": "Solo staking in the dark forest: a survival guide", - "description": "Solo stakers are key to keeping the Ethereum ecosystem geographically decentralized and censorship resistant. But PBS leaves solo stakers extremely vulnerable to a variety of narrowly targeted DDOS attacks, made possible by public information on the p2p network. This talk will explain why privacy matters on the p2p layer, provide an overview of the attacks solo stakers would face in PBS, and demonstrate some of these in a sandbox environment.", - "track": "Core Protocol", + "id": "sovereignists-vs-globalists", + "sourceId": "ZHQPKA", + "title": "Sovereignists vs. Globalists", + "description": "Sovereignists vs. Globalists is the real battle we should be fighting.\r\n\r\nFundamentally the goal of the space is to be Sovereign. 
I think very few people came into the space with the idea that well we should all rely on a single, one World government to control everything we do. But rather how do we give users a choice about what kind of systems they actually interact with on a day-to-day basis.\r\n\r\nWhat we should be thinking about when building truly decentralized truly resilient systems, is how to", + "track": "Cypherpunk & Privacy", "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Stakers/Validators", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Staking", - "Privacy", - "Security", - "MEV", - "metadata", - "MEV", - "Privacy", - "Security" + "Decentralization Improvements", + "Digital Sovereignty", + "Emergency Plan", + "resiliency", + "technology", + "Decentralization Improvements", + "Digital Sovereignty", + "Emergency Plan" ], "keywords": [ - "Metadata" + "Vision", + "future", + "resilient technologies" ], - "duration": 582, + "duration": 589, "language": "en", - "sources_swarmHash": "2d1f8fe35ffd0fab6e8af4f1e2723e4a8f364a230dce06f962976bdd2be70268", - "sources_youtubeId": "ZHXWCH6N9tQ", + "sources_swarmHash": "d3f169f1c5a963a6d1fb5a2c42bc23a601f8fc35e83d4ce8cc99511927430f94", + "sources_youtubeId": "P34RfDISJRc", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736d5be74749a4b892d57fc", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d5be74749a4b892d57fc.vtt", - "transcript_text": " Hello, hello. Yeah, I guess it's my turn. Yeah, surprise title. Actually, I'm going to talk about why Dark Forest is even more dangerous to solo stakers with an attack that strikes at the heart of the Ethereum validation. So together, actually, we're going to play as an attacker. And we're going to find out a validator or a solo staker that actually uses MEV boost, which is next to their consensus client. In order to find them, we actually are going to do the exercise together. But once we find them, what we're going to do is we're going to disrupt the block production of the solo staker and stop their rewards and manipulate the Randall value which is responsible for assigning new block proposers. So my name is Q, I'm the decentralized technology architect at Hopper. At Hopper we build privacy preserving infrastructure. So we'll actually use a sandbox environment for demo purpose, but there's nothing actually preventing this attack happening in real life right now. Our sandbox actually has three consensus clients with a total of 192 validators in a kurtosis network with also one MEV relay and one block builder. For now, those validators are just a list of anonymous public keys, but not for long. So while this test has been running, let me explain how we actually conduct this attack. So first, we actually need to de-anonymize validators to identify our target. So validators actually need to attest block productions, and those attestations are propagated into the peer-to-peer network in GossipSub. We can actually just silently observe those attestations to create a probabilistic correlation between the validator's public key to the IP address of the consensus layer client. And we actually have this, oh, the test has actually been running. We just let it run for a little while. Block has been proposed. Just leave it there. So now is the moment to actually explain the other information that we need to conduct this attack. We actually need a little bit of help from the trusted map relay. 
Maybe we're just observing the relay, or we're colluding with the MEV relay, or we're just the relay ourselves. MEV-Boost users actually need to trust the relay to deliver, well, basically trust them on delivering the correct content of the block on time. However, they need to trust more than that. So that's the moment we look into the database of the relay and look at one of the new data tables that we created, which is called metadata. Here, we collect all the metadata of each HTTP request from connected validators and block builders. So it actually gives us a strong link between the validator public key and the MEV-Boost IP address, which sits right next to the consensus layer client. And this is just because validators have to, they must register with every relay they're connected to. Okay, here, now we have the exercise going on. As an attacker, we just have a dashboard with all the information we need. Right here are the attestations that we have, with the occurrence count of the attestations that we accumulate; like, the more attestations we have, the stronger the link we build between the public keys and the peer IDs. And then we know the IP address from the peer ID pretty easily, and then plug the information of the MEV-Boost IP address on top of that. And the next question is who to attack. So here we actually just pick slots 246 and 247, but we actually have a wider choice, because the proposers are announced two epochs ahead of time, right? To launch this attack, we use traditional DDoS techniques, including the ICMP flood and SYN flood. But just to make our attack a bit more clear, we actually use a memory stress test directly on this instance to obtain the same result. And here we can see that blocks 246 and 247 are skipped. And now it's time to check the RANDAO value of slots 246 and 247. They're actually the same. So congratulations, fellow attackers. We managed to stop the block production and also control the RANDAO value. Yep. So if I can skip to the last slide. Yeah, so here we just demoed the attack as an attacker. I welcome everybody who is interested in this topic to further discuss the implications and improvements on how to avoid this attack. We're going to have a report released soon on the Ethereum Research forum, as well as a discussion that's going to be held at 1.30 later at the blue discussion corner. To close my talk, I would like to give a heartfelt thank you to the Ethereum Foundation, which gave us a grant to conduct this research. Thank you very much. Thank you very much. Thank you very much, Q, and sorry for missing your name. Now, are there any questions? Come on, don't be shy. So just to clarify, we were able to skip the solo staker's slot here? Is that the attack, or was it just the RANDAO manipulation? So we actually managed to make the solo staker unable to propose at the slot that it was given. So by doing so, we indirectly manipulate the RANDAO. And, like, what do you think are the financial implications of this? Would the big staking pools be doing this to maximize their rewards? Actually, good question. 
So by kicking out the production of one block, we can effectively have this skip slot MEV. Just an example, like some people would like to have this multi-block MEV, right? This is one of the attack of having that to achieve this multi-block MEV. And also, it just basically, it also creates a threat on the resilience of the network, right? If you can easily identify a block proposer and then just kick them out at the place where it's supposed to produce a block, then does it mean that, especially for solo stakers who have very poor, let's say, who doesn't really have access to strong network protection, does it mean that those percentage of solo stakers of Ethereum network, they are at risk. Last question, please. How difficult is it to obtain the IP address of a certain validator and what is the role of MevBoost in this kind of process? Yeah, thanks. It is very easy, actually. Right now, you can just modify a little bit of your, I don't know, Lighthouse client. Then you can collect attestations by just dumping them into a database. Very simple data analytics, and then you can have this correlation. So the longer you run that, the stronger link that you have it. Even though there's some technical details about how we're actually going to tackle the aggregated attestations. But yeah, you know, you're going to figure it out. And we also know the pattern of gossips up. So combining these two, the analytics is actually pretty accurate. And the role of the map relay is that because solo stakers, let's say they already invest into a hardware, right, to, and also put into some stake to run their own staking setup, their goal is, one of their main goal is to make sure that they have economic returns. And very likely they're going to introduce whatever that helps them to build the most profitable block. And here right now we have, thanks to your research, Tony, is that we know that there are more than 90% of the validators, they are using MapRelay or MapBoost architecture. And Relay, which is an obvious, very obvious single point of failure, no, let's say a single entity, like a central entity", + "sources_streamethId": "6736df8b74749a4b899627d6", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736df8b74749a4b899627d6.vtt", + "transcript_text": " Perfect. Oh, wow. Thank you very much. So, today on the last day of DEF CON, I'm going to talk about Sovereignness vs. Globalists. This is very much a talk that should get you to think about the ethos that we all follow behind building in this space and what specific sort of implementations we should be optimizing for. So first of all, the goal of the space is to be sovereign. It is to be able to control your own assets, to freely move around, to have censorship-resistant assets, censorship-resistant assets. This is fundamentally the goal of the space is to be sovereign. And with that, I would start that a single decentralized network is still centralized. And we'll do a nice Q&A in a sec on whether you agree with this. But fundamentally, if there's something, a single of something, you cannot leave, right? Like a single of something can always become compromised. So a single decentralized network is you cannot leave. A single of something can always become compromised. So a single decentralized network is still actually very centralized. From a physical perspective, it won't survive World War III. 
And this is sort of like, I think when you think a little bit about this problem, like neither Ethereum nor Bitcoin nor any other global consensus is really going to survive World War III. It just seems unlikely that we're going to spend a bunch of bandwidth on global consensus for magic internet money when all global fiber has been cut. From a social perspective, it can be captured. And so as a result, we shouldn't design blockchains like a single one-world government. Quick show of hands here. Who thinks that a single one-world government is a good idea? Yes, this is always zero. And so no one ever seems to think that a single one-world government is a good idea, and so we shouldn't be designing our systems sort of in the vein of there's a single global security model to rule us all. Because fundamentally, different people have different requirements for their security models. So my parents live in Germany. They trust the local government; they want something that the local government can sort of interject and revert. This is fundamentally the trust model they care about. If you live in Argentina, you don't. You probably much more prefer global security, independent instances. But you still want to, like, if you travel to Germany, you still want to be able to roam into the local security zone. For myself, I prefer the Ethereum mainnet over my local government. But security and the security model that you care about is a fundamentally personal choice. And we should be designing these systems to work for everyone in the world and not say, well, everyone must have the same notion of what security they care about. The really important part here, though, is we need to maintain composability and allow applications to roam. So it's not a viable option to say, well, everyone install the next 500 iPhone applications in order to be compatible with all these different security environments. Phones in the real world actually did this really well, where I could buy a phone in Switzerland. It still works here, it still works in the US, it still works in China, right? Like, I can use the same fundamental interface to interact in many different specific security models. This also brings me to why we want to be thinking about this from a scale-free network perspective, and why this is the only truly scalable way to do this with local sovereign security, because fundamentally, states should only be co-located together when it's valuable to be together. Like, not all financial transactions in the world need to be under the same security model. They should be under different ones, where, for example, we are physically in proximity. So all financial transactions in Bangkok should be locally settled in Bangkok. And this is also the strongest argument for why this will always be faster than Solana. Like, Solana can never beat us here because they cannot violate the laws of physics around the speed of light. So if we do local settlement, we will always be the single fastest thing possible. And yeah, you only want to have to opt into co-location of state when you actually have the need for it, because it's expensive. And so yeah, the basic example is that users and applications can roam between instances. 
So sometimes I want to be on global Ethereum, sometimes I want to be on local Bangkok, but I can use the same fundamental interfaces and the same type of applications to roam across all these different security models. And this leads to another very nice thing, which is that everyone becomes their own chain. Because we can get rid of the weird abstraction barrier between users and chains, where users are just 1-of-1 chains. For example, Circle is a good example of this. Circle, no matter how hard they pretend not to be, is a blockchain. They're the blockchain that issues USDC on a 1-of-1 key. Fundamentally, when I think about why I joined the space seven years ago, it's because we want to build for World War III. Like, we want to build World War III resilient infrastructure. Because fundamentally, the world is becoming more unstable. It's becoming very multipolar at the moment. And so the infrastructure that we built in the past sort of won't survive a dedicated attack. Like, all our past infrastructure was built around Pax Americana, and it's not resilient enough. So we need a replacement upgrade for existing coordination infrastructure. Because on day one of World War III, global connectivity will just stop. This, I think, needs to be treated as a given: that global fiber is going to go away and most satellites will drop out of the sky. The other thing to consider is privacy. We need this not to hide something, but fundamentally for national defense, community defense. Think of privacy as something that you need to have in order to protect your community from the outside world. Yes. So I will quickly skip over this. But fundamentally, local instances must work regardless of global connectivity. And so you want to be as local as required, but as global as possible, depending on the underlying physical networking characteristics that you find. One of the best examples for this is, oh, if you're in this room, you're in charge, very fundamentally. You should be running your own nodes, you should be running your own infrastructure. If you sit here, you have to provide digital infrastructure for your parents and your local community. No one else is going to do this. A good example of this is ham radios in Zug, where a bunch of volunteers formed the Ham Radio Club that provides six-hour emergency backup, like within six hours they have emergency backup comms online in case connectivity drops in Zug, where I live in Switzerland. So they have these trucks with generators and antennas, and they drive to the mountain peaks in order to restore local area networking again for emergency services. That's a quick TLDR. A single decentralized network is still very centralized, and a single global security model is fundamentally terrifying. And fundamentally, you have to run your infrastructure. If you rely on Infura, you're doing it wrong. And privacy for national defense. Yeah, we've got to be building World War III resilient infrastructure as a framing. Thank you very much. Thank you very much, Adrian. So, if there are any questions, please raise your hand. I will toss the mic to you. Don't worry, it doesn't hurt. Soft, again. Doesn't break. Let's go. Almost. All right, but besides the Sook network, what are the possible practical implementations of this approach? For example, if we lose connectivity, we should still be capable of forming a local coordination, effectively a local on-demand chain, to just use within this room. 
Even if we just want to outside of the World War III example, but in the more specific example, we want to play a trading game, or we want to do local trading. We should run this on our own chain here, because this is going to be the single fastest thing you can do. Whenever you care about latency, you want to really strongly consider what kind of state you need to co-locate next to each other. And if you need to have fast latency or low latency, you need to be as close to each other as possible. Good question, by the way. Are there any more questions for Adrian? Have one? You got it on the back. How do you deal with the double spend problem if you can't reconcile global state? So you can tax state, and again, very good question, actually. Most people don't think about this. But so you can tag each individual piece of state to be, and I skipped over this in the slide, to be controlled by a specific state controller. And for example, if you want to do local settlement, and we currently have all our state and global Ethereum, we'd move it temporarily into the custody of the local state controller. And this could be a blockchain run by, I don't know, 100 people here, or like the 4,000 people at the conference. And then whenever we want to tear this down, we can move our state back out. This requires a little bit of thinking around how do you do state layouts in these databases that we ended up calling blockchains. If you want to learn more about this, I suggest looking into the Anoma resource machine, which is a very nice resource model where every individual piece of state, it can be complex state as well, like a smart contract, is actually tagged as an individual UTXO that can be moved around to different controller instances. Very cool. Thank you very much, Adrian. Thank you very much.", "eventId": "devcon-7", - "slot_start": 1731639900000, - "slot_end": 1731640500000, + "slot_start": 1731648600000, + "slot_end": 1731649200000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1d-GmGcNLmt1uMkzzdpBPgSsDGcejG31g_wfOtXcVIvg", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1Ce0TClLRzVeI_KHk3Q7wjGn9iUM0mxltuQHeo2UgQuw", + "resources_slides": "https://drive.google.com/file/d/1whtOkyBrKZ67tAcpHy85M2XKaajSgL2s/view", "speakers": [ - "qianchen-q-yu" + "adrian-brink" ] }, "vector": [ @@ -687316,6 +685378,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -688063,7 +686126,7 @@ 0, 0, 0, - 6, + 0, 0, 6, 0, @@ -688109,6 +686172,35 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -688179,7 +686271,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -688235,6 +686326,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -688269,6 +686361,38 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -688542,7 +686666,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -688562,12 +686685,14 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -688575,6 +686700,41 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "speed-hacking-challenge", + "sourceId": "RSYU7K", + "title": "Speed Hacking Challenge", + "description": "​Prize Pool: $50,000\r\n\r\n​A High-Stakes Speed Hacking/ CTF Challenge\r\nAre you ready to dive headfirst into a thrilling web3 adventure? 
Join us for ETH Escape, a heart-pounding Speed Hacking & Capture the Flag (CTF) challenge designed to test your coding skills and problem-solving abilities on Ethereum.\r\n\r\nhttps://lu.ma/viyjky8t", "track": "[CLS] ETH Escape - Speed Hacking Challenge", "type": "Mixed Formats", "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [], "keywords": [], "duration": 8930, "language": "en", "sources_swarmHash": "bc7eb48bd3a673d7dc1c4deec5b1496ac7f9e02af789aead2f3e359692311eab", "sources_youtubeId": "CRMPai0pUpw", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673cc095982f234a126a635e", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673cc095982f234a126a635e.vtt", "transcript_text": " Thank you. 15 minutes in, 90 minutes remain. No solves yet. Thank you. All right. We have our first solve here at minute 76. Ddust2, Ddust2, where are you? Congratulations on getting this started. Kicked off here in the finals. You've got the lead. How does it feel? A little bit nervous, actually. A little bit nervous. What is your hacking spirit animal that you try to inhabit while you hack? Alright, we'll go with the elephant. They're very smart. Thank you. Second solve, NP Hard. We've got two people up on the leaderboard to take the top two spots so far with 72 minutes left. NP Hard, where are you? Remind me. Alright, good work. What did you eat for breakfast that's making you so successful today? Power of Corgi. Alright, love it. Power of Corgi, and the shirt says it all. Let's see, you can jump into the number three spot. I'm betting it's happening soon, and then it's gonna be a race so you could finish the next two challenges. Thank you. On the leaderboard, third solve. Looks like they went with a different challenge to start. So One, now in first place, currently poised for $10,000 in rewards. We've got Ddust2 in second place, currently poised for $7,500 in rewards, and NP Hard for $5,000 in rewards, currently as it stands. Where is One located? Top of the leaderboard. Where are you? Is that you? All right, they're staying anon. Okay, fair enough. 65 minutes remain. Remember, if we have three people solve all three, that will end the clock before the remaining 64 minutes. Hack away, guys. Thank you. One hour remaining. One hour remaining. One hour remaining. Thank you. We've got a real race for the prizes now, Rage. Congrats for getting on the board. We've got three folks who have solved one particular challenge, and then one at the top of the leaderboard who solved a different challenge. So it's wide open for who's going to get the rewards. But again, as it stands now, we've got One taking home ten thousand dollars in addition to their winnings already, $7,500 for Ddust2, and NP Hard would get five thousand dollars in this round. So it's anybody's game still, 51 minutes and 45 seconds left. Keep hustling. Everybody's doing a great job. Thank you. Another update to the leaderboard. One pushing ahead by solving the second challenge. 
So they're on their way to possibly capturing the $10,000 grand prize. A reminder that places four through 14 in the finals all receive the same amount. So if we have a top three that solve all three before the timer's up, the finals will be over. Thank you. Welcome to the leaderboard, Mage Intern. It's heating up. Who can get into those top three spots for the big prize money? 47 minutes to go. Thank you. Bill joins the leaderboard in position two to shake things up a bit in the top three. NP Hard drops to number four. So good work, Bill, getting in there for the $7,500 spot as of right now. Thank you. Welcome to the leaderboard, Slipper. Slipper with two solves very quickly. So that puts them into second place now over Bill. So it's heating up. We've got two folks with two solves. The first one to get the last one is gonna take home the $10,000 prize, unless someone can solve two before they get to that last one. With third place wide open still, but being held by Bill as we speak. Just under 40 minutes left of the final round, in case it ends early as people start to solve things. I want to give a big thank you to the Ethereum Foundation. I want to give a huge thank you to our friends here at Friendly Maltese Citizens for putting together these awesome challenges. We appreciate all the hard work you did on that. And then we also want to give a huge shout out to the upcoming Ethereum Protocol Attackathon with rewards up to $1.5 million and the wonderful support that we've all received from Bybit, Wormhole, Arbitrum, The Graph, GMX, and Base. But thank you again to Friendly Maltese Citizens. You guys did a great job to create a great event. So keep it up, everyone. Keep your heads down. Coming in on the home stretch here. Thank you. Bye. Thank you. The only change to the leaderboard we have is Bill getting a second solve a few minutes ago. That means that our 1, 2, and 3 right now are all racing to get that third solve. First one to do it gets the $10,000 grand prize. Coming down to the wire, we have 22 minutes left. Three people in the driver's seat to stay in one, two, and three, and plenty of others behind you can certainly get up there as well. So keep it up. Let's see what happens. Thank you. All right, players, we have 15 minutes left. And as a token of my appreciation for letting me bother you all day while you're trying to focus, we will release a hint right now to see who can try to take home the prize in the last 15 minutes. Thank you. There were typos in the first hint, so there's a second hint out there now to clarify. Thank you. As it stands now, we still have One in first place with the $10,000 prize, $7,500 to Slipper if things stand as they are, and Bill would take home $5,000. The rest of the pool goes to the rest. However, it does seem that if there's one solution to one of these, to one of the ones that has not been solved yet, that person can easily jump into first place if there's no other solvers. So it's anybody's game in this room in the last 12 minutes, good luck. Thank you. Ten minutes. Thank you. 
Five minutes remaining, but we can now say with confidence that Slipper is the inaugural champion of the Devcon CTF and will take home the grand prize of $10,000 in addition to what he's already made today. So $10,000 plus what he's already made and the physical gift we'll be giving him. Huge hand for his work today. There's still a few minutes left to see who can take the other spots, because it's wide open with one more solve for pretty much anyone on the board right now. Thank you. Five seconds. In five seconds, will anybody be able to get a solve in? Thank you. All right, pencils down. A big round of applause to the whole group here. Everyone in this room should be pretty proud. That was a lot of people that came in this room and did some speed hacking today. And you are the best among them today. A huge congrats to Slipper. Where are you? Slipper, come on up for a minute. Congratulations on winning the whole thing. I have a question for you. You took a little bit of a calculated risk there in holding two answers at the same time; it looked like you submitted them back to back. Was that an intentional strategy to put pressure on One? So I saw that I had already submitted the one solution, but it turns out it was not shown on the scoreboard. So I just realized I didn't submit the one successfully. So it looked like a brilliant strategy that was actually a brilliant accident. Congratulations, you did a fantastic job today. How do you think you'll spend the money? How much would I get? $5,000, because you and I will split $5,000. No, $10,000 plus what you already got from the previous round. So you'll get a little over $10,000 and a gift from us at the awards ceremony later. I haven't decided yet. But I would thank ChatGPT for helping me. So I would pay for them. So a nice donation should go to ChatGPT for helping during the competition. Congratulations to you for being the inaugural winner. I want to shout out really quickly again our great friends over here, the Friendly Maltese Citizens, for putting these challenges together. I think they did a really great job of putting together challenges that were of all difficulty types and managing the event and the platform. So we really appreciate their help here, our team here as well. I want to thank the Ethereum Foundation for allowing us to run this, the first one they've done, I believe, at Devcon, and hopefully not the last, and for being our partner in doing so. And I really want to thank the, as you know, the upcoming Attackathon, the audit competition for the Ethereum protocol, the entire thing's in scope. Saw a lot of really talented people in this room today who I think could do a really great job working on this project that starts in about 12 or 11 days. Eight weeks of hunting, a ton of educational materials to get you up to speed, and the sponsors of that program who are supporting it we'd also like to thank, which is Bybit, Wormhole, Arbitrum, The Graph, GMX, and BASE. So a huge round of applause. I also want to congratulate our second and third place winners for respectively earning seventy five hundred dollars and five thousand dollars. So that's One. Where's One? Raise your hand, One. Stand up, One. Congratulations. Congratulations. And then Bill. Where's Bill? Bill, congratulations. Winning $5,000 plus what you did earlier today. So really great work by all of you. I think you're all coming out in the black today by getting some rewards and cash. 
We also have a reward for you later, which we'll do at the award ceremony here after we do a little bit of a fireside chat regarding security. So hopefully you'll stick around for that, so you can collect your gear and then get to our team with your wallet numbers if you haven't already done that, so you can collect your USDC in the next few weeks. The last thing is, a lot of the folks in this room, which is amazing, didn't even sign up until they walked up today and just came in and dominated. So really great job on you all for showing up, coming through here, getting into the finals, making a few dollars. And because of that, I don't believe you got swag packs. So we have swag packs for you as well. Ash, if you could raise your hand over there. He has them for you. So make sure you collect those as well before you depart. But yeah, thank you all for participating. Find anyone in an Immunefi shirt to give some feedback to, because we want to make sure we give feedback to the Ethereum Foundation. So when they run these again, they can kind of decide, you know, what the best route is, and get some good feedback from you all on what's too hard, what you would have changed, and we'll try to adapt it that way. Thanks for participating in the first one and have a great day. Stay tuned for the fireside chat. Thanks, gentlemen. Thank you. All right. We'll start the fireside chat here in about three minutes, folks. Thank you. All right, folks, we're going to get started here in a minute. If we could actually just get folks to sit down to the left or the right and not in front of the camera, that would be great. Thank you. Just a quick reminder to all finalists who just participated. Please stay here for the award ceremony so we can give you your gift. And ensure that you've talked to our team and given us your wallet address so we can deliver your reward money. All right. With that, I think we'll start to get the fireside chat here going since we've got the live stream. And what I think we'll do first, to make it quite easy on ourselves, is just introduce who we have here. You've heard me talking all day, so enough of me, but I'm Mike O'Keefe. I head up sales and customer success at Immunefi, which does crowdsourced bug bounty programs and audit competitions, and I will, let's just move left to right to start. Alright, hey everyone. My name is Michael Llewellyn. I'm the head of solutions architecture at OpenZeppelin. I focus a lot on working with our security audit team with some of our top clients, working on kind of weird, out-of-the-ordinary sorts of audit requests, maybe things that are outside the EVM, and then also do a lot of work on security operations for DAOs, security councils, and lots of other very particular things for the industry. Hello, my name is Neville. So I'm a program analysis expert, primarily in management at Dedaub. So I'm also a co-founder. My time is spread throughout, you know, like engineering, management, sales, and a lot of operations as a co-founder. But yeah, I'm a security researcher at heart. Hey, I'm Luna. I'm the co-founder and CEO of Zellic. My background is in vulnerability research, reverse engineering. So I used to do iOS zero-click. 
That's the kind of background I have, and then, yeah, nowadays I mainly just do Zellic with my co-founder Jazzy. We recently acquired a company called Code4rena, you might have heard of it. I work very closely with that team as well now, so that's kind of what I do. Yeah, I'm Pietro. I'm a lead auditor at Chain Security, and we are an auditing company based in Zurich who specializes in complex DeFi projects, and we've audited a bunch of projects that you might know: Maker, Curve, Lido, Polymarket. We've got bugs if you want later. We find them and we package them. And we specialize in the EVM ecosystem. Awesome. And so thank you all for joining the panel. Thanks for coming here today. Nice way to end the long day of deep focus these folks had. And I think it's been great, because I think Immunefi has worked with all of you in different capacities throughout the years. So it's a great collection of folks here. I'm going to start with one that's a little off script that we hadn't talked about yet. It's Pietro. Pietro, you participated today. What did you think of the CTF? What did you think of the strength of the challenges, and how do you feel you performed? Yeah, so I'm not too satisfied with how I performed. I review code eight hours a day all year, but I very rarely deploy anything, very rarely interact with RPCs myself. So I found the bugs quite rapidly. I was in round three, and I think they were very easy to spot. But then I had some trouble with the on-chain part. It was an interesting challenge still, but I'm curious to see what the other rounds were about. There were six with different challenges, and the finals also seemed very interesting, but I haven't read the code, so I'm curious to see that. Perhaps a team format would work better for you next time. You spot the bugs, and then you hand it off to a teammate. Yeah, probably, or just taking a couple of hours to get used to deploying stuff beforehand also would have worked. Well, we were thrilled to have you participate. A lot of the audience today, a lot of the participants in the CTF today, which was fantastic, were developers, security researchers, auditors. We had a really great mix and many others, I'm sure, as well. So I think keeping that in mind, keeping the audience in mind when we go through the next few questions over the next 15 minutes or so, we can kind of cater to those audiences however you please and whatever angle you guys want to come at it from. But the first thing I'd like to ask is, a lot of these folks are involved in crowdsourced security. That's kind of what the, you know, what the Ethereum Protocol Attackathon will be about. I'm curious how you all see crowdsourced security next to your businesses and how you operate. And maybe where I'll start actually is with you, because you all recently acquired Code4rena, so you obviously see some value next to the audits. So I'd love to hear your perspective. Yeah, I mean, the main reason we acquired Code4rena is we just think we can deliver, like, a better experience, better security for our customers. The main thing that I would show is this concept that we have introduced called Audits Plus. So when you have a consultative audit, you're going to get, like, you know, a small number of very highly skilled people, and they're going to look at the most important parts of the protocol, but it's fundamentally a time-boxed audit, right? 
So they're going to get maybe one week, two weeks, three weeks to look at this thing, so they can't afford to look at every single, like, gas optimization that there could be, or, like, oh no, this function's not documented. They need to go find and make sure that the critical invariants of the protocol are firm. Now, on the other hand, a lot of times you do want to get information about all the possible things that could go wrong, and not just that, you might want to have, you know, hundreds or thousands of eyes on the code base, because that is what you want before you not just, you know, ship it to mainnet, but also before you put it in front of a bug bounty, because that would be very expensive. The earlier you catch a bug in your development lifecycle, the cheaper it is, whether that be at the integration test or the unit test or the code review, internal audit. The earlier you catch these things, the cheaper it is. So you want to get a consultative audit, because you can have a firm authority say, like, okay, yeah, we looked at it and we're pretty sure it's secure. And then you want the additional reassurance of, like, okay, not just that, we've got a crowd of people that looked at it and it's definitely been very thoroughly picked apart. So because of that, we think that consultative audits and competitive audits, or, you know, crowdsourced security, these two go hand in hand in creating something that's extremely high assurance. So that's kind of what we think. We think that crowdsourced security is very important. Yeah, that's why we acquired Code4rena. Wonderful. I'll open it up to the rest of the panel for anyone that's got a response to this question as well. Well, crowdsourced, this kind of security is really, really effective. And it's really enabled by the community. I mean, listen, everyone is motivated to learn new technologies, make a name for themselves, and also do the right thing, and finally make a little bit of money. And especially for bug bounties, which are the last line of defense, I mean, having participated in bug bounties myself, I feel that you are highly motivated, you know, to spend a whole night looking for a bug or something like that. So, yeah, it is highly effective, you know, in certain contexts, but it really depends who's there, you know, looking at your code. To be fair, the white hat program is the last line of defense? Right, but that's something that I wouldn't do because of legal issues and things like that. Sorry, I was just posting. I can't help myself. Pre-deployment, post-deployment. Yeah. It's a model we've definitely seen a lot of our clients start to work with. Like, we do mainly consultative audits, slash private audits, lots of different names for it. We expect to continue to see demand, but we've definitely seen a lot of clients that are specifically integrating us into a broader process where we're typically doing a private audit or consultative audit, and then they plan to later go to a crowdsourced competition. So it's good from the perspective that if we did, by some chance, which is always some chance, miss something, like, there are additional layers of security that are going to potentially catch that, so it's not all on us. And potentially there's several private audits as well. And then afterwards, we've actually found ourselves working more and more with clients in the fix review process of competitive audits, because of the fact that you can have a lot of issues coming in. 
Even if they're low, that could be applied to the code base, and having someone who's already seen that code base and picked it apart a bit can help the fix review process as well and ensure the code is as secure as possible before it goes to mainnet. I do expect this is going to be a model that continues to grow, and there's going to be probably a lot of coordination between both private and competitive audits and the ways that clients use them. Yeah, so we work with traditional audits, but we also recommend a competitive audit if you can also afford it. We have seen that the classes of bugs that are caught are not necessarily the same between the two. Competitive audits don't guarantee that you will have good people, but in general you have a high probability that people who are skilled at what you're doing will look at it, and they will find specific things in which they are very proficient. What we do is really more trying to cover everything. And we try to give guarantees in the sense that we know what we understand and we know what we don't understand at the end of an audit. With a competitive audit, it's harder to know what's been seen and what's not been seen. Yeah. And one thing we really like about the collaboration between an audit firm, like OpenZeppelin, Chain Security, or Zellic, for example, and a competitive audit is that, for example, you do the private audit, and then the researchers who are assigned on that private audit now have a very high context understanding of the code base, and they can prepare the notes that then arm, or equip, the community auditors to be even more effective than they would be if they had just been going in blind, so to speak. They can be like, hey, watch out for these parts. These parts are especially important to check, but of course, don't let that rabbit-hole you. One more thing I'd add is, if you start with a private audit first, you're more likely to be told you're not ready, go back and try again. As opposed to a competition that's just gonna say, all right, it's payday. So, probably do the private audit first. 400 issues. So true. Excellent point. So I'll skip around a little bit, especially for the audience, who I know is here since I surveyed them earlier. Particularly for developers, what are some of the best practices you see as auditors that you wish were more widely adopted? And why do you think they may not be widely adopted? So I think developers often think about security in a later stage of a project. And they're not thinking about their code in an adversarial way from the get-go. They should do more code reviews internally. They should do more internal audits in a way where some part of the team tries to break the protocol. I think this is very useful. Yeah. Two things you can do today to improve your security: one is 100% code coverage. I know you guys out there, you guys do not have 100% code coverage, I know. And the other thing is how you write your functions. I read this great blog post, I think it was by Nillian, and I think they're totally right. You should do function requirements, and then check invariants, and then protocol invariants at the end. So you should just assert that the, like, if you have, like, a, you know, CFAMM, just assert the constant function is still constant at the end. Or, like, you know, the whatever solvency check, that kind of thing. If you just throw in a bunch of assertions at the end, it can just kill a lot of attacks, or at least mitigate them. What might have been a crit might now be a medium. 
Yeah, so one other thing that I wish people did a bit more is more static analysis and more formal verification. We don't see this as much, probably because it's a little bit hard to use some of these tools. Also, these tools haven't evolved, haven't benefited from enough money for R&D at this point. And also, the underlying language changes so much, so it's hard to make them really, really spot on. But yeah, it might also be a cultural thing, and this is also, like, offloaded towards the auditor. So the auditors do use these tools, but, like, the developers themselves don't use them as much currently. Yeah, I definitely put, like, a big plus on invariants and just, like, doing it throughout the process, like, both defining them, building a threat model around it, testing against it, and even when you're going to production, like, monitoring and looking for flaws in the invariants occurring later. Another thing that I think doesn't happen nearly enough is for people to include the access control and deployment process and other things, if not in the scope of the audit, then at the very least being very thorough about how that's going to impact the code. We have actually seen things that we were basically putting down as a critical vulnerability because we saw in the documentation how they were going to deploy the code in a way that was basically going to wreck them. They actually were arguing with us, saying, oh, it's not in the code. I'm like, well, it's in the docs that were with the code that was in the scope, and it would have wrecked your protocol, so we're going to count it. I think for people, the deployment process is almost part of the code: scripting it and being very careful about how the keys that are signing deployments are handled, using a multi-sig or something else to handle that process. And yeah, never putting yourself in a position where you just hope some EOA key on some developer's laptop is not going to get phished and wreck the protocol. And I wanted to add on to what Pietro had to say. It is true, I think all of us will empathize with the fact that a lot of teams, essentially, this is very common, and I totally understand why they feel that way, because they just need to, you know, go to market. They think of security in, like, a, oh, we'll just do it at the end kind of way. But that's really bad. It's actually more expensive, it's slower, because you're going to get an audit by one of us, because all these people here are very good firms. And we're going to tell you, like, hey, your design does not work. You're going to have to rewrite this entire component, come back in two months after you've rewritten it. So before, you thought you were going to launch in December, and now you're launching in March. So it's like tax planning. It's like post hoc tax planning. It doesn't work. It's really bad. You should think about your security as you are building your protocol, at the design phase. Yeah. Yeah. And something you said earlier is, the biggest ROI splash you're going to get is catching that stuff to the left of the audit beforehand, which is not as sexy as the bug bounty business or just giving it to the auditors or all of that stuff. But I equated it to the job of Homeland Security. The best job you do to the left of the audit is never going to be talked about, but it's the highest ROI and the most difficult to calculate, avoiding those situations. Sorry. A lot of the work you do initially is also relevant to security, even if it's not security-oriented sometimes. 
Like, good design is something that makes security assessment much easier, even if you haven't been thinking about security the whole time. So I would say being good at software development helps security in general, yes. That dovetails nicely into another question I want to ask the panel here, and then maybe we'll have time for a question or two from the audience if they have some, which is, talking to some folks here today who are developers early in their projects, some of them being first or second time through this process: what's the best piece of security advice you can give them at the earliest stage of their journey of creating something new? Yeah. Oh, sorry. Michael, did you want to go? Yeah, like, figure out what you want your protocol to do and what you want it to not do, and write them down, and then, like, look at them while you're developing your code, and then add tests for them, add assertions for them. Like, we find this to be a very common problem in our audits, is that we talk to the customer, like, okay, so, like, what are you worried about? And they're like, well, you know, we don't want to get hacked. I'm like, okay, what does that mean? Right? You should know what it means for your protocol to get hacked. So then you can, like, actually think about, like, okay, well, I'm worried about this or that. Basically having a real threat model. I'll add to that. Like, definitely 100% threat modeling is, like, where you should start. I think, because the other thing is, like, security has to scale with the project. Like, are you going to be able to afford to have, like, a full monitoring infrastructure set up, like, going to deployment if you're, like, you know, you don't have any TVL, you don't have a lot of things? Like, we will still say, like, yeah, you should probably do that. But if you, like, literally can't, you don't have the resources, like a lot of teams, to support security infrastructure. But then suddenly, in a couple of months, TVL has grown, it's gotten massive, there's a lot more value at stake. They never really thought through how are we going to upscale our security. I've seen people that will build up their access control and hard-code in something like an EOA owner, and they're like, well, maybe we should transfer it to a multi-sig. And they're like, actually, we don't know how to do that. We didn't prepare for it. That's a particularly egregious example that does exist in a billion-dollar protocol. Try looking for it, where someone just hard-coded the owner, and you just hope that whoever has that key never gets kidnapped or decides to go rogue. But just in general, be prepared to scale up your security as you have the resources. If you can't spend on it today, plan to know, okay, at some point, at this level of TVL, let's start taking whatever profits might come from that and invest it in making our protocol more secure. Because most of y'all here, unless you win enough from this competition to pay for a whole audit, can't get your code audited by a top-tier firm, but maybe at some point you do or want to. And be prepared to upgrade and other things. Yeah, like, you want to have escape hatches for yourself. It's kind of like lawyering stuff up. Like, has anyone here worked with lawyers before? They're, like, so expensive. It's, like, so annoying. 
So you tell them, like, okay, okay, we don't need, like, this frigging Cayman Islands, Panama thing. Just, like, make me the Delaware LLC, and we'll just fix it later if it gets really big. But these things are, you know, lawyers are good at, like, okay, yeah, we'll just move all this stuff over, it'll be fine. They're good at having these escape hatches for you, because people have been making legal contracts for a long time. Think about your security in ways so that you have escape hatches for, okay, now my thing has scaled, how am I going to keep this secure as it's growing. But you don't want to be dumping tons of money on making your protocol super fancy, secure, early on, because you probably don't need it. Yeah, I would say start really simple. You might have a lot of very good ideas, but really make the minimal protocol you can do, and you will save a lot of money on security, on audits. Reuse validated components that have been audited previously, and understand how they work, and the audit process will be much simpler for everybody. One final thing that I'd like to add is, probably what you're gonna be building, 99% of the time, it's not gonna be original. Like, a lot of people have built something which is similar, and you can look at what happened to that thing. Probably it failed. Probably it had some security issues. Probably it had some economic issues, right, if it's a Web3 project. So go back, see what happened, how it failed, and make sure that your design at the very early stages, and also your requirements, are such that this will not happen. That's obviously very hard, but if you do it at an early stage, it's going to be much cheaper. Excellent. Any questions from the audience? Any that we could squeeze in, one or two questions, if people have them? Hi. So I'm building a debit card solution where I'm assuming that once a transaction is signed and sent to the network, it will be successful, because the whole thing is standardized. But in parallel, I am running a consensus model which will monitor those transactions, whether they are successful or not. And if it fails, it will trigger the system and act accordingly. So what things should I keep in mind, like, what could be the security threats which I could face? Yeah. So just to understand, you're trying to build a system where someone signs a transaction, submits it, it succeeds or fails, and this is just for a normal payment. So what we are trying to do is, we are bringing the transaction time to four to five seconds. So once the transaction is signed and sent to the network, we assume that it will be successful. But in parallel, we are running a consensus model that will monitor those transactions and build a consensus on whether the transaction is successful or not. If it's successful, we are good to go. But if it fails, we will trigger the system, and that system will act accordingly. Like, if it fails due to network congestion or some logic error or something. So what other things should I keep in mind, like, what other scenarios in which the transaction could fail, or how? It's hard to say exactly, because I think there's a lot more I would need to know about the system. But just off the top of my head, I would say there could be an instance where the way that you're, like, the node that you're monitoring it from is somehow down. 
Or, like, I guess, be prepared for whatever infrastructure you're relying on to be able to confirm success or failure. Have, like, a backup plan if it goes down, or, like, ensure that someone isn't able to exploit that, making some very general assumptions here. Like, we are using a blockless protocol, which has different nodes. And I think those nodes are building the consensus. Not a single node is building the consensus or a proof that a transaction has failed or succeeded. So I think that wouldn't be the case. But there could be something which I should be aware of. Again, I'm trying to second-guess how your system works. But one of the things that comes up is whether the consensus algorithm has a finality gadget, whether you can actually tell whether a transaction has been finalized or not, because otherwise you'd need to wait a lot more time than four seconds. Four seconds does sound really, really low, though. Thank you. Time for one more question, if anyone in the audience has one? One question over to the left there. Hi. So I'm not a security researcher myself, but I've seen a lot of hacks that are due to consensus mechanisms and sometimes mass social engineering of a protocol. I was wondering, what do you think about how we can basically build mechanisms that are more hard-coded to prevent such exploits? More hard-coded? Yeah, because basically the human factor is more unpredictable. How can we hard-code consensus to be less... Maybe it's a difficult question. When you say consensus, do you mean, like, for example, a multi-sig, does that count as consensus? Yes, and also at scale, like for a DAO. For a DAO? Yeah, so transparency is very important here, because it makes it a lot easier to see what you're actually voting for, for example. I think one notable example was there was one for Tornado Cash where they just proposed a thing and they were like, yeah, it totally does this, and it just did something else. So yeah, monitoring your DAO is pretty good, transparency. Another thing is just time locks. So make it so it's easy to cancel a thing that's in the pending state in the time lock. So this way there's a longer window for things that are potentially breaking to get canceled and noticed. Of course, the downside there is you get a lot more latency, so if you have some critical thing that needs to get jammed through, it may make it hard to do that in a timely way. So that's another trade-off. Do you guys have any ideas? Yeah, we've had some really fun experience, and I say fun in a sarcastic way, with DAOs that were under some form of governance attack. In one case, a whale buying a lot of tokens and trying to push through a proposal on Compound that would have given them an even larger part of the treasury and then control of the DAO. The way they did it was very tricky. They took a bunch of tokens they had purchased and spread them amongst a bunch of accounts. We did actually have security monitoring that picked this up as strange activity. But even then, it's like, okay, so who is a genuine new delegate, and who's one of these many accounts controlled by this one entity? There are very few ways to do that in a permissionless system, because it's essentially a Sybil attack in that sense, as long as they have the resources to allocate. I think at least being aware, I think ultimately having a reputation in the ecosystem, and then having some checks and balances on them, and recovery mechanisms. 
In that case, having a multi-sig that can veto proposals if they're somehow violating the DAO, but you have very specific rules: that multi-sig is elected by the DAO, they're accountable. That's where I'm starting to go towards, especially for DAOs. And for multi-sigs themselves, I think definitely having strong reputation. Possibly even KYC, it kind of depends. People are controversial around that. But I think if you can, if it's appropriate, to say, we know these are individual people, do a signing ceremony where they're all in different places, and they can verify they're all different people, it's not like one guy controlling four or five different keys. Yeah, I think it's like there's no good technical solution to these things. These are just, like, okay, having strong social mechanisms to recognize who's a good actor and who's not, because it's not as easy as, like, Ethereum, where it's like, okay, we're running a bunch of nodes that are all coming to deterministic answers. And the general idea is just, like, make sure you have enough nodes that they can always outvote the bad nodes on the deterministic answer. With multi-sigs and DAOs, these are not deterministic, because these are not deterministic answers, they're like, yes, this is good; yes, this is bad. So there does have to be a lot more work on the consensus of who the actors are, why they are allowed to participate in the system, and how do you root out bad actors and prevent them from gaining control. As a shitpost, you could take the MakerDAO approach and you could make the decisions deterministic by offloading it to AI. All right, end of shitpost. It's also an interesting experiment anyways. I'm curious how it goes. So another mechanism I've seen is to make the quorum of a governance mechanism increase in size the more contentious the vote is, so that a simple majority is not enough. So it's a variable quorum. Yeah, of course, it's like a tradeoff between how easy it is to get stuff done, same with time locks, and how safe it is. Yeah. I mean, on one end of the spectrum, everything's just hard-coded. On the other end of the spectrum, it's just, like, whatever goes. Any other final comments from the panel here today? I know we're a little bit over time, but I want to thank you all for joining. For anyone in the audience who hasn't had an opportunity to work with either Immunefi or any of these firms, these are some of our favorites in the industry. There are many others that are great too, but I was really happy to put together a panel with this group, because it's really great. Can't go wrong with anyone sitting up here. Anything? Any last words, panel? I think everyone on this panel is really awesome. Likewise. I agree. Very nice of you. Thank you all for joining us. Really appreciate it, and hope everyone has a great rest of the week. And with that, I can let the panel go, and I think we owe some rewards to our CTF participants today. Thank you all again. Vielen Dank. Thank you. Close out the day. We have here our awards for our wonderful CTF participants. We're going to go ahead and start with Slipper again. What's that? I'll let Slipper show you. All right. So in addition to your cash rewards, again, make sure that you've given your wallet information, your correct wallet information, double, triple check it, so you get your correct payment. But if Slipper is still here, we do have a physical gift for you if you wanna come up and grab it. Slipper, are you still here? Come on up real quick. 
So in addition to the cash, congratulations again on being the first one. Maybe you can show the rest of the participants what we'll be giving out here to the finalists. So in addition to the cash prize, we've got a nice set of Beats by Dre Studio Pro headphones for everybody to take home. And make sure you get your Immunefi swag bag as well, commemorating the event here today. Let me go through and call names up to come get your stuff. I'm going to go in a random order now, just because my spreadsheet is a little screwy. Wait, no, something's wrong. All right. Billy, is Billy here? Billy, come on up. Receive your gift, or I can bring it to you. Billy? Here you are. Sir, congratulations. A great job done by you today. Chain Security also left a little bit of swag here too, so feel free to grab something there. All right. Next, we have Tony. Tony, come on up. Congratulations to you. Way to be a finalist here in the CTF. A lot of hours spent here today. Heads down. Next, we're looking for Naoya. I hope I'm pronouncing that correctly. Is Naoya here? Naoya. Maybe that's the better pronunciation. All right, we'll hold that one off to the side. I can call the last name. Is that all? I'm getting a good username. But are you sure that's it? Yeah, this is from Mixi. No, no, no, thank you. Did you want to go next? All right, yep. Harry. Harry? All right. Congrats, great work today. Good work. Chi-Sue. Chi-Sue. Great work. You can give us one of these, j, or rkskek. We've got somebody that's going by the name of just j. J. The email is rkskek and some numbers. Oh, you got one right. Okay. So that's okay. It's all the same. That's fine. Yep. So that means we're missing a Bill still then, right? Yeah, it's all good. Yeah, just, you can just, yeah, it's all the same. We can just pass them out. Great. All right. Perfect. Rage next. Rage. Rage, I love the hat. Good work. All right. There we go. Already knew. Just need a bag. Congratulations. Great work. Thank you. Great work, Mage Intern. Mage Intern, great work. M4K2. Congratulations. Vlad. Vlad, are you still here? All right, Vlad. Vlad, I think you even won your group, too, I believe. So congratulations. Nipun. Nipun. Great work. The early leader stays on the leaderboard. And Valera, last but not least. Valera. Congratulations. Great work. Once again, thanks, everybody, for coming through. Thanks for participating in the CTF. I know it was a lot of hard, focused work in one room all day. Please look forward to the $1.5 million Ethereum Protocol Attackathon that will be hosted soon, and with the sponsors for that program. And again, a big thanks to the Ethereum Foundation for allowing us to run this event here at Devcon, and the Friendly Maltese Citizens. Okonami, are you still here? Okonami? Minamoi. Minimoi. And take care. Have a great rest of your week and enjoy the rest of Devcon. 
Thank you.", + "eventId": "devcon-7", + "slot_start": 1731551400000, + "slot_end": 1731573000000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1TFlUSOJNbrhtdG-u3_ajWbpR--vyfBXX6KSwtcFkFI0", + "resources_slides": "", + "speakers": [] + }, + "vector": [ 0, 0, 0, @@ -688594,6 +686754,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -688628,7 +686789,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -688636,68 +686796,15 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "solving-multichain-ux-lessons-from-cosmos-for-the-rollup-ecosystem", - "sourceId": "QKRCF7", - "title": "Solving Multichain UX: Lessons from Cosmos for the Rollup Ecosystem", - "description": "This talk addresses how we tackled challenges in the Cosmos ecosystem like liquidity fragmentation, multi-chain accounts, and cross-chain contract standards, and how these solutions can be used to improve cross-chain UX in the rollup ecosystem. \r\n\r\nIf time allows, we'll also dig into designing flexible and scalable abstractions for rapid deployment of integrations (bridges, dexs, wallets) across not just many chains, but many diverse tech stacks.", - "track": "Developer Experience", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Developper", - "featured": false, - "doNotRecord": false, - "tags": [ - "Fragmentation", - "UI/UX", - "Account Abstraction", - "defi", - "cross-chain", - "aggregation", - "Account Abstraction", - "Fragmentation", - "UI/UX" - ], - "keywords": [ - "DeFi", - "Cross-chain", - "Aggregation" - ], - "duration": 1470, - "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6735d1009dbb7a90e1500f8e", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731577800000, - "slot_end": 1731579600000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/10vnF2ObOK5u8Z8XcfbB0o6Q0DIS1LwGHZA_ieNhsIXg", - "resources_slides": null, - "speakers": [ - "nicolas-lara" - ] - }, - "vector": [ 0, 0, 0, - 6, 0, 0, 0, @@ -689294,7 +687401,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -689488,7 +687594,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -689496,7 +687601,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -689504,7 +687608,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -689636,7 +687739,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -689916,7 +688018,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -689925,7 +688026,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -689945,8 +688045,68 @@ 0, 0, 0, + 2, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + { + "session": { + "id": "speedrun-rollups-a-beginners-guide-to-l2s-zk-and-wtf-people-are-talking-about-on-panels", + "sourceId": "L3Z78Q", + "title": "Speedrun Rollups: A Beginner's Guide to L2s, ZK, and WTF People are Talking About on Panels", + "description": "The L2 landscape has grown, both in terms of size, but also the development of the tech and the new problems that need to be solved.\r\n\r\nThis talk aims to take you from zero to hero, equipping you with the history, development, and current state of L2s, so you can maximize your Devcon experience without having to carry around a dictionary to understand WTF people are talking about.", + "track": "Layer 2", + "type": "Workshop", + "expertise": "Beginner", + "audience": "Hobby", + "featured": false, + "doNotRecord": false, + "keywords": [ + "ELI5" + ], + "tags": [ + "Layer 2s", + "Scalability", + "ZK-EVMs", + "eli5", + "Layer 
2s", + "Scalability", + "ZK-EVMs" + ], + "language": "en", + "sources_swarmHash": "", + "sources_youtubeId": "fJQQZsFYHUw", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "emily" + ], + "eventId": "devcon-7", + "slot_start": 1731389400000, + "slot_end": 1731394800000, + "slot_roomId": "classroom-e", + "resources_presentation": "https://docs.google.com/presentation/d/17fKWm64cWJz5zLVi9Av7ZypNBcbMuJYxb55zQcDbVJ8", + "resources_slides": "https://drive.google.com/file/d/1RM25t1m49nZWSMP9IV8DBxjeB_rcIX7g/view" + }, + "vector": [ 0, 0, 0, @@ -689954,6 +688114,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -690008,7 +688169,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -690020,67 +688180,15 @@ 0, 0, 0, - 2, 0, 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "sovereignists-vs-globalists", - "sourceId": "ZHQPKA", - "title": "Sovereignists vs. Globalists", - "description": "Sovereignists vs. Globalists is the real battle we should be fighting.\r\n\r\nFundamentally the goal of the space is to be Sovereign. I think very few people came into the space with the idea that well we should all rely on a single, one World government to control everything we do. But rather how do we give users a choice about what kind of systems they actually interact with on a day-to-day basis.\r\n\r\nWhat we should be thinking about when building truly decentralized truly resilient systems, is how to", - "track": "Cypherpunk & Privacy", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Community", - "featured": false, - "doNotRecord": false, - "tags": [ - "Decentralization Improvements", - "Digital Sovereignty", - "Emergency Plan", - "resiliency", - "technology", - "Decentralization Improvements", - "Digital Sovereignty", - "Emergency Plan" - ], - "keywords": [ - "Vision", - "future", - "resilient technologies" - ], - "duration": 589, - "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6736df8b74749a4b899627d6", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736df8b74749a4b899627d6.vtt", - "transcript_text": " Perfect. Oh, wow. Thank you very much. So, today on the last day of DEF CON, I'm going to talk about Sovereignness vs. Globalists. This is very much a talk that should get you to think about the ethos that we all follow behind building in this space and what specific sort of implementations we should be optimizing for. So first of all, the goal of the space is to be sovereign. It is to be able to control your own assets, to freely move around, to have censorship-resistant assets, censorship-resistant assets. This is fundamentally the goal of the space is to be sovereign. And with that, I would start that a single decentralized network is still centralized. And we'll do a nice Q&A in a sec on whether you agree with this. But fundamentally, if there's something, a single of something, you cannot leave, right? Like a single of something can always become compromised. So a single decentralized network is you cannot leave. A single of something can always become compromised. So a single decentralized network is still actually very centralized. From a physical perspective, it won't survive World War III. 
And this is sort of like, I think when you think a little bit about this problem, like neither Ethereum or Bitcoin or any other global consensus is really going to be surviving World War III. It just seems unlikely that we're going to spend a bunch of bandwidth on global consensus for really going to be surviving World War III. It just seems unlikely that we're going to spend a bunch of bandwidth on global consensus for magic internet money when all global fiber has been cut. From a social perspective, it can be captured. And so as a result, we shouldn't design blockchains like a single one world government. Quick show of hands here. Who thinks that a single one world government is a good idea. Yes, this is always zero. And so no one ever seems to think that a single one world government is a good idea and so we shouldn't be designing our systems sort of in the vein of there's a single global security model to rule us all. Because fundamentally, different people have different requirements for their security models. So my parents live in Germany. They trust a local government, they want something that the local government can sort of interject and revert. This is fundamentally the trust model they care about. If you live in Argentina, you don't. You probably much more prefer global security, independent instances. But you still want to, like, if you travel to Germany, you still want to be able to roam into the local security zone. For myself, I prefer the Ethereum mainnet over my local government. But security and the security model that you care about is a fundamentally personal choice. And we should be designing these systems to work for everyone in the world and not say, well, everyone must have the same notion of what security that they care about. The really important part here, though, is we need to maintain composability and allow applications to roam. So it's not a viable option to say, well, everyone install the next 500 iPhone applications in order to be compatible with all these different security environments. Phones in the real world actually did this really well, where I could buy a phone in Switzerland. It still works here, it still works in the US, it still works in China, right? Like, I can use the same fundamental interface to interact in many different specific security models. This also brings me to why we want to be thinking about this from a scale-free network perspective, and why this is the only true scalable way to do this with local sovereign security, because fundamentally, states should only be co-located together when it's valuable together. Like, not all financial transactions in the world need to be under the same security model. They should be under different ones, where, for example, we are physically in proximity. So all financial transactions in Bangkok should be locally settled to Bangkok. And this is also the strongest argument for why this will always be faster than Solana. Like Solana can never beat us here because like they cannot violate the laws of physics around speed of light. So like if we do local settlement, we will always be the single possibly fastest thing. And yeah, you only want to have to opt into co-location of state when you actually have the need for it because it's expensive. And so yeah, the basic example is that users and applications can roam between instances. 
So sometimes I want to be on global Ethereum, sometimes I want to be on local Bangkok, but I can use the same fundamental interfaces and the same type of applications to roam across all these different security models. And this leads to a very nice other thing is that everyone becomes their own chain. Because we can get rid of the weird abstraction barrier between users and chains where users are just one-on-one chains. For example, Circle is a good example of this. Circle, no matter how hard they pretend not to be, is a blockchain. They're the blockchain that issues USDC on a one-on-one key. Fundamentally, when I think about why I joined the space seven years ago, it's because we want to build for World War III. Like we want to build World War III resilient infrastructure. Because fundamentally, the world is becoming more unstable. It's becoming very multipolar at the moment. And so the infrastructure that we built in the past sort of won't survive a dedicated attack. Like all our past infrastructure was built around Pax Americana, and it's not resilient enough. So we need a replacement upgrade for existing coordination infrastructure. Because on day one of World War III, global connectivity will just stop. This is, I think, this needs to be treated as given that global fiber is going to go away and most satellites will drop out of the sky. The other thing to consider is that privacy, we need this not to hide something, but fundamentally for national defense, community defense. Think of privacy as something that you need to have in order to protect your community from the outside world. Yes. So I will quickly skip over this. But fundamentally, local instances must work regardless of global connectivity. And so you want to be as local as required, but as global as possible, depending on the underlying physical networking characteristics that you find. One of the best examples for this is, oh, if you're in this room, you're in charge, very fundamentally. You should be running your own nodes, you should be running your own infrastructure. If you sit here, you have to provide digital infrastructure for your parents and your local community. No one else is going to do this. A good example of this is Ham Radios in Zug, where a bunch of volunteers formed the Ham Radio Club that provides six-hour emergency backup, like within six hours they have emergency backup comms online to in case connectivity drops in Zug, where I live in Switzerland. So they have like these trucks with generators and antennas, and they drive to the mountain peaks in order to restore local area networking again for emergency services. That's a quick TLDR. A single decentralized network is still very centralized and a single global security model is fundamentally terrifying. And fundamentally, you have to run your infrastructure. If you rely on Fiori, you're doing it wrong. And privacy for national defense. Yeah, we've got to be building World War III resilient infrastructure as a framing. Thank you very much. Thank you very much, Adrian. So, if there are any questions, please raise your hand. I will toss the mic to you. Don't worry, it doesn't hurt. Soft, again. Doesn't break. Let's go. Almost. All right, but besides the Sook network, what are the possible practical implementations of this approach? For example, if we lose connectivity, we should still be capable of forming a local coordination, effectively local on-demand chain, to just use within this room. 
Even if we just want to outside of the World War III example, but in the more specific example, we want to play a trading game, or we want to do local trading. We should run this on our own chain here, because this is going to be the single fastest thing you can do. Whenever you care about latency, you want to really strongly consider what kind of state you need to co-locate next to each other. And if you need to have fast latency or low latency, you need to be as close to each other as possible. Good question, by the way. Are there any more questions for Adrian? Have one? You got it on the back. How do you deal with the double spend problem if you can't reconcile global state? So you can tax state, and again, very good question, actually. Most people don't think about this. But so you can tag each individual piece of state to be, and I skipped over this in the slide, to be controlled by a specific state controller. And for example, if you want to do local settlement, and we currently have all our state and global Ethereum, we'd move it temporarily into the custody of the local state controller. And this could be a blockchain run by, I don't know, 100 people here, or like the 4,000 people at the conference. And then whenever we want to tear this down, we can move our state back out. This requires a little bit of thinking around how do you do state layouts in these databases that we ended up calling blockchains. If you want to learn more about this, I suggest looking into the Anoma resource machine, which is a very nice resource model where every individual piece of state, it can be complex state as well, like a smart contract, is actually tagged as an individual UTXO that can be moved around to different controller instances. Very cool. Thank you very much, Adrian. 
Thank you very much.", - "eventId": "devcon-7", - "slot_start": 1731648600000, - "slot_end": 1731649200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Ce0TClLRzVeI_KHk3Q7wjGn9iUM0mxltuQHeo2UgQuw", - "resources_slides": null, - "speakers": [ - "adrian-brink" - ] - }, - "vector": [ 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -690600,6 +688708,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -690676,7 +688785,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -690813,6 +688921,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -690832,7 +688941,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -690876,7 +688984,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -690888,6 +688995,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -690987,7 +689095,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -691030,7 +689137,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -691065,7 +689171,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -691098,6 +689203,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -691154,6 +689260,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -691311,6 +689418,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -691319,11 +689427,55 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "speedrunning-chain-abstraction-eips", + "sourceId": "UVUPRS", + "title": "Speedrunning chain abstraction EIPs", + "description": "We look at different EIPs in pipeline across the CAKE stack and how they relate to chain abstraction.", + "track": "Usability", + "type": "Workshop", + "expertise": "Expert", + "audience": "Developer", + "featured": true, + "doNotRecord": false, + "tags": [ + "cross-chain" + ], + "keywords": [ + "ChainAbstraction", + "CredibleAccounts", + "Cross-chain" + ], + "duration": 3844, + "language": "en", + "sources_swarmHash": "53eb035d3a0038451c75612289911a4e9159e09ec957dcc72886ed37e605cf1d", + "sources_youtubeId": "ZjmdGQjUI0I", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673844a81b0f83434d61fdc2", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "eventId": "devcon-7", + "slot_start": 1731655200000, + "slot_end": 1731660600000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1up9DjzXHNhdVzKddYHp52RLJfA0EO60JAyhULDNogTk", + "resources_slides": "https://drive.google.com/file/d/1E6Ukanpkcrwi2L327-0oIFeGGYvSPbQt/view", + "speakers": [ + "ankit-chiplunkar" + ] + }, + "vector": [ 0, 0, 0, @@ -691332,6 +689484,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -691391,129 +689544,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ] - }, - { - "session": { - "id": "speed-hacking-challenge", - "sourceId": "RSYU7K", - "title": "Speed Hacking Challenge", - "description": "​Prize Pool: $50,000\r\n\r\n​A High-Stakes Speed Hacking/ CTF Challenge\r\nAre you ready to dive headfirst into a thrilling web3 adventure? 
Join us for ETH Escape, a heart-pounding Speed Hacking & Capture the Flag (CTF) challenge designed to test your coding skills and problem-solving abilities on Ethereum.\r\n\r\nhttps://lu.ma/viyjky8t", - "track": "[CLS] ETH Escape - Speed Hacking Challenge", - "type": "Mixed Formats", - "expertise": "", - "audience": "Engineering", - "featured": false, - "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 8930, - "language": "en", - "sources_swarmHash": "231d194de9c2212e97d97770d6c29c2f98f71266e04c8c9cff6dac2126ecbfe2", - "sources_youtubeId": "CRMPai0pUpw", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673cc095982f234a126a635e", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673cc095982f234a126a635e.vtt", - "transcript_text": " Thank you. Thank you. Thank you. Thank you. 15 minutes in, 90 minutes remain. No solves yet. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. All right. We have our first solve here at minute 76. Ddust2, Ddust2, where are you? Congratulations on getting this started. Kicked off here in the finals. You've got the lead. How does it feel? A little bit nervous, actually. A little bit nervous. What is your hacking spirit animal that you try to inhabit while you hack? Alright, we'll go with the elephant. They're very smart. Thank you. Thank you. Thank you. Second solve, NP Hard. We've got two people up on the leaderboard to take the top two spots so far with 72 minutes left. NP Hard, where are you? Remind me. Alright, good work. What did you eat for breakfast that's making you so successful today? Power of Corgi. Alright, love it. Power of Corgi, and the shirt says it all. Let's see, you can jump into the number three spot. I'm betting it's happening soon, and then it's gonna be a race so you could finish the next two challenges. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. In the leaderboard, third solve. Looks like they went with a different challenge to start. So one, now in first place, currently poised for $10,000 rewards. We've got DDoS2 in second place, currently poised for $10,000 rewards. We've got DDoS 2 in second place, currently poised for 7,500 rewards, and NP-Hard for 5,000 in rewards, currently as it stands. Where is one located? Top of the leaderboard. Where are you? Is that you? All right, they're staying in on. Okay, fair enough. 65 minutes remain. Remember if we have three people solve all three, that will end the clock before the remaining 64 minutes. away guys. Thank you. Thank you. Thank you. Thank you. One hour remaining. One hour remaining. One hour remaining. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. We've got a real race for the prizes now, Rage. Congrats for getting on the board. We've got three folks who have solved one particular challenge, and then one at the top of the leaderboard who solved a different challenge. So wide open for who's going to get the rewards but again as it stands now we've got one taking home ten thousand dollars in addition to their winnings already 7500 for ddust2 and np hard would get five thousand dollars in this round so anybody's game still 51 minutes and 45 seconds left. Keep hustling. Everybody's doing a great job. Thank you. Another update to the leaderboard. One pushing ahead by solving the second challenge. 
So on their way, it's possibly capturing the $10,000 grand prize. A reminder that places four through 14 in the finals all receive the same amount. So if we have a top three that solve all three before the timer's up, the finals will be over. Thank you. Thank you. Welcome to the Leaderboard Mage Intern. It's heating up. Who can get into those top three spots for the big prize money? 47 minutes to go. Thank you. Thank you. Bill joins the leaderboard in position two to shake things up a bit in the top three. MP Hard drops to number four. So good work, Bill, getting in there for the $7,500 spot as of right now. Thank you. Thank you. Thank you. Thank you. Welcome to the leaderboard, Slipper. Slipper with two solves very quickly. So put them into second place now over Bill. So it's heating up. We've got two folks with two solves. The first one to get the last one is gonna take home the $10,000 prize unless someone can solve two before they get to that last one. With third place wide open still, but being held by Bill as we speak. Just under 40 minutes left of the final round coming into it before in case it ends as people start to solve things. I want to give a big thank you to the Ethereum Foundation. I want to give a huge thank you to our friends here at Friendly Maltese Citizens putting together these awesome challenges. We appreciate all the hard work you did on that. And then we also want to give a huge shout out to the upcoming Ethereum Protocol Attackathon with rewards up to 1.5 million and the wonderful support that we've all received from Bybit, Wormhole, Arbitrum, The Graph, GMX, and Base. But thank you again to friendly Maltese citizens. You guys did a great job to create a great event. So keep it up, everyone. Keep your heads down. Coming in on the home stretch here. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. 30 minutes remaining, 30 minutes remaining. Thank you. Thank you. Bye. Thank you. Thank you. Thank you. Thank you. Thank you. The only change to the leaderboard we have is Bill getting a second solve a few minutes ago. That means that our 1, 2, and 3 right now are all racing to get that third solve. First one to do it gets the $10,000 grand prize. Coming down to the wire, we have 22 minutes left. Three people in the driver's seat to stay in one two and three and Plenty others behind you can certainly get up there as well. So keep it up. Let's see what happens Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. Thank you. All right, players, we have 15 minutes left. And as a token of my appreciation for letting me bother you all day while you're trying to focus, we will release a hint right now to see who can try to take home the prize in the last 15 minutes. Thank you. There were typos in the first hint, so there's a second hint out there now to clarify. Thank you. As it stands now, we still have one in first place with $10,000 prize. As it stands now, $7,500 to Slipper if things stand as they are. And Bill would take home $5,000 prize as it stands now. $7,500 to Slipper if things stand as they are and Bill would take home $5,000. The rest of the pool up to the rest. However, it does seem that if there's one solution to one of these, to one of the ones that has not been solved yet, that person can easily jump into first place if there's no other solvers. So anybody's game in this room in the last 12 minutes, good luck. Thank you. Thank you. Ten minutes. Thank you. Thank you. Thank you. Thank you. Thank you. 
Five minutes remaining, but we can now say with confidence that Slipper is the inaugural champion of the DevCon CTF and will take home the grand prize of $10,000 in addition to what he's already made today. So $10,000 plus what he's already made and the physical gift we'll be giving him. Huge hand for his work today. There's still a few minutes left to see who can take the other spots because it's wide open with one more solve for pretty much anyone on the board right now. Thank you. Thank you. Thank you. Five seconds. In five seconds will anybody be able to get a solvent. Thank you. All right, pencils down. A big round of applause to the whole group here. Everyone in this room should be pretty proud. That was a lot of people that came in this room and did some speed hacking today. And you are the best among them today. A huge congrats to Slipper. Where are you? Slipper, come on up for a minute. Congratulations on winning the whole thing. I have a question for you. You took a little bit of a mitigated risk there in holding two answers at the same time it looked like you submitted them back to back. Was that intentional strategy to put pressure on one? So I saw that I had already submitted the one solution, but it turns out I was not shown on the scoreboard. So I just realized I didn't submit the one successfully. So it looked like a brilliant strategy that was actually a brilliant accident. Congratulations, you did a fantastic job today. How do you think you'll spend the money? How much would I get? $5,000, because you and I will split $5,000. No, $10,000 plus what you already got from the previous round. So you'll get a little over $10,000 and a gift from us at the awards ceremony later. I haven't decided yet. But I would thank ChatGPT for helping me. So I would pay for them. So a nice donation should go to ChatGPT for helping during the competition. Congratulations to you for being the inaugural winner. I want to shout out really quickly again our great friends over here, the friendly Maltese citizens for putting these challenges together. I think they did a really great job of putting challenges that were of all difficulty types and managing the event and the platform. So we really appreciate their help here, our team here as well. I want to thank the Ethereum Foundation for allowing us to run this, the first one they've done, I believe, at DevCon, and hopefully not the last, and for being our partner in doing so. And I really want to thank the, as you know, the upcoming attack-a-thon of the auto competition for the theory and protocol, the entire things in scope. Saw a lot of really talented people in this room today who I think could do a really great job working on this project that starts in about 12 or 11 days. Eight weeks of hunting, a ton of educational materials to get you up to speed, and the sponsors of that program who are supporting it we'd also like to thank, which is Bybit, Wormhole, Arbitrum, The Graph, GMX, and BASE. So a huge round of applause I also want to I want to congratulate our second and third place winners for respectively earning seventy five hundred dollars and five thousand dollars so that's one where's one raise your hand one stand up one congratulations Congratulations. And then Bill. Where's Bill? Bill, congratulations. Winning $5,000 plus what you did earlier today. So really great work by all of you. I think you're all coming out in the black today by getting some rewards and cash. 
We also have a reward for you you later which we'll do at the Award ceremony here After we do a little bit of a fireside chat Regarding security So hopefully you'll stick around for that so you can collect Your gear and then get to Our team with Your wallet numbers if you haven't already done that So you can collect your USDC in the Next few weeks, the last thing is a lot Of the folks in this room, which is amazing, didn't even sign up until they walked up today and just came in and dominated. So really great job on you all for showing up, coming through here, getting into the finals, making a few dollars. And because of that, I don't believe you got swag packs. So we have swag packs for you as well. Ash if you could raise your hand over there He has them for you. So make sure you collect those as well before you before you depart But yeah, thank you all for participating Find anyone in an immunify shirt to give some feedback to because we want to make sure we give feedback to the etherium foundation So when they run these again, they can kind of decide, you know What the best are out to do and get some good feedback from you all of what's too hard, what you would have changed and we'll try to adapt it that way. Thanks for participating in the first one and have a great day. Stay tuned for the fireside chat. Thanks gentlemen. Thank you. Thank you. Thank you. so Thank you. All right. We'll start the fireside chat here in about three minutes, folks. Thank you. Thank you. Thank you. Thank you. Thank you. All right, folks, we're going to get started here in a minute. If we could actually just get folks to sit down to the left or the right and not in front of the camera that would be great. Thank you. Just a reminder to all finalists who just participated. Quick reminder to all finalists who just participated. Please stay here for the award ceremony so we can give you your gift. And ensure that you've talked to our team and given us your wallet address so we could deliver your reward money. All right. With that, I think we'll start to get the fireside chat here going since we got the live stream. And what I think we'll do first to make it quite easy on ourselves is just introduce who we have here you've heard me talking all day so enough of me but I'm Mike O'Keefe I head up sales and customer success at Immunify which is CrowdSec bug bounty programs and audit competitions and I will let's just move left to right to start. Alright hey everyone my, everyone. My name is Michael Llewellyn. I'm the head of solutions architecture at OpenZeppelin. I focus a lot on working with our security audit team with some of our top clients, working on kind of weird, out-of-the-ordinary sort of audit requests, maybe things that are outside the EBM, and then also do a lot of work on security operations for DAOs, security councils, and lots of other very particular things for the industry. Hello, my name is Neville. So I'm a program analysis expert primarily in management in B-Dub. So I'm also a co-founder. My time is spread throughout, you know, like engineering, management, sales, and a lot of operations as a co-founder. But yeah, I'm a security researcher at heart. Hey, I'm Luna. I'm the co-founder and CEO of Zellick. My background is in vulnerability research, reverse engineering. So I used to do iOS zero-click. 
That's the kind of background I have and then yeah nowadays I mainly just do Zollock with my co-founder Jazzy we recently acquired a company called Code Farina you might have heard of it I worked very closely with that team as well now so that's kind of what I do yeah I'm Pietro I'm a lead auditor at Chain Security and we are an auditing company based in Zurich who specializes in complex DeFi projects and we've audited a bunch of projects that you might know, Maker, Curve, Lido, Polymarket. We've got bugs if you want later. We find them and we package them. And we specialize on EVM ecosystem. Awesome. And so thank you all for joining the panel. Thanks for coming here today. Nice way to end the long day of deep focus these folks had. And I think it's been great because I think Immunify has worked with all of you in different capacities throughout the years. So it's a great collection of folks here. I'm going to start with one that's a little off script that we hadn't talked about yet. It's Pietro. Pietro, you participated today. What did you think of the CTF? What did you think of the strength of the challenges and how you felt you performed? Yeah, so I'm not too satisfied with how I performed. I review code eight hours a day all year, but I very rarely deploy anything, very rarely interact with RPCs myself. So I found the bug quite rapidly. I was in round three, and I think they were very easy to spot. But then I had some trouble with the on-chain part. It was an interesting challenge still, but I'm curious to see what the other rounds were about. There were six with different challenges, and the finals also seemed very interesting, but I haven't read the code, so I'm curious to see that. Perhaps a team format would work better for you next time. You spot the bugs, and then you hand it off to a teammate. Yeah, probably, or just taking a couple of hours to get used to deploying stuff beforehand also would have worked. Well, we were thrilled to have you participate. A lot of the audience today, a lot of the participants in the CTF today, which was fantastic, were developers, security researchers, auditors. We had a really great mix and many others, I'm sure, as well. So I think keeping that in mind, keeping the audience in mind when we go through the next few questions over the next 15 minutes or so, we can kind of cater to those audiences however you please and whatever angle you guys want to come at it from. But the first thing I'd like to ask is a lot of these folks are involved in CrowdSec security. That's kind of what the, you know, what the Ethereum protocol attack-a-thon will be about. I'm curious how you all see CrowdSec security next to your businesses and how you operate. And maybe where I'll start actually is with you, because you all recently acquired Coderina. so you obviously see some value next to the audit So i'd love to hear your perspective Yeah, i mean the main reason we acquired code arena is we just think we can deliver like better experience better security for our customers The main thing that i would show is this concept that we have introduced called audits plus So when you have a consultative audit you're're going to get, like, you know, a small number of very highly skilled people, and they're going to look at the most important parts of the protocol, but it's fundamentally a time-boxed audit, right? 
So they're going to get maybe one week, two weeks, three weeks to look at this thing, so they can't afford to look at every single, like, gas optimization that there could be, or like, oh, no, this function's not documented, they need to go find and make sure that the critical invariance of the protocol are firm. Now, on the other hand, a lot of times you do want to get information about all the possible things that could go wrong, and not just that, you might want to have, you know, hundreds or thousands of eyes on the code base, because that is what you want before you not just, you know, ship it to mainnet, but also before you put it in front of a bug bounty, because that would be very expensive. The earlier you catch a bug in your development lifecycle, the cheaper it is, whether that be at the integration test or the unit test or the code review, internal audit. The earlier you catch these things, it's cheaper. So you want to get a consultative audit because you can have a firm authority say like, okay, yeah, we looked at it and we're pretty sure it's secure. And then you want the additional reassurance of like, okay, not just that, we've got a crowd of people that looked at it and it's definitely been very thoroughly picked apart. So because of that, we think that consultative audits and competitive audits or, you know, crowd security, these two go hand in hand in creating something that's extremely high assurance. So that's kind of what we think. We think that crowd security is very important. Yeah, that's why we call it crowd arena. Wonderful. I'll open it up to the rest of the panel for anyone that's got a response to this question as well. Well, crowds, this kind of security is really, really effective. And it's really enabled by the community. I mean, listen, everyone is motivated to learn new technologies, make a name for themselves, and also do the right thing, and finally make a little bit of money. And especially for bug bounties, which is the last line of defense, I mean, having participated in bug bounties myself, I feel that you are highly motivated, you know, like to spend a whole night looking for a bug or something like that. So, yeah, it is highly effective, you know, in certain contexts, but it really depends who's there, you know, looking at your code. To be fair, White Hat program is the last line of defense? Right, but that's something that I wouldn't do because of legal issues and things like that. Sorry, I was just posting. I can't help myself. Pre-deployment, post-deployment. Yeah. It's a model we've definitely seen a lot of our clients start to work with. Like we do mainly consultative audits, slash private audits, lots of different names for it. We expect to continue to see demand, but we've definitely seen a lot of clients that are specifically integrating us into a broader process where we're typically doing a private audit or consultative audit, and then they plan to later go to a crowdsourced competition. So it's good from the perspective if we did, by some chance, which is always some chance, miss something, like there is additional layers of security that are going to potentially catch that, so it's not all on us. And potentially there's several private audits as well. And then afterwards, we've actually found ourselves working more and more with clients in the fixed review process of competitive audits because of the fact that you can't have a lot of issues coming in. 
Even if they're low, that could be applied to the code base, and having someone who's already seen that code base and picked it apart a bit can help the fix review process as well and ensure the code is as secure as possible before it goes to mainnet. I do expect this is going to be a model that continues to grow, and there's going to be probably a lot of coordination between both private and competitive audits and the ways that clients use them. Yeah, so we work with traditional audits, but we also recommend a competitive audit if you can also afford it. We have seen the class of bugs that are code are not necessarily the same between the two. Competitive audits don't guarantee that you will have good people, but in general you have a high probability that people who are skilled at what you're doing will look at it and they will find specific things in which they are very proficient. What we do is really more trying to cover everything. And we try to give guarantees in the sense that we know what we understand and we know what we don't understand at the end of an audit. With a competitive audit, it's harder to know what's been seen and what's not been seen. Yeah. And one thing we really like about the collaboration between an audit firm, like OpenZeppelin, Chain Security, or Zelleck, for example, and a competitive audit is that, for example, you do the private audit and then the researchers who are assigned on that private audit, now they have a very high context understanding of the code base, and they can prepare the notes that then arm the, or equip the community auditors to be even more effective than they would if they had just been going in blind, so to speak. They can be like, hey, watch out for these parts. These parts are especially important to check, but of course, don't let that rabbit hole you. One more thing I'd add is if you start with a private audit first, you're more likely to be told you're not ready, go back and try again. As opposed to a competition that's just gonna say, all right, it's payday. So, probably do the private art first. 400 issues. So true. Excellent point. So I'll skip around a little bit, especially for the audience who I know is here since I surveyed them earlier. Particularly for developers, what are some of the best practices you see as auditors that you wish were more widely adopted? And why do you think they may not be widely adopted? So I think developers often think about security in a later stage of a project. And they're not thinking about their code in an adversarial way from the get-go. They should do more code reviews, internals. They should do more internal audits in a way where some part of the team tries to break the protocol. I think this is very useful. Yeah Two things you can do today To improve your security Is 100% code coverage I know you guys out there You guys do not have 100% code coverage I know And the other thing is Write your functions I read this great blog post by I think it was Nillian I think they're totally right You should do function requirements and then checks and variants and then protocol and variants at the end. So you should just assert that the, like if you have like a, you know, CFAMM, just assert the constant function is still constant at the end. Or like, you know, like the whatever solvent, that kind of thing. If you just throw in a bunch of assertions at the end, it can just kill a lot of attacks, or at least mitigate them. What might have been a crit might now be a medium. 
Yeah, so one other thing that I wish people do a bit more is more static analysis and more formal verification. We don't see this as much, probably because it's a little bit hard to use some of these tools. Also, these tools haven't evolved, haven't benefited from enough money for R&D at this point. And also, the underlying language changes so much, so it's hard to make them really, really spot on. But yeah, it might also be a cultural thing, and this also like this is like offloaded to towards the auditor so the auditors do use these tools but like the developers themselves don't use them as much currently yeah i definitely put like a big plus on invariants and just like doing it throughout the process like both defining them building a threat model around it testing against it and even when you're going to production like monitoring and looking for for flaws in the invariance occurring later. Another thing that I think doesn't happen nearly enough is for people to include the access control and deployment process and other things, if not in the scope of the audit, then at the very least being very thorough about how that's going to impact the code. We have actually seen things that we were basically putting down as a credible vulnerability because we saw in the documentation about how they were going to deploy the code in a way that was basically going to wreck them. They actually were arguing with us saying, oh, it's not in the code. I'm like, well, it's in the docs that was in the code that was in the scope and it would have wrecked your protocol so we're going to count it. I think people, including the deployment process, is almost part of the code and scripting it and being very careful about how the keys that are signing deployments being done, using a multi-sig or something else to handle that process. And yeah, never putting yourself in a position where some EOA key on some developer's laptop is hopefully not going to get phished and wreck the protocol. And I wanted to add on to what Pietro had to say. It is true, I think all of us will empathize with the fact that a lot of teams, essentially, this is very common, and I totally understand why they feel that way, because they just need to, you know, go to market. They think of security in, like, a, oh, we'll just do it at the end kind of way. But that's really bad. It's actually more expensive. it's more slow because you're going to get an audit by one of us because all these people here are very good firms. And we're going to tell you, like, hey, your design does not work. You're going to have to rewrite this entire component, come back in two months after you've rewritten it. So before you thought you were going to launch in December and now you're launching in March. So it's like tax planning. It's like post hoc tax planning. It doesn't work. It's really bad. You should think about your security as you are building your protocol at the design phase. Yeah. Yeah. And something you said earlier is the biggest ROI splash you're going to get is catching that stuff to the left of the audit beforehand, which is not as sexy as the bug bounty business or just giving it to the auditors or all of that stuff. But I equated it to the job of Homeland Security. The best job you do to the left of the audit is never going to be talked about, but it's the highest ROI and the most difficult to calculate, avoiding those situations. Sorry. A lot of the work you do initially is also relevant to security, even if it's not security-oriented sometimes. 
Like, good design is something that makes security assessment much easier, even if you have been thinking about security the whole time. So I would say being good at software development helps security in general, yes. That dovetails nicely into another question I want to ask the panel here, and then maybe we'll have time for a question or two from the audience if they have some, which is talking to some folks here today who are developers early in their projects, some of them being first or second time through this process. which is talking to some folks here today who are developers early in their projects, some of them being first or second time through this process. What's the best piece of security advice you can give them at the earliest stage of their journey of creating something new? Yeah. Oh, sorry. Michael, did you want to go? Yeah, like figure out what you want your protocol to do and what you want it to not do and write them down and then like look at them while you're developing your code and then add tests for them, add assertions for them. Like we find this to be a very common problem in our audits is that we talk to the customer like, okay, so like what are you worried about? And they're like, well, you know, we don't want to get hacked. I'm like, okay, what does that mean? Right? You should know what it means for your protocol to get hacked. So then you can, like, actually think about like, okay, well, I'm worried about this or that. Basically having a real threat model. I'll add to that. Like, definitely 100% threat modeling is, like, where you should start. I think, because the other thing is, like, people like security has to scale with the project. Like, are you going to be able to afford to have, like, a full monitoring infrastructure set up, like, going to deployment if you're, like, you know, you don't have any TVL, you don't have a lot of things. Like, we will still say, like, yeah, you should probably do that. But if you, like, literally can't, you don't have the resources, like, a lot of teams security infrastructure support. But then suddenly in a couple of months, TVL has grown, it's gotten massive, there's a lot more value at stake. They never really thought through how are we going to upscale our security. I've seen people that will build up their access control and hard code in something like an EOA owner and they're like, well, maybe we should transfer it to a multi-signal. And they're like, actually, we don't know how to do that. We didn't prepare for it. That's a particularly egregious example that does exist in a billion-dollar protocol. Try looking for it, where someone just hard-coded the owner, and you just hope that whoever has that key never gets kidnapped or decides to go rogue. But just in general, be prepared to scale up your security as you have the resources't spend on it today, plan to know, okay, at some point at this level of TVL, let's start taking whatever profits might come from that and invest it in making our protocol more secure. Because most of y'all here, unless you win enough from this competition to pay for a whole audit, you can't get your code audited from a top tier firm, but maybe at some point you do or want to. And be prepared to upgrade and other things. Yeah, like you want to have escape hatches for yourself. It's kind of like lawyering stuff up. Like, has anyone here worked with lawyers before? They're like so expensive. It's like so annoying. 
So you tell them like, okay, okay, we don't need like this like frigging Cayman Islands, Panama thing. Just like make me the Delaware LLC and just we'll fix it later if it gets really big. But these things are, you know, lawyers are good at like, okay, yeah, we'll just move all this stuff over, it'll be fine. They're good at having these escape hatches for you because people have been making legal contracts for a long time. Think about your security in ways so that you have escape hatches for, okay, now my thing has scaled, how I'm going to keep this secure as it's growing. But you don't want to be dumping tons of money on making your protocol super fancy, secure, early on, because you probably don't need it. Yeah, I would say start really simple. You might have a lot of very good ideas, but really make the minimal protocol you can do, and you will save a lot of money on security, on audits. Reuse valid components that have been audited previously and understand how they work and the audit process will be much simpler for everybody one final thing that I'd like to add is probably what you're gonna be building 99% of the time it's not gonna be original like a lot of people have built something which is similar and you can look at what happened to that thing probably it failed probably it had some security issues probably it had some economic which is similar, and you can look at what happened to that thing. Probably it failed. Probably it had some security issues. Probably it had some economic issues, right, if it's a Web3 project. So go back, see what happened, how it failed, and make sure that your design at the very early stages and also your requirements are such that this will not happen. That's obviously very hard, but if you do it at an early stage, it's going to be much cheaper. Excellent. Any questions from the audience? Any that we could squeeze in one or two questions if people have it? Hi. So I'm building a debit card solution where I'm assuming that once a transaction is signed and went to the network, it will be successful because the whole thing is standardized. But on the parallel, I am running a consensus model which will monitor those transactions if those are successful or not. And if it fails, it will trigger the system and act accordingly. So what things I should keep in mind, like what could be the security threat which I could face? Yeah. So just understand that you're trying to build a system where someone signs a transaction, submits it, succeeds or fails, and this is just for a normal payment. So what we are trying to do, we are providing the transaction time to four to five seconds. So once the transaction is signed and sent to the network, we assume that that will be successful. But on the parallel, we are running a consensus model that will monitor those transactions and build a consensus that the transaction is successful or not. If it's successful, we are good to go. But if it fails, we will trigger the system and that system will act accordingly. Like if it fails due to network congestion or some logic error or something. So what other things i should keep in mind like what other Scenarios of which transaction could fail or how? It's hard to say exactly because i think there's a lot more i Would need to know about the system. But just off the top of my head, I would say there could be an instance where the way that you're, like the node that you're monitoring it from is somehow down. 
Or like I guess be prepared for whatever infrastructure you're relying on to be able to confirm success or failure. You have like a backup plan if it goes down or you're, like, ensure that someone isn't able to exploit that, making some very general assumptions here. Like, we are using blockless protocol, which have different nodes. And I think those nodes are building the consensus. Not a single node is building the consensus or a proof that a transaction is failed or successful. So I think that wouldn't be the case. But there could be something of which I should be aware about. Again, I'm trying to second guess about how your system works. But one of the things that comes up is whether the consensus algorithm has a finality gadget, whether you can actually tell whether a transaction has been finalized or not, because otherwise you'd need to wait a lot more time than four seconds. Four seconds does sound really really low though. Thank you. Time, one more question if anyone in the audience has one? One question over to the left there. Hi. So I'm not a security researcher myself, but I've seen a lot of hacks that are due to consensus mechanisms and sometimes mass social engineering of a protocol. I was wondering what do you think about how we can basically build mechanisms that are more hard-coded to prevent such exploits? More hard-coded? Yeah, because basically the human factor is more unpredictable. How can we hard-code consensus to be less... Maybe it's a difficult question. When you say consensus, do you mean like, for example, a multi-sig, does that count as consensus? Yes, and also at scale like for a DAO. For a DAO? Yeah, so transparency is very important here because it makes it a lot easier to see what you're actually voting for, for example. I think one notable example was there was one for Tornado Cache where they just proposed a thing and they were like, yeah, it totally does this, and it just did something else. So yeah, monitoring your DAO is pretty good, transparency. Another thing is just time locks. So make it so it's easy to cancel a thing that's in the pending state in the time lock. So this way there's a longer window for things that are potentially breaking to get canceled and noticed. Of course the downside there is you get a lot more latency, so if you have some critical thing that needs to get jammed through, it may make it hard to do that timely. So that's another trade-off. Do you guys have any ideas? Yeah, we've had some really fun experience, and I say fun in a sarcastic way, with DAOs that were under some form of governance attack. In one case, a whale buying a lot of tokens and trying to push through a proposal on compound that would have given them an even larger part of the treasury and then control the DAO. The way they did it was very tricky. They took a bunch of tokens they had purchased, they spread amongst a bunch of accounts. We did actually have security monitoring that picked this up as strange activity. But even then, it's like, okay, so who is a genuine new delegate and who's one of these many accounts controlled by this one entity? There's very few ways to do that in a permissionless system because it's essentially a civil attack in that sense as long as they have the resources to allocate. I think at least being aware, I think ultimately having a reputation in the ecosystem and then having some checks and balances on them and recovery mechanisms. 
In that case, having a multi-sig that can veto proposals if they're somehow violating the DAO but you have very specific rules, that multi-sig is elected by the DAO, they're accountable. That's where I'm starting to go towards, especially for DAOs. And for multisigs themselves, I think definitely having strong reputation. Possibly even KYC, it kind of depends. People are controversial around that. But I think if you can, if it's appropriate to say, we know these are individual people, do a signing ceremony where they're all in different places and they can verify they're all different people it's not like one guy controlling four or five different keys yeah I think it's like there's no good technical solution to these things these are just like okay having strong social mechanisms to recognize who's a good actor and who's not because it's not as easy as like Ethereum where it's like okay we're running a bunch of nodes that are all coming to deterministic answers And the general idea is just like make sure you have enough notes that they can always outvote the bad notes on the deterministic Answer with multi sigs and nows these are not determined because these are not deterministic answers are like yes This is good. Yes. This is bad. So there does have to be a lot more work on the consensus of who are the actors, why are they allowed to participate in the system, and how do you root out bad actors and prevent them from gaining control. As a shitpost, you could take the MakerDAO approach and you could make the decisions deterministic by offloading it to AI. All right, end of shitpost. It's also an interesting experiment anyways. I'm curious how it goes. So another mechanism I've seen is to make the quorum of a governance mechanism increase in size, the more contentious the vote is, so that the simple majority is not enough. So it's a variable quorum. Yeah, of course, it's like a tradeoff between how easy it is to get stuff done, same with time locks, and how safe it is. Yeah. I mean, on one end of the spectrum, it's just everything's hard-coded. On the other end of the spectrum, it's just like whatever goes. Any other final comments from the panel here today? I know we're a little bit over time, but I want to thank you all for joining. For anyone in the audience who hasn't had an opportunity to work with either Immunify or any of these firms. These are some of our favorites in the industry. There are many others that are great too, but I was really happy to put together a panel with this group because it's really great. Can't go wrong with anyone sitting up here. Anything? Any last words, panel? I think everyone on this panel is really awesome. Likewise. I agree. Very nice of you. Thank you all for joining us. Really appreciate it and hope everyone has a great rest of the week. And with that, I can let the panel go and I think we owe some rewards to our CTF participants today. Thank you all again. Vielen Dank. Thank you. Close out the day. We have here our awards for our wonderful CTF participants. We're going to go ahead and start with Slipper again. What's that? I'll let Slipper show you. All right. So in addition to your cash rewards, again, make sure that you've given your wallet information, your correct wallet information, double, triple check it, so you get your correct payment. But if Slipper is still here, we do have a physical gift for you if you wanna come up and grab it. Slipper, are you still here? Come on up real quick. 
So in addition to the cash, congratulations again on being the first one. Maybe you can show the rest of the participants what we'll be giving out here to the finalists. So in addition to the cash prize, we've got a nice set of Beats by Dre Studio Pro headphones for everybody to take home. And make sure you get your Immunify swag bag as well, commemorating the event here today. Let me go through and call names up to come get your stuff. I'm going to go in a random order now just because my spreadsheet is a little screwy. Wait, no, something's wrong. All right. Billy, is Billy here? Billy, come on up. Receive your gift or I can bring it to you. Billy? Here you are. Sir, congratulations. A great job done by you today. Chain security also left a little bit of swag here too, so feel free to grab something there. All right. Next, we have Tony. Tony, come on up. Congratulations to you. Way to be a finalist here in the CTF. A lot of hours spent here today. Heads down. Next, we're looking for Naoya. I hope I'm pronouncing that correctly. Is Naoya here? Naoya. Maybe that's the better pronunciation. All right, we'll hold that one off to the side. . I can call it last name. Is that all? I'm getting good username. But are you sure that's it? Yeah, this is from Mixi. No, no, no, thank you. Did you want to go next? All right, yep. Harry. Harry? All right. Congrats, great work today. Good work. Chi-Sue. Chi-Sue. Chi-Sue. Chi-Sue. Great work. You can give us one of these, j or rkskek. We've got somebody that's going by the name of just j. J. The email is rkskek and some numbers. Oh, you got one right. Okay. So that's okay. It's all the same. That's fine. Yep. So that means we're missing a bill still then right? Yeah, it's all good. Yeah, just, you can just, yeah, it's all the same. We can just pass them out. Great. All right. Perfect. Rage next. Rage. Rage, I love the hat. Good work. All right. There we go. Already new. Just need a bag. Congratulations. Great work. Great work. Thank you. great work mage intern mage intern great work M4K2. Congratulations. Vlad. Vlad, are you still here? All right, Vlad. Vlad, I think you even won your group, too, I believe. So congratulations. Nipun. Nipun. Nipun. Great work. The early leader stays on the leaderboard. And Valera, last but not least. Valera. Congratulations. Great work. Great work. Once again, thanks everybody for coming through. Thanks for participating in the CTF. I know it was a lot of hard focus work in one room all day. Please look forward to the $1.5 million Ethereum Protocol Attackathon that will be hosted soon and with the sponsors for that program. And again, a big thanks to the Ethereum Foundation for allowing us to run this event here at DEVCON and the friendly Maltese citizens. Okonami, are you still here? Okonami? Minamoi. Minimoi. And take care. Have a great rest of your week and enjoy the rest of DevCon. 
Thank you.", - "eventId": "devcon-7", - "slot_start": 1731551400000, - "slot_end": 1731573000000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1TFlUSOJNbrhtdG-u3_ajWbpR--vyfBXX6KSwtcFkFI0", - "resources_slides": null, - "speakers": [] - }, - "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -692048,6 +690078,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -692663,6 +690694,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -692770,44 +690802,46 @@ 0, 0, 0, - 0, - 0, 0 ] }, { "session": { - "id": "speedrun-rollups-a-beginners-guide-to-l2s-zk-and-wtf-people-are-talking-about-on-panels", - "sourceId": "L3Z78Q", - "title": "Speedrun Rollups: A Beginner's Guide to L2s, ZK, and WTF People are Talking About on Panels", - "description": "The L2 landscape has grown, both in terms of size, but also the development of the tech and the new problems that need to be solved.\r\n\r\nThis talk aims to take you from zero to hero, equipping you with the history, development, and current state of L2s, so you can maximize your Devcon experience without having to carry around a dictionary to understand WTF people are talking about.", - "track": "Layer 2", - "type": "Workshop", - "expertise": "Beginner", - "audience": "Hobby", + "id": "sszb-a-high-performance-ssz-implementation-in-rust", + "sourceId": "M3SK39", + "title": "Sszb: A High Performance SSZ Implementation in Rust", + "description": "This talk goes over my EPF project for the SSZ ecosystem:\r\n\r\n- a benchmarking suite for the various rust SSZ implementations in the ecosystem to properly evaluate performance and point developers to which library they should use.\r\n- a high performance ssz implementation that's faster than existing libraries in the ecosystem", + "track": "[CLS] EPF Day", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "ELI5" - ], "tags": [ - "Layer 2s", - "Scalability", - "ZK-EVMs", - "eli5", - "Layer 2s", - "Scalability", - "ZK-EVMs" + "Core", + "Protocol" ], - "language": "en", - "speakers": [ - "emily" + "keywords": [ + "serialization", + "ssz", + "rust" ], + "duration": 849, + "language": "en", + "sources_swarmHash": "4cff4a6eb8f2f4ec6d0f9fb9efaa5a524fff05ba39fc73cf45ace5648e60cf18", + "sources_youtubeId": "WIu4PGDZOqI", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673480e89dbb7a90e1c6fbd5", "eventId": "devcon-7", - "slot_start": 1731389400000, - "slot_end": 1731394800000, - "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/17fKWm64cWJz5zLVi9Av7ZypNBcbMuJYxb55zQcDbVJ8" + "slot_start": 1731487500000, + "slot_end": 1731488400000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1-4E6jtMXWSHSGuL8JFQX16HGIrgdIQ5cWNLRXq-ty9I", + "resources_slides": "https://drive.google.com/file/d/13FerR8YUacQpSD2TZC5voy8_RqId7w9Z/view", + "speakers": [ + "ghilia-weldesselasie" + ] }, "vector": [ 0, @@ -692817,8 +690851,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -692827,6 +690859,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -693412,10 +691445,9 @@ 0, 0, 0, - 6, - 0, 0, 0, + 6, 0, 0, 0, @@ -693627,7 
+691659,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -693701,7 +691732,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -693869,6 +691899,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -693910,7 +691942,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -693967,7 +691998,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -694123,6 +692153,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -694135,8 +692166,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -694146,42 +692175,45 @@ }, { "session": { - "id": "speedrunning-chain-abstraction-eips", - "sourceId": "UVUPRS", - "title": "Speedrunning chain abstraction EIPs", - "description": "We look at different EIPs in pipeline across the CAKE stack and how they relate to chain abstraction.", - "track": "Usability", - "type": "Workshop", - "expertise": "Expert", - "audience": "Developer", - "featured": true, + "id": "stablecoin-technicalities-innovations-challenges-and-opportunities", + "sourceId": "XJBYKJ", + "title": "Stablecoin Technicalities: Innovations, Challenges, and Opportunities", + "description": "This session is dedicated to the evolving landscape of stablecoins, with a particular focus on the latest advancements and the role of PYUSD. This talk is tailored for developers and crypto-enthusiasts eager to explore the broader implications of stablecoin technology, integration challenges, and real-world applications of stablecoins in modern finance while focusing on PayPal's role in the Ethereum ecosystem.", + "track": "Real World Ethereum", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Engineering", + "featured": false, "doNotRecord": false, - "tags": [ - "cross-chain" - ], "keywords": [ - "ChainAbstraction", - "CredibleAccounts", - "Cross-chain" + "Stablecoins" + ], + "tags": [ + "Use Cases", + "Remittance", + "Product-market fit", + "stablecoin", + "Product-market fit", + "Remittance", + "Use Cases" ], - "duration": 3844, "language": "en", - "sources_swarmHash": "53eb035d3a0038451c75612289911a4e9159e09ec957dcc72886ed37e605cf1d", - "sources_youtubeId": "ZjmdGQjUI0I", + "sources_swarmHash": "c07ce4f3031afe3446ed68985bffb807c2d79170de4c7206322fc5502b945c3d", + "sources_youtubeId": "NShae3X5QHA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673844a81b0f83434d61fdc2", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731655200000, - "slot_end": 1731660600000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1up9DjzXHNhdVzKddYHp52RLJfA0EO60JAyhULDNogTk", - "resources_slides": null, "speakers": [ - "ankit-chiplunkar" - ] + "edwin-aoki" + ], + "eventId": "devcon-7", + "slot_start": 1731568200000, + "slot_end": 1731568800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1Mh_MTgJQI_Yj0brAf1A-CWrCUWCivpHPQFUodwNtN3M", + "resources_slides": "https://drive.google.com/file/d/1duJRVS4wYp8oDXSV7b1OqcTzzu5ZAZsA/view" }, "vector": [ 0, @@ -694190,9 +692222,10 @@ 0, 0, 0, + 6, + 0, 0, 0, - 6, 0, 0, 0, @@ -694989,6 +693022,14 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -695002,6 +693043,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -695284,6 +693329,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -695367,6 +693414,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -695406,28 +693455,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -695498,13 +693525,13 @@ 0, 0, 0, + 2, 0, 0, 0, 2, 0, 0, - 2, 0, 0, 0, @@ -695520,47 
+693547,56 @@ }, { "session": { - "id": "sszb-a-high-performance-ssz-implementation-in-rust", - "sourceId": "M3SK39", - "title": "Sszb: A High Performance SSZ Implementation in Rust", - "description": "This talk goes over my EPF project for the SSZ ecosystem:\r\n\r\n- a benchmarking suite for the various rust SSZ implementations in the ecosystem to properly evaluate performance and point developers to which library they should use.\r\n- a high performance ssz implementation that's faster than existing libraries in the ecosystem", - "track": "[CLS] EPF Day", + "id": "staking-on-power-efficient-and-low-cost-hardware-from-arm64-to-risc-v-boards", + "sourceId": "J3SWYT", + "title": "Staking on Power Efficient and Low Cost Hardware: From ARM64 to RISC-V Boards", + "description": "The entry barrier to staking on Ethereum got lower, as ARM boards, the tooling and OS support have improved massively. We show the current landscape of hardware options and the software stack to go along with it. \r\nAs a glimpse into the future we will talk about RISC-V, an open CPU architecture, present the current state of RISC-V based single board computers. We will discuss the progress we have made to run Ethereum nodes on these boards and the road ahead to optimize clients.", + "track": "Core Protocol", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Stakers/Validators", "featured": false, "doNotRecord": false, - "tags": [ - "Core", - "Protocol" - ], "keywords": [ - "serialization", - "ssz", - "rust" + "node running", + "RISC-V", + "Hardware optimization" + ], + "tags": [ + "Validator Experience", + "Home staking", + "Decentralization", + "optimization", + "hardware", + "Decentralization", + "Home staking", + "Validator Experience" ], - "duration": 849, "language": "en", - "sources_swarmHash": "4cff4a6eb8f2f4ec6d0f9fb9efaa5a524fff05ba39fc73cf45ace5648e60cf18", - "sources_youtubeId": "WIu4PGDZOqI", + "sources_swarmHash": "5a873a44d8361c9e25d38237c41c3dc8af477ee3fc922209b9d3d7f25e0e05b1", + "sources_youtubeId": "owzXocC1biE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673480e89dbb7a90e1c6fbd5", - "eventId": "devcon-7", - "slot_start": 1731487500000, - "slot_end": 1731488400000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1-4E6jtMXWSHSGuL8JFQX16HGIrgdIQ5cWNLRXq-ty9I", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "ghilia-weldesselasie" - ] + "aavegotch1eth", + "haurog" + ], + "eventId": "devcon-7", + "slot_start": 1731571800000, + "slot_end": 1731573600000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/120GkPug8WQzGtUpAMbWnOOcB7P72J5K2YG_ZVHAuEF0", + "resources_slides": "https://drive.google.com/file/d/1hfqIoD4BO6zEoIc8kLYNfgVixMq9QRud/view" }, "vector": [ 0, 0, 0, 0, + 6, 0, 0, 0, @@ -695572,16 +693608,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -696161,7 +694187,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -696172,6 +694197,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -696351,6 +694378,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -696398,6 +694426,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -696413,6 +694442,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -696489,6 +694519,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -696616,8 +694647,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -696660,6 +694689,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -696875,13 +694905,13 @@ 0, 0, 0, - 2, 0, 0, 0, 
0, 0, 0, + 2, 0, 0, 0, @@ -696893,37 +694923,44 @@ }, { "session": { - "id": "stablecoin-technicalities-innovations-challenges-and-opportunities", - "sourceId": "XJBYKJ", - "title": "Stablecoin Technicalities: Innovations, Challenges, and Opportunities", - "description": "This session is dedicated to the evolving landscape of stablecoins, with a particular focus on the latest advancements and the role of PYUSD. This talk is tailored for developers and crypto-enthusiasts eager to explore the broader implications of stablecoin technology, integration challenges, and real-world applications of stablecoins in modern finance while focusing on PayPal's role in the Ethereum ecosystem.", - "track": "Real World Ethereum", + "id": "stark-proofs-eli5", + "sourceId": "BKTYWY", + "title": "STARK proofs ELI5", + "description": "Let's face it, ZK proofs are intimidating. But they don't have to be!\r\nZK proofs are complex not because of the depth math they use, but because of the large number of fields of mathematics they leverage features from.\r\nIn this talk, we'll break down STARK proofs into simple blocks and colorful analogies so that you get a good high level overview of how they work", + "track": "Applied Cryptography", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Stablecoins" - ], "tags": [ - "Use Cases", - "Remittance", - "Product-market fit", - "stablecoin", - "Product-market fit", - "Remittance", - "Use Cases" + "ZKP", + "Use cases of cryptography", + "STARK", + "eli5", + "STARK", + "Use cases of cryptography", + "ZKP" ], - "language": "en", - "speakers": [ - "edwin-aoki" + "keywords": [ + "ELI5" ], + "duration": 496, + "language": "en", + "sources_swarmHash": "69d7d8817a7c0b608f741bd14a6d7e15b142dcc69b50fdaa2c91f7cf3ff65161", + "sources_youtubeId": "eHPp8mFCS6E", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731568200000, - "slot_end": 1731568800000, + "slot_start": 1731394200000, + "slot_end": 1731394800000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Mh_MTgJQI_Yj0brAf1A-CWrCUWCivpHPQFUodwNtN3M" + "resources_presentation": "https://docs.google.com/presentation/d/1wuFB_JXv5HWJjXdbPmQNAk43TRxm_cDU9haSzPCxKco", + "resources_slides": "https://drive.google.com/file/d/1LOYGnKxMC1rdJV_RHo560EiTRV7PuTYl/view", + "speakers": [ + "henri" + ] }, "vector": [ 0, @@ -696932,16 +694969,11 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -697531,7 +695563,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -697539,6 +695570,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -697707,6 +695739,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -697735,7 +695768,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -697754,9 +695786,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -697890,6 +695922,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -698043,7 +696076,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -698084,6 +696116,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -698128,7 +696161,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -698262,45 +696294,44 @@ }, { "session": { - "id": "staking-on-power-efficient-and-low-cost-hardware-from-arm64-to-risc-v-boards", - "sourceId": "J3SWYT", - "title": "Staking on Power Efficient and Low Cost Hardware: From ARM64 to RISC-V Boards", - "description": "The entry barrier to staking on Ethereum got lower, as ARM boards, the tooling and OS support have improved massively. We show the current landscape of hardware options and the software stack to go along with it. 
\r\nAs a glimpse into the future we will talk about RISC-V, an open CPU architecture, present the current state of RISC-V based single board computers. We will discuss the progress we have made to run Ethereum nodes on these boards and the road ahead to optimize clients.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Stakers/Validators", + "id": "start-contributing-to-economic-protocol-development", + "sourceId": "CEZPBS", + "title": "Start contributing to economic protocol development", + "description": "Protocol development needs more economists, yet many potential contributors do not know which problems are important to Ethereum protocol development. This talk bridges the gap for those interested in blockchain research who want to work on impactful problems. The talk will overview different economic research areas at the protocol level. Examples include an economic perspective on consensus systems, transaction fee mechanism design, and economic sides of current EIPs.", + "track": "Cryptoeconomics", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Research", "featured": false, "doNotRecord": false, - "keywords": [ - "node running", - "RISC-V", - "Hardware optimization" - ], "tags": [ - "Validator Experience", - "Home staking", - "Decentralization", - "optimization", - "hardware", - "Decentralization", - "Home staking", - "Validator Experience" + "Core Protocol", + "Economics", + "introduction", + "Core Protocol", + "Economics" ], - "language": "en", - "speakers": [ - "aavegotch1eth", - "haurog" + "keywords": [ + "Introduction" ], + "duration": 423, + "language": "en", + "sources_swarmHash": "6341c1973358b364e4a0b42f702c81d66a466e72787bc14c72216143e19bfb17", + "sources_youtubeId": "CaauVb5jcH8", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731571800000, - "slot_end": 1731573600000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/120GkPug8WQzGtUpAMbWnOOcB7P72J5K2YG_ZVHAuEF0" + "slot_start": 1731484800000, + "slot_end": 1731485400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1oT8-qF_kFLzRfy9StlucF5G7CCSCbwTrU3VGnmV4M-M", + "resources_slides": "https://drive.google.com/file/d/1rREcVDVR1m5EXuLcfwseRIHwcLqWJrWB/view", + "speakers": [ + "julian-ma" + ] }, "vector": [ - 0, - 0, 0, 0, 6, @@ -698905,31 +696936,11 @@ 0, 0, 0, - 6, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -699104,6 +697115,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -699136,23 +697148,10 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -699229,7 +697228,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -699400,7 +697398,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -699563,6 +697560,30 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -699613,7 +697634,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -699626,6 +697646,14 @@ 2, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -699635,57 +697663,59 @@ }, { "session": { - "id": "stark-proofs-eli5", - "sourceId": "BKTYWY", - "title": "STARK proofs ELI5", - "description": "Let's face it, ZK proofs are intimidating. 
But they don't have to be!\r\nZK proofs are complex not because of the depth math they use, but because of the large number of fields of mathematics they leverage features from.\r\nIn this talk, we'll break down STARK proofs into simple blocks and colorful analogies so that you get a good high level overview of how they work", - "track": "Applied Cryptography", - "type": "Lightning Talk", - "expertise": "Intermediate", + "id": "state-contention-rules-everything-around-me", + "sourceId": "XGHU89", + "title": "State Contention Rules Everything Around Me", + "description": "State contention causes MEV, prevents parallelization, breaks gas simulation, causes transactions to revert, etc. etc. We'll discuss state contention in practical and theoretical systems (e.g. OS threads and type systems) and how/why synchronization primitives developed. We'll cover why state is contentious, what state is contentious, what can be accomplished by making state non-contentitious, and strategies for refactoring existing systems to reduce contention.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Expert", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "ZKP", - "Use cases of cryptography", - "STARK", - "eli5", - "STARK", - "Use cases of cryptography", - "ZKP" - ], "keywords": [ - "ELI5" + "Synchronization", + "Concurrency" + ], + "tags": [ + "Layer 1", + "Architecture", + "Cross-L2", + "concurrency", + "Architecture", + "Cross-L2", + "Layer 1" ], - "duration": 496, "language": "en", - "sources_swarmHash": "69d7d8817a7c0b608f741bd14a6d7e15b142dcc69b50fdaa2c91f7cf3ff65161", - "sources_youtubeId": "eHPp8mFCS6E", + "sources_swarmHash": "58a4d430fa264cb64fc8af1f5aaec20d3e7b3280ed0426b56bb3e15c1cc2e82a", + "sources_youtubeId": "QrbJbjWKNX4", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731394200000, - "slot_end": 1731394800000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1wuFB_JXv5HWJjXdbPmQNAk43TRxm_cDU9haSzPCxKco", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "henri" - ] + "james-prestwich" + ], + "eventId": "devcon-7", + "slot_start": 1731579000000, + "slot_end": 1731580800000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1cS2GTJFjotanBsdxY8DrP-qcMwV7ijAs3-hVV-oIS40", + "resources_slides": "https://drive.google.com/file/d/1vQwgbFkFgFDUOlO2X2e-tsY9jZIfdcrZ/view" }, "vector": [ 0, 0, 0, 0, + 6, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -700283,8 +698313,8 @@ 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -700438,6 +698468,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -700454,7 +698485,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -700485,6 +698515,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -700501,7 +698532,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -700573,6 +698603,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -700622,6 +698653,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -700637,7 +698669,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -700832,9 +698863,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -700990,9 +699018,6 @@ 0, 0, 2, - 0, - 0, - 0, 2, 0, 0, @@ -701011,47 +699036,45 @@ }, { "session": { - "id": "start-contributing-to-economic-protocol-development", - "sourceId": "CEZPBS", - "title": "Start contributing to economic protocol development", - "description": "Protocol development needs more economists, yet many potential contributors do not know which problems are important to 
Ethereum protocol development. This talk bridges the gap for those interested in blockchain research who want to work on impactful problems. The talk will overview different economic research areas at the protocol level. Examples include an economic perspective on consensus systems, transaction fee mechanism design, and economic sides of current EIPs.", - "track": "Cryptoeconomics", + "id": "state-minimized-layer-2s-and-why-ethereum-greater-evm", + "sourceId": "VDFBMT", + "title": "State Minimized Layer-2s and Why Ethereum > EVM", + "description": "Ethereum is at a critical juncture in its development. Many layer-2s are of the same mentality of copy and pasting their architecture and have not innovated over key blockchain problems such as parallel execution or state growth. If Ethereum is to compete with other alternative high performance blockchains, it has to solve for state growth. This talk will explore the landscape of state minimized layer-2s and show how Ethereum will be able to go beyond the state problem with non-EVM based design.", + "track": "Layer 2", "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Research", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Core Protocol", - "Economics", - "introduction", - "Core Protocol", - "Economics" + "Network State", + "node-requirements", + "Network", + "State" ], "keywords": [ - "Introduction" + "node-requirements" ], - "duration": 423, + "duration": 453, "language": "en", - "sources_swarmHash": "6341c1973358b364e4a0b42f702c81d66a466e72787bc14c72216143e19bfb17", - "sources_youtubeId": "CaauVb5jcH8", + "sources_swarmHash": "15cefb1dc3eb849cfbe6481fac471d64fa487ade696d65a54da5acf41bc079cc", + "sources_youtubeId": "juHr9CgkFCo", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6736d2581b0f83434d6a4c04", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d2581b0f83434d6a4c04.vtt", + "transcript_text": " All right. Okay. So I'm going to be very, very fast. I have, like, more than 30 slides, and there's five minutes. So I'm going to have to make this work. It's fine though. A lot of this I can kind of skip. So today we're going to be doing state minimization and why Ethereum is greater than the EVM. So components of a blockchain, just to review quickly, you have this area of a blockchain node that would be, you could think of as state, state being these things that you need to both verify things and propose new blocks into a blockchain. Then you have things like execution, largely what we do with the CPU and RAM, sometimes a GPU if you're going to do GPU acceleration. Then you have data. And in this case, I'm just calling it data, which is just sort of the data you communicate over the internet to basically get everyone else in sync and, you know, kind of progress things forward and ensure that it's actually, you know, a system we can all use. So in many senses, on the data side, we have some solutions. On the execution side, we also have a lot of solutions. On the state side, though, you don't really hear about this problem very much. And you don't hear about it because a lot of people don't really have any good solutions. So that's why this talk is, you know, kind of interesting and also fun. So on the execution side, we've got all kinds of things coming out. Parallel transaxle execution is becoming more normalized. You know, you have the SVM. 
You have, as well, things like Stylus, you know, dropping into WASM, stuff like that. My project, FuelVM, we have our own virtual machine that helps with this. You know, there's all kinds of different ways we can alleviate execution. Data has also been somewhat solved as well. So we post data now as rollups. We can post it over Ethereum, right, with EIP-4844. There's all kinds of sharding designs. There's all kinds of different DA layers that are coming up. So there's lots of solutions in that camp. But with state, there aren't really a lot of solutions. And so when we're talking about state, you have a bunch of information that you really need to store if you want to progress things forward, right? So with Bitcoin, you have the active UTXO set, the unspent UTXOs. With Ethereum, you have things like the account balances, smart contract code, smart contract state, token balances, et cetera. So there's a lot of stuff there. And Ethereum's state system is quite interesting, but it's very, very difficult to deal with, you could say. It's very difficult to optimize around. So you'll see there's a lot of different layers to how we're taking this sort of key-value store we have with Ethereum, the one you hit with SSTORE, et cetera, when you're in contracts, and how that filters all the way back up to single succinct state roots within block headers. And it's quite enormous, the amount of state that's accrued here. So when we talk about scalability and we talk about state, a lot of people just blame the clients. They blame Geth. They blame these kinds of things, like, why don't we just up the gas and screw everything, let's do it. And the thing is, Peter's already addressed this quite well, which is, effectively, that you can't just do this. And it's not slow because of Geth, quite frankly. It's not slow because of our clients. It's slow because we're all, or at least some people are, very, very concerned about the state growth itself, right? And the requirements that puts onto things. And, you know, one way you can look at state growth here, I believe this is just the accounts on Ethereum, but you can see that you have a ballooning of information. And all of these accounts have to be Merkleized, so it's really, really intense in terms of the amount of storage that we need to hold within Ethereum. So how can we address state growth? Basically, there's a bunch of different solutions for this. We can go on and on about it, but here are some that have been proposed before, right? State rent. There's also statelessness, which is sexy to say, but there aren't really a lot of great things underneath the hood for it. Unmerkleizing the state, so you just YOLO it like Solana does. As well, app-level compression, so doing things at the application level. You can also just let it grow, and maybe we just keep adding hard drives. Verkle trees, you also have some nice things here, right, where they kind of compress a certain amount of state using other techniques, but still pretty messy. And then we have another option, which I'll talk about at the end, which is bandwidth. So basically, with state rent, we've all kind of heard this one before. You have state, you rent it out, et cetera. Statelessness, just trying to do more things away from Ethereum's state system. Unmerkleizing the state, it's not really a great approach either. 
Application-level techniques, you can do more things in calldata, more things at the application level, but it's still not a great system in general. Here's Tully basically saying, I don't know, here's a bunch of options, but I'm not really sure. Here's Verkle trees, and again, Verkle trees are not really the best solution, because there's still a ton of state that gets accrued, right? So, alt VMs, again, Tully's system, not that great. Fuel's state philosophy, we'll go into it in like two minutes, or like one minute, and then we'll go to questions. Thanks. So, Fuel's state philosophy is very different. We actually don't have any global state trees. We also use a system of native assets. We use UTXOs. We have, as well, different kinds of models to use state, but use it in the right way. We try to move a lot of the state that's typically within Merkle trees into an area which you would consider bandwidth. And in bandwidth, we actually have a lot more room to play with, because once you've moved everything over bandwidth and done the proving, then effectively you can just trim it. So there's a lot of really nice things you can do. So applications can be designed with a new technique that we have, which is called native state rehydration. And this really means that across our system, we have different techniques to create applications and different techniques to effectively remove state from the system and keep it very lean. These are, basically, in our system: scripts, predicates, native assets, and a transaction model. I only have three seconds left, so I'm just going to skip through this. Again, the UTXO model is very, very powerful in this setting, and it provides a lot of different options. And this is why, in my super crazy lightning talk, I hope you guys enjoyed that, Fuel is on Ethereum. It's a layer two. And Ethereum is much greater than the EVM with all these state problems. So thank you. Okay, cool. We have 40 seconds for one question. What about state expiry? That also keeps state in check. Yeah, so, kind of, but again, you have this intense problem of a massive run-up of state. The problem with state expiry is that it's not so much that you can or can't do it. It's that, basically, once it expires, you would need a good system to then refill that state. And it helps, but I would say that there are better techniques where, again, you can use bandwidth. Over bandwidth, you kind of store things, and then once you've consumed that, or other people have consumed it, they can bring it back only whenever they need it. You could say that's kind of like expiry, but I think expiry is too simple. You need a way to both use state and then rehydrate state and then basically take it away. 
So in Fuel we have really nice systems for this.", "eventId": "devcon-7", - "slot_start": 1731484800000, - "slot_end": 1731485400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1oT8-qF_kFLzRfy9StlucF5G7CCSCbwTrU3VGnmV4M-M", - "resources_slides": null, + "slot_start": 1731582600000, + "slot_end": 1731583200000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1UJnCtYTecznVLrleCgEgafIef7JIuF9xeJmVPJ4TRHM", + "resources_slides": "https://drive.google.com/file/d/1f7gaG44K3TWFLS4lpyOIbnbYA58VsHN3/view", "speakers": [ - "julian-ma" + "nick-dodson" ] }, "vector": [ - 0, - 0, - 6, 0, 0, 0, @@ -701059,6 +699082,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -701658,10 +699682,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -701819,7 +699843,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -701835,7 +699858,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -701848,6 +699870,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -702282,6 +700305,8 @@ 0, 0, 2, + 2, + 2, 0, 0, 0, @@ -702359,17 +700384,13 @@ 0, 0, 0, - 0, - 0, - 0, + 2, 0, 0, 0, 2, 0, 0, - 2, - 0, 0, 0, 0, @@ -702385,65 +700406,55 @@ }, { "session": { - "id": "state-contention-rules-everything-around-me", - "sourceId": "XGHU89", - "title": "State Contention Rules Everything Around Me", - "description": "State contention causes MEV, prevents parallelization, breaks gas simulation, causes transactions to revert, etc. etc. We'll discuss state contention in practical and theoretical systems (e.g. OS threads and type systems) and how/why synchronization primitives developed. We'll cover why state is contentious, what state is contentious, what can be accomplished by making state non-contentitious, and strategies for refactoring existing systems to reduce contention.", - "track": "Core Protocol", + "id": "state-of-the-ens", + "sourceId": "VBSW3N", + "title": "State of the ENS", + "description": "Jeff Lau, co-founder of ENS, gives an update on the state of ENS, and our progress with migrating over to layer 2. ENS's approach to layer 2 aims to preserve users' ability to choose where their names are stored and administered, while massively reducing transaction costs and increasing scalability for the vast majority of users. 
Embracing its status as a public good, we want to make ENS the most useful to the largest number of people possible.", + "track": "Real World Ethereum", "type": "Talk", - "expertise": "Expert", + "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Synchronization", - "Concurrency" - ], "tags": [ - "Layer 1", - "Architecture", - "Cross-L2", - "concurrency", - "Architecture", - "Cross-L2", - "Layer 1" + "Protocol Design", + "Identity", + "Public good", + "usability", + "Identity", + "Protocol Design", + "Public good" ], - "language": "en", - "speakers": [ - "james-prestwich" + "keywords": [ + "Usability" ], + "duration": 1573, + "language": "en", + "sources_swarmHash": "4f4d5561be4b6ad259c73d440b96399b09651ae3087ffac98f44090ee6ba0c20", + "sources_youtubeId": "Lycp5FW-4x4", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6736e62c1b0f83434d0b798d", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731579000000, - "slot_end": 1731580800000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1cS2GTJFjotanBsdxY8DrP-qcMwV7ijAs3-hVV-oIS40" + "slot_start": 1731638700000, + "slot_end": 1731640500000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1z_YHSVofOJSq48tqbAiqN423gAZrzi5rzZMND8BcHDw", + "resources_slides": "https://drive.google.com/file/d/14BWDzyHK6IbPK6J9dKnv1OTWYzm7Uro5/view", + "speakers": [ + "jeff-lau" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -703029,7 +701040,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -703049,6 +701059,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -703185,7 +701196,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -703235,6 +701245,7 @@ 2, 0, 0, + 2, 0, 0, 0, @@ -703294,6 +701305,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -703320,26 +701332,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -703370,7 +701362,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -703689,6 +701680,38 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -703737,6 +701760,7 @@ 0, 0, 2, + 0, 2, 0, 0, @@ -703755,43 +701779,26 @@ }, { "session": { - "id": "state-minimized-layer-2s-and-why-ethereum-greater-evm", - "sourceId": "VDFBMT", - "title": "State Minimized Layer-2s and Why Ethereum > EVM", - "description": "Ethereum is at a critical juncture in its development. Many layer-2s are of the same mentality of copy and pasting their architecture and have not innovated over key blockchain problems such as parallel execution or state growth. If Ethereum is to compete with other alternative high performance blockchains, it has to solve for state growth. 
This talk will explore the landscape of state minimized layer-2s and show how Ethereum will be able to go beyond the state problem with non-EVM based design.", - "track": "Layer 2", - "type": "Lightning Talk", - "expertise": "Intermediate", + "id": "stress-escape-relaxing-aromatic-oils-and-singing-gongs-and-bowls", + "sourceId": "KVDNNN", + "title": "Stress Escape (Relaxing Aromatic Oils and Singing Gongs and Bowls)", + "description": "By master Ice \r\n- Let go of stress with the calming sounds of gongs and bowls\r\n- Enhance by soothing essential oil scents. You’ll also receive a take-home essential oil roller to keep the relaxation going after the session.\r\n\r\nNov 15 13:00 - 13:45", + "track": "Entertainment", + "type": "Mixed Formats", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Network State", - "node-requirements", - "Network", - "State" - ], - "keywords": [ - "node-requirements" - ], - "duration": 453, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6736d2581b0f83434d6a4c04", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d2581b0f83434d6a4c04.vtt", - "transcript_text": " All right. Okay. So I'm going to be very, very fast. I have, like, more than 30 slides, and there's five minutes. So I'm going to have to make this work. It's fine though. A lot of this I can kind of skip. So today we're going to be doing state minimization and why Ethereum is greater than the EVM. So components of a blockchain, just to review quickly, you have this area of a blockchain node that would be, you could think of as state, state being these things that you need to both verify things and propose new blocks into a blockchain. Then you have things like execution, largely what we do with the CPU and RAM, sometimes a GPU if you're going to do GPU acceleration. Then you have data. And in this case, I'm just calling it data, which is just sort of the data you communicate over the internet to basically get everyone else in sync and, you know, kind of progress things forward and ensure that it's actually, you know, a system we can all use. So in many senses, on the data side, we have some solutions. On the execution side, we also have a lot of solutions. On the state side, though, you don't really hear about this problem very much. And you don't hear about it because a lot of people don't really have any good solutions. So that's why this talk is, you know, kind of interesting and also fun. So on the execution side, we've got all kinds of things coming out. Parallel transaxle execution is becoming more normalized. You know, you have the SVM. You have, as well, things like stylus, you know, dropping into WASM, stuff like that, as well. My project, FuelVM, we have our own virtual machine that helps with this. You know, and there's all kinds of different ways we can alleviate execution. Data has also been somewhat solved as well. So we post data now as roll-ups. We can post them over Ethereum, right, on EIP-4044. There's all kinds of sharding designs. There's all kinds of different kinds of DA layers that are coming up. So there's lots of solutions in that camp. But with state, there isn't really a lot of solutions. 
And so when we're talking about state, you have a bunch of information that you really need to store if you want to progress things forward, right? So with Bitcoin, you have the active UTXO set, the unspent UTXOs. With Ethereum, you have things like the account balances, smart contract code, smart contract state, token balances, et cetera. So there's a lot of stuff there. And Ethereum's state system is quite interesting, but it's very, very difficult to deal with, you could say. It's very difficult to optimize around. So you'll see there's a lot of different layers to how we're taking this sort of key-value store we have with Ethereum, the one you hit with SSTORE, et cetera, when you're in contracts, and how that filters all the way back up to single succinct state roots within block headers. And it's quite enormous, the amount of state that's accrued here. So when we talk about scalability and we talk about state, a lot of people just blame the clients. They blame Geth. They blame these kinds of things, like, why don't we just up the gas and screw everything, let's do it. And the thing is, Peter's already addressed this quite well, which is, effectively, that you can't just do this. And it's not slow because of Geth, quite frankly. It's not slow because of our clients. It's slow because we're all, or at least some people are, very, very concerned about the state growth itself, right? And the requirements that puts onto things. And, you know, one way you can look at state growth here, I believe this is just the accounts on Ethereum, but you can see that you have a ballooning of information. And all of these accounts have to be Merkleized, so it's really, really intense in terms of the amount of storage that we need to hold within Ethereum. So how can we address state growth? Basically, there's a bunch of different solutions for this. We can go on and on about it, but here are some that have been proposed before, right? State rent. There's also statelessness, which is sexy to say, but there aren't really a lot of great things underneath the hood for it. Unmerkleizing the state, so you just YOLO it like Solana does. As well, app-level compression, so doing things at the application level. You can also just let it grow, and maybe we just keep adding hard drives. Verkle trees, you also have some nice things here, right, where they kind of compress a certain amount of state using other techniques, but still pretty messy. And then we have another option, which I'll talk about at the end, which is bandwidth. So basically, with state rent, we've all kind of heard this one before. You have state, you rent it out, et cetera. Statelessness, just trying to do more things away from Ethereum's state system. Unmerkleizing the state, it's not really a great approach either. Application-level techniques, you can do more things in calldata, more things at the application level, but it's still not a great system in general. Here's Tully basically saying, I don't know, here's a bunch of options, but I'm not really sure. Here's Verkle trees, and again, Verkle trees are not really the best solution, because there's still a ton of state that gets accrued, right? So, alt VMs, again, Tully's system, not that great. Fuel's state philosophy, we'll go into it in like two minutes, or like one minute, and then we'll go to questions. Thanks. So, Fuel's state philosophy is very different. We actually don't have any global state trees. 
We also use a system of native assets. We use UTXOs. We have as well different kinds of models to use state, but use it in the right way. We try to move a lot of the state that's typically within Merkle trees into an area which you would consider bandwidth. And in bandwidth, we actually have a lot more room to play with because everything, once you've moved it over bandwidth and kind of done the proving, then effectively you can just trim it. So there's a lot of really nice things you can do. So applications can be designed in a new technique that we have, which is called native state rehydration. And this really means that across our system, we have different techniques to basically create applications and different techniques to effectively remove state from the system and keep it very lean. These are basically, in our system, scripts, predicates, native assets, and a transaction model. I only have three seconds left, so I'm just going to skip through this. Again, UTXO model is very, very powerful in this setting, and it provides a lot of different options. And this is why in my super crazy lightning talk, I hope you guys enjoyed that. Ethereum, fuel is on Ethereum. It's a layer two. And Ethereum is much greater than the EVM with all these state problems. So thank you. Okay, cool. We have 40 seconds for one question. What about state expiry? What also keeps state in check? Yeah, so kind of, but again, you have this intense problem of a massive run-up of state. The problem with state expiry is that it's not so much that you can or can't do it. It's that basically once it expires, you would need a good system to then refill that state. And it helps, but I would say that there are better techniques where, again, you can use bandwidth. Over bandwidth, you kind of store things, and then once you've kind of consumed that or other people have consumed it, then they can bring it back only whenever they need it. You could say that's kind of like expiry, but I think expiry is too simple. You need a way to both kind of use state and then rehydrate state and then basically take it away. So in Fuel we have really nice systems for this.", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731582600000, - "slot_end": 1731583200000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1UJnCtYTecznVLrleCgEgafIef7JIuF9xeJmVPJ4TRHM", - "resources_slides": null, - "speakers": [ - "nick-dodson" - ] + "slot_start": 1731650400000, + "slot_end": 1731653100000, + "slot_roomId": "decompression-room", + "resources_presentation": "https://docs.google.com/presentation/d/1yzroGPmzEN55RgegoRuiSo7Qe_-eunH6UGPIczkFag0", + "resources_slides": "" }, "vector": [ 0, @@ -703801,9 +701808,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -704405,7 +702412,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -704592,7 +702598,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -705027,9 +703032,6 @@ 0, 0, 0, - 2, - 2, - 2, 0, 0, 0, @@ -705108,8 +703110,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 2, @@ -705130,46 +703132,40 @@ }, { "session": { - "id": "state-of-the-ens", - "sourceId": "VBSW3N", - "title": "State of the ENS", - "description": "Jeff Lau, co-founder of ENS, gives an update on the state of ENS, and our progress with migrating over to layer 2. ENS's approach to layer 2 aims to preserve users' ability to choose where their names are stored and administered, while massively reducing transaction costs and increasing scalability for the vast majority of users. 
Embracing its status as a public good, we want to make ENS the most useful to the largest number of people possible.", - "track": "Real World Ethereum", - "type": "Talk", - "expertise": "Beginner", - "audience": "Engineering", + "id": "structuring-censorship-resistant-privacy-protocols-risks-and-considerations", + "sourceId": "MVJFDX", + "title": "Structuring Censorship Resistant Privacy Protocols: Risks and Considerations", + "description": "This workshop is aimed at developers, legal professionals, and project managers involved in the creation and maintenance of privacy-focused projects and will guide participants through the various considerations and risks that need to be managed during the structuring, development and launch of these protocols.", + "track": "Cypherpunk & Privacy", + "type": "Workshop", + "expertise": "Intermediate", + "audience": "Product", "featured": false, - "doNotRecord": false, - "tags": [ - "Protocol Design", - "Identity", - "Public good", - "usability", - "Identity", - "Protocol Design", - "Public good" - ], + "doNotRecord": true, "keywords": [ - "Usability" + "Legal" + ], + "tags": [ + "Frameworks", + "Privacy", + "Censorship Resistance", + "legal", + "Censorship Resistance", + "Frameworks", + "Privacy" ], - "duration": 1573, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6736e62c1b0f83434d0b798d", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731638700000, - "slot_end": 1731640500000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1z_YHSVofOJSq48tqbAiqN423gAZrzi5rzZMND8BcHDw", - "resources_slides": null, "speakers": [ - "jeff-lau" - ] + "fatemeh-fannizadeh", + "andre-omietanski", + "amal-ibraymi" + ], + "eventId": "devcon-7", + "slot_start": 1731576600000, + "slot_end": 1731582000000, + "slot_roomId": "classroom-d", + "resources_presentation": "https://docs.google.com/presentation/d/1hNJE0EKTqY7KkSQmnZdpNsxrFfsKPlhwl0VFWn9f3pA", + "resources_slides": "https://drive.google.com/file/d/1_NF6zWt1cV0YWs0lyG8M-obAtbpAUQHD/view" }, "vector": [ 0, @@ -705177,7 +703173,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -705698,6 +703693,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -705785,6 +703781,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -705969,12 +703966,6 @@ 0, 0, 0, - 2, - 0, - 0, - 2, - 0, - 0, 0, 0, 0, @@ -706030,9 +704021,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -706041,6 +704032,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -706072,6 +704064,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -706484,17 +704477,15 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, - 0, - 2, - 0, 0, 0, 0, + 2, 0, 0, 0, @@ -706508,34 +704499,49 @@ }, { "session": { - "id": "stress-escape-relaxing-aromatic-oils-and-singing-gongs-and-bowls", - "sourceId": "KVDNNN", - "title": "Stress Escape (Relaxing Aromatic Oils and Singing Gongs and Bowls)", - "description": "By master Ice \r\n- Let go of stress with the calming sounds of gongs and bowls\r\n- Enhance by soothing essential oil scents. 
You’ll also receive a take-home essential oil roller to keep the relaxation going after the session.\r\n\r\nNov 15 13:00 - 13:45", - "track": "Entertainment", - "type": "Mixed Formats", - "expertise": "", + "id": "superliquid-mechanisms-for-decentralized-stablecoins", + "sourceId": "SLNQ8K", + "title": "Superliquid Mechanisms for Decentralized Stablecoins", + "description": "USDC and USDT outpace decentralized stablecoins in large part due to their liquidity. This talk covers the theory, data, and risks of stablecoin liquidity innovations. This will include mint/redemption mechanism design, liquidity pool design, rehypothecation, and protocol-owned liquidity. The analysis will distill how the flexibility of decentralized stablecoin issuance mechanisms can safely be used to their advantage over centralized stablecoins, which Gyroscope v2 is putting into practice.", + "track": "Cryptoeconomics", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Mechanism design", + "Economics", + "AMMs", + "defi", + "AMMs", + "Economics", + "Mechanism design" + ], + "keywords": [ + "Stablecoins", + "DeFi" + ], + "duration": 1533, "language": "en", - "speakers": [], + "sources_swarmHash": "59211204d3c04b626b444be0436bb8a47a78e7244db6c5f669e22c0bd4079e86", + "sources_youtubeId": "TdAa95XDFSw", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673d906517a97b4f4d386947", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731650400000, - "slot_end": 1731653100000, - "slot_roomId": "decompression-room", - "resources_presentation": "https://docs.google.com/presentation/d/1yzroGPmzEN55RgegoRuiSo7Qe_-eunH6UGPIczkFag0" + "slot_start": 1731641400000, + "slot_end": 1731643200000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1Uq2Z7r9A4ctbRuT4PbYzFJRFe2xqpvo_AnrVxHcMjiU", + "resources_slides": "https://drive.google.com/file/d/1IUb5eSKmm_AG1EKxnCuTLAUXodcL6xHE/view", + "speakers": [ + "ariah-klages-mundt" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 6, @@ -707021,6 +705027,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -707290,6 +705297,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -707317,6 +705325,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -707364,6 +705373,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -707471,6 +705481,37 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -707810,43 +705851,10 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, + 0, 2, 0, 0, @@ -707865,46 +705873,55 @@ }, { "session": { - "id": "structuring-censorship-resistant-privacy-protocols-risks-and-considerations", - "sourceId": "MVJFDX", - "title": "Structuring Censorship Resistant Privacy Protocols: Risks and Considerations", - "description": "This workshop is aimed at developers, legal professionals, and project managers involved in the creation and maintenance of privacy-focused projects and will guide participants through the various considerations and risks that need to be managed during the structuring, development and launch of these protocols.", - "track": "Cypherpunk & Privacy", + "id": 
"supernodes-on-a-shoestring-democratizing-ethereum-with-low-power-hardware", + "sourceId": "W3DKPQ", + "title": "Supernodes on a Shoestring: Democratizing Ethereum with Low-Power Hardware", + "description": "Learn to run a full Ethereum supernode (L1 & L2) on affordable hardware (ARM devices) This live demo will guide you through selecting the hardware, installing EoA image who automatically install and configure all the software. Become a part of the decentralized Ethereum on a easy and power efficient way.", + "track": "Core Protocol", "type": "Workshop", - "expertise": "Intermediate", - "audience": "Product", + "expertise": "Beginner", + "audience": "Engineering", "featured": false, - "doNotRecord": true, - "keywords": [ - "Legal" - ], + "doNotRecord": false, "tags": [ - "Frameworks", - "Privacy", - "Censorship Resistance", - "legal", - "Censorship Resistance", - "Frameworks", - "Privacy" + "Layer 1", + "Decentralization Improvements", + "Layer 2s", + "Decentralization", + "hardware", + "low-power", + "Decentralization", + "Decentralization Improvements", + "Layer 1", + "Layer 2s" ], - "language": "en", - "speakers": [ - "fatemeh-fannizadeh", - "andre-omietanski", - "amal-ibraymi" + "keywords": [ + "Node Operation", + "Low-Power Hardware" ], + "duration": 4662, + "language": "en", + "sources_swarmHash": "46fe2d92049008021f297bb1ee93996f8d721813789639773fef05ea0c778d9a", + "sources_youtubeId": "k2lYtOi1KJY", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731576600000, - "slot_end": 1731582000000, - "slot_roomId": "classroom-d", - "resources_presentation": "https://docs.google.com/presentation/d/1hNJE0EKTqY7KkSQmnZdpNsxrFfsKPlhwl0VFWn9f3pA" + "slot_start": 1731472200000, + "slot_end": 1731477600000, + "slot_roomId": "classroom-e", + "resources_presentation": "https://docs.google.com/presentation/d/1iW-qq2w5XkPf2rNpSWzKfErwV_ysrpVcA97rrOKKEyQ", + "resources_slides": "https://drive.google.com/file/d/11pPsDl0AmrhXn7n5c04QAvQrVP55d54A/view", + "speakers": [ + "diego-losada", + "fernando-collado" + ] }, "vector": [ 0, 0, 0, 0, - 0, 6, 0, 0, @@ -708200,6 +706217,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -708426,7 +706444,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -708513,13 +706530,9 @@ 0, 0, 0, - 6, - 6, - 0, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -708659,6 +706672,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -708667,6 +706681,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -708717,6 +706732,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -708752,11 +706768,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -708767,7 +706783,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -708799,7 +706814,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -708831,6 +706845,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -709216,15 +707231,13 @@ 0, 2, 0, + 2, 0, 0, 0, 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -709236,52 +707249,38 @@ }, { "session": { - "id": "superliquid-mechanisms-for-decentralized-stablecoins", - "sourceId": "SLNQ8K", - "title": "Superliquid Mechanisms for Decentralized Stablecoins", - "description": "USDC and USDT outpace decentralized stablecoins in large part due to their liquidity. This talk covers the theory, data, and risks of stablecoin liquidity innovations. This will include mint/redemption mechanism design, liquidity pool design, rehypothecation, and protocol-owned liquidity. 
The analysis will distill how the flexibility of decentralized stablecoin issuance mechanisms can safely be used to their advantage over centralized stablecoins, which Gyroscope v2 is putting into practice.", - "track": "Cryptoeconomics", - "type": "Talk", - "expertise": "Intermediate", + "id": "sybil-defense-protocol-a-devcon-treasure-hunt", + "sourceId": "MTYBFJ", + "title": "Sybil Defense Protocol - a Devcon Treasure Hunt", + "description": "Once again Social Dist0rtion Protocol and Daedalus Industries bring you a Treasure Hunt Challenge (THC) at Devcon with zero knowledge on-chain puzzle verification using the Treasure Hunt Creator (THC) framework. We present the highlights of this year's quest, share some in-game statistics, and announce winning teams.", + "track": "Entertainment", + "type": "Mixed Formats", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Mechanism design", - "Economics", - "AMMs", - "defi", - "AMMs", - "Economics", - "Mechanism design" - ], - "keywords": [ - "Stablecoins", - "DeFi" - ], - "duration": 1533, + "tags": [], + "keywords": [], + "duration": 238, "language": "en", - "sources_swarmHash": "59211204d3c04b626b444be0436bb8a47a78e7244db6c5f669e22c0bd4079e86", - "sources_youtubeId": "TdAa95XDFSw", + "sources_swarmHash": "4a803543baa820b331eb5088da4e12024d1c6d5223373e3ed88bb5f842acd8f8", + "sources_youtubeId": "7lDAZyQK9m8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673d906517a97b4f4d386947", + "sources_streamethId": "673db95b17a97b4f4dddd6b5", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731641400000, - "slot_end": 1731643200000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1Uq2Z7r9A4ctbRuT4PbYzFJRFe2xqpvo_AnrVxHcMjiU", - "resources_slides": null, - "speakers": [ - "ariah-klages-mundt" - ] + "slot_start": 1731654900000, + "slot_end": 1731655200000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/14xnzjXegPuqd055ll1dzP1BunpWbC7FiyZI4jlW0usU", + "resources_slides": "https://drive.google.com/file/d/1zUS2key6mmUn31XKyNzUSF5st9oDqFEq/view", + "speakers": [] }, "vector": [ 0, 0, - 6, 0, 0, 0, @@ -709289,6 +707288,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -709765,7 +707765,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -710037,7 +708036,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -710065,7 +708063,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -710113,7 +708110,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -710221,7 +708217,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -710593,8 +708588,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 2, @@ -710615,53 +708610,48 @@ }, { "session": { - "id": "supernodes-on-a-shoestring-democratizing-ethereum-with-low-power-hardware", - "sourceId": "W3DKPQ", - "title": "Supernodes on a Shoestring: Democratizing Ethereum with Low-Power Hardware", - "description": "Learn to run a full Ethereum supernode (L1 & L2) on affordable hardware (ARM devices) This live demo will guide you through selecting the hardware, installing EoA image who automatically install and configure all the software. 
Become a part of the decentralized Ethereum on a easy and power efficient way.", - "track": "Core Protocol", - "type": "Workshop", - "expertise": "Beginner", - "audience": "Engineering", + "id": "sybil-proof-mechanisms", + "sourceId": "7QENZH", + "title": "Sybil-Proof Mechanisms", + "description": "I discuss a fundamental impossibility result on proposer selection mechanisms: If different actors can generate different value from block proposal (or sequencing) rights, the only sybil-proof and incentive compatible way of assigning proposal rights is through an (arguably centralizing) auction. In other words, any proposer selection mechanism can at most satisfy two out of three fundamental requirements: incentive compatibility, sybil-resistance and decentralization.", + "track": "Cryptoeconomics", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Layer 1", - "Decentralization Improvements", - "Layer 2s", - "Decentralization", - "hardware", - "low-power", - "Decentralization", - "Decentralization Improvements", - "Layer 1", - "Layer 2s" + "PBS", + "Mechanism design", + "Game Theory", + "MEV", + "apps", + "Game Theory", + "Mechanism design", + "MEV", + "PBS" ], "keywords": [ - "Node Operation", - "Low-Power Hardware" + "APS" ], - "duration": 4662, + "duration": 534, "language": "en", - "sources_swarmHash": "46fe2d92049008021f297bb1ee93996f8d721813789639773fef05ea0c778d9a", - "sources_youtubeId": "k2lYtOi1KJY", + "sources_swarmHash": "128d8549b9f6be7505306f18e50a994b8b3afbd30a997bae8bf236e23af9240c", + "sources_youtubeId": "ifMRDZvV2kU", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731472200000, - "slot_end": 1731477600000, - "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/1iW-qq2w5XkPf2rNpSWzKfErwV_ysrpVcA97rrOKKEyQ", - "resources_slides": null, + "slot_start": 1731486600000, + "slot_end": 1731487200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1zjLtbzOM-9p0FmUus6R7GhQq9rHDQj5paePedPnL_rA", + "resources_slides": "https://drive.google.com/file/d/1zV0pmox5cTThXa2BFFIP_cPKpPJx323Z/view", "speakers": [ - "diego-losada", - "fernando-collado" + "christoph-schlegel" ] }, "vector": [ - 0, - 0, 0, 0, 6, @@ -710859,6 +708849,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -710960,11 +708951,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -711275,7 +709261,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -711422,10 +709407,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, + 6, 6, 0, 0, @@ -711457,6 +709439,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -711477,22 +709460,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -711513,7 +709480,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -711590,7 +709556,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -711898,7 +709863,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -711923,6 +709887,26 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -711976,13 +709960,16 @@ 0, 0, 0, - 2, 0, 2, 0, 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -711996,34 +709983,26 @@ }, { "session": { - "id": "sybil-defense-protocol-a-devcon-treasure-hunt", - "sourceId": "MTYBFJ", - "title": "Sybil Defense Protocol - a Devcon Treasure Hunt", - "description": "Once again Social Dist0rtion Protocol and Daedalus Industries bring you a Treasure Hunt Challenge (THC) at Devcon with zero knowledge on-chain puzzle 
verification using the Treasure Hunt Creator (THC) framework. We present the highlights of this year's quest, share some in-game statistics, and announce winning teams.", + "id": "synthetic-melodies-a-digital-soundscape", + "sourceId": "EZ3EVX", + "title": "Synthetic Melodies: A Digital Soundscape", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience! Dive into the bleeps and bloops curated by RBRD, weaving together experimental, ambient and IDM. Let’s connect through the universal language of music!", "track": "Entertainment", - "type": "Mixed Formats", + "type": "Music", "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], "keywords": [], - "duration": 238, + "tags": [], "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "7lDAZyQK9m8", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673db95b17a97b4f4dddd6b5", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731654900000, - "slot_end": 1731655200000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/14xnzjXegPuqd055ll1dzP1BunpWbC7FiyZI4jlW0usU", - "resources_slides": null, - "speakers": [] + "slot_start": 1731402000000, + "slot_end": 1731405600000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1kQVFXulZrmOXmwN9TZ75ma4CYWlC0Kv9WxWB6w0qyAg", + "resources_slides": "" }, "vector": [ 0, @@ -713336,11 +711315,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -713362,51 +711336,30 @@ }, { "session": { - "id": "sybil-proof-mechanisms", - "sourceId": "7QENZH", - "title": "Sybil-Proof Mechanisms", - "description": "I discuss a fundamental impossibility result on proposer selection mechanisms: If different actors can generate different value from block proposal (or sequencing) rights, the only sybil-proof and incentive compatible way of assigning proposal rights is through an (arguably centralizing) auction. In other words, any proposer selection mechanism can at most satisfy two out of three fundamental requirements: incentive compatibility, sybil-resistance and decentralization.", - "track": "Cryptoeconomics", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Research", + "id": "synto-nikka", + "sourceId": "ZBSJDY", + "title": "Synto Nikka", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "PBS", - "Mechanism design", - "Game Theory", - "MEV", - "apps", - "Game Theory", - "Mechanism design", - "MEV", - "PBS" - ], - "keywords": [ - "APS" - ], - "duration": 534, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "128d8549b9f6be7505306f18e50a994b8b3afbd30a997bae8bf236e23af9240c", - "sources_youtubeId": "ifMRDZvV2kU", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731486600000, - "slot_end": 1731487200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1zjLtbzOM-9p0FmUus6R7GhQq9rHDQj5paePedPnL_rA", - "resources_slides": null, - "speakers": [ - "christoph-schlegel" - ] + "slot_start": 1731492000000, + "slot_end": 1731497400000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1qlDffU55LOyqC5g5m_XelYjXsBTWIYahAHtzcqgHwic", + "resources_slides": "" }, "vector": [ 0, 0, - 6, 0, 0, 0, @@ -713414,6 +711367,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -713601,7 +711555,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -714157,13 +712110,10 @@ 0, 0, 0, - 6, 0, 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -714194,7 +712144,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -714643,7 +712592,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -714718,9 +712666,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 2, @@ -714735,30 +712683,55 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "synthetic-melodies-a-digital-soundscape", - "sourceId": "EZ3EVX", - "title": "Synthetic Melodies: A Digital Soundscape", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience! Dive into the bleeps and bloops curated by RBRD, weaving together experimental, ambient and IDM. Let’s connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", - "audience": "Engineering", + "id": "tackling-east-asias-population-decline-issues-with-local-coops-subsystem-for-local-governance", + "sourceId": "QKMVPC", + "title": "Tackling East Asia's Population Decline Issues with Local Coop's Subsystem for Local Governance", + "description": "Local Coop envisions a world beyond nation-states and capitalism, fostering mutual aid and co-creation. It promotes self-reliant community autonomy and public goods, targeting East Asia's declining population. The system includes digital resident IDs with NFTs, democratizes emissions trading, and manages resources sustainably. 
Partnerships with local governments facilitate transferring public goods and services to Local Coop, optimized through technology and resident participation.", + "track": "Real World Ethereum", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Local/SEA", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Public good", + "Local Impact", + "service", + "public", + "Autonomous World", + "Local Impact", + "Public good" + ], + "keywords": [ + "Population Decline", + "Local Government", + "NFT", + "Public Service" + ], + "duration": 558, "language": "en", - "speakers": [], + "sources_swarmHash": "970e2579b6bacb4f94a244e5139a9f6388fba8e2a7198feced8dc071c16bd62d", + "sources_youtubeId": "pmWRDoV3ug4", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735bb619dbb7a90e19e862b", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735bb619dbb7a90e19e862b.vtt", + "transcript_text": " Thank you. I believe that the most important thing is to implement new ideas and mechanisms into society. If you ask me the way to start the implementation into real society, I will answer in rural area of Japan where the population is rapidly declining. And Japan's population peaked in 2018 and has continued to decline and it is fair to say that Japan is one of the countries in the world with the most declining aging population. And it will become a serious issue not only in Japan, but also in East Asia. Thailand is about to enter a time of population decline. The map of Japan is colored by red by local governments that predict it to likely disappear by 2050, but 40% of the more than 1,700 basic local governments are in danger of disappearing. We have decided to focus our attention on Japan's local community and local government to develop and implement a new community operating system, Local Coop. By taking back the laws previously left to government agencies and putting them in our own hands, we will build a foundation for a more flexible and fulfilling way of life. Numerous small communities will be created, and the people who move freely between them will solve problems and create new values through co-creation and mutual assistance. Local co-op is truly a mechanism for funding local commons whereby local assets such as vacant houses and best forests in a community can be made accessible and nurtured as commons while raising funded external resources. The goal is to optimally redistribute and invest common funds without the involvement of the former government or council in governance. Now let's describe the specific case study. The first case is Yamaguchi village, a type of depopulated area in Japan. Currently, Yamaguchi village has a population of less than 770 and is facing difficulty in maintaining the infrastructure that is essential for daily life. With a sense of crisis that their village will be disappeared if nothing is done, we are contacted by the villagers about three years ago and offered this proposal. Let's let people around the world know what is going on in Yamakoshi Village and get our friends. So let me show the video. This is the rural community of Yamakoshi, Japan. In 2004, a massive earthquake hit this part of Japan. Many left and never came back. If my parents and mother are gone, if the young people don't come back, this village will be destroyed. To help deal with this, Haruka's got some ideas in the digital space. To avoid losing our identity, I started using NFTs. 
When you buy an NFT, it's almost like your passport to be a digital villager. How many do you have now? There's 1,012 people. There's 1,000 and a half people that have bought these NFTs. Yes. Whether you live in Japan or in the world, anyone can buy an NFT and become a member of Yamakoshi. It's worth not underestimating the value of that legacy. And I hope Web3 technology can help bridge the divide between young generations and historical legacies. Currently the number of the digital villages both domestic and international exceeds 1,700 more than double the number of the real villages. The second is about a relationship between the sustainably regenerated nature in the sustainable local community. We have focused an abundant of nature in Japan. 70% of the country land is forested and the archipelago is surrounded by the sea. On the other hand, as a result of the decline of the forestry and the other industry planted as a result of the decline of forestry and other industries, planted forests have become desolate, creatures are disappearing, fish are harder to catch, and the risk of landslides and other disasters is increasing. It is important to guarantee economic feasibility while restoring the cycle of nature. SINLA makes it possible to raise funds for the maintaining of Japan's local forests and the oceans by allowing pre-holding rights for the carbon credit to be created to be exchanged on the blockchain. By using the funds raised to maintain natural resources, carbon credit can be created. The project is also promoting the democratization of the emission trading so that individuals as well as companies both domestic and international can participate in the trading. As our first site for creating a carbon credit we have a partnership with Oasis City. On the other hand, carbon credit are not only things generated from forest maintenance. We are working to create biodiversity forests and restore the nature that humans have destroyed. To create a watershed where diverse creatures can return and disasters are less likely to occur. is now in the position to generate carbon quality worth 330,000 US dollars per year. This model, which can be financed by caring for the local nature, is beginning to be deployed in other regions. Local coop, the practice is still in the process process but we have discussed with a specific example. We are forming the common fund with a steady stream of the funds as a resource through the multiple channel. The common funds are matched using methods such as a quadratic funding. In Japan, we are currently conducting demonstrations in four areas and plan to conduct demonstrations outside of Japan in the near future. Local coup is a strategy to start by creating a subsystem of local government in Japan, which is facing the rapid population decline, and it is an excellent opportunity to design the original radical self-government. We intend to develop local coop as the main system to implement a plural world where people freely move between multiple communities and help each other, creating the communities that are self-serving but necessary for survival. Thank you. Thank you. It's very interesting because some of the community members in Taiwan actually gather resources to buy one of the NFT. So we're technically one of the villagers. Any questions from the audience? Anyone? Okay, death. I'm going to throw it. Oh, oops. Very far. I'm going to throw it. Oops. Very far. Nice. Thank you so much for that very brief overview. 
I'm wondering if there's ways for people outside of Japan who maybe don't have a Japanese passport to participate in these kind of schemes. Is that part of the conversation? Is there any way to design in digital nomads to come in and experience even for short periods of time? In Japan, there's some kind of discussion about the digital nomad visa ID, but it's kind of separated. So now we are just for example, Nishikigoi NFT is just digital resident certificate NFT. So you can participate in a discussion and you can participate in voting. And also you can visit to the village and stay and talk with local residents and people and participate in a festival, something like that. And it's interesting. There are so many elderly people over 80, 90 years old, but they all realize digital villages. So if you're a digital village and and go to the Yamakoshi village, probably elderly, the little grandmother, grandpa, but they realize the relationship between the local resident and the outside of people, digital visitor. That's wonderful, I think. Thank you for your question. Thank you. I'm sorry, but we only have...", "eventId": "devcon-7", - "slot_start": 1731402000000, - "slot_end": 1731405600000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1kQVFXulZrmOXmwN9TZ75ma4CYWlC0Kv9WxWB6w0qyAg" + "slot_start": 1731573600000, + "slot_end": 1731574200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/105LJog6X4qLZc6Fr_TdY9gMTLhUukbrbE677s9fsW6E", + "resources_slides": "https://drive.google.com/file/d/1pXURERD6HMluuQR3lPCLOiTsNv3OFk2Z/view", + "speakers": [ + "atsushi-hayashiatsu" + ] }, "vector": [ 0, @@ -714767,9 +712740,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -715379,6 +713349,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -715620,6 +713591,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -715755,6 +713728,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -715939,6 +713913,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -715966,13 +713941,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -716079,8 +714048,6 @@ 2, 0, 0, - 2, - 0, 0, 0, 0, @@ -716092,37 +714059,52 @@ 0, 0, 0, + 2, 0 ] }, { "session": { - "id": "synto-nikka", - "sourceId": "ZBSJDY", - "title": "Synto Nikka", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", + "id": "tales-from-interop", + "sourceId": "UQPDPQ", + "title": "Tales from interop", + "description": "A deep dive into the interop process for Pectra and how it evolved over the year. 
Find out how 100 people can work on 3 forks at the same time and how we avoided the devops bottlenecks.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Core Protocol", + "Security", + "Testing", + "devops", + "Core Protocol", + "Security", + "Testing" + ], + "keywords": [ + "DevOps" + ], + "duration": 1433, "language": "en", - "speakers": [], + "sources_swarmHash": "383122b77f86b227e151f74387c9f010ac758d64fca5abea34685147c14c417d", + "sources_youtubeId": "NHsi-lyOEUA", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731492000000, - "slot_end": 1731497400000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1qlDffU55LOyqC5g5m_XelYjXsBTWIYahAHtzcqgHwic" + "slot_start": 1731403800000, + "slot_end": 1731405600000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1EI6PvXpSa-LCMg1S_f31vrLcip8y61g5BqDRGaUIJe0", + "resources_slides": "https://drive.google.com/file/d/19rzwzD9EyqmEfORc3qmLOZME_VIxPIH3/view", + "speakers": [ + "parithosh-jayanthi" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -716739,6 +714721,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -716870,6 +714853,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -716888,6 +714872,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -717112,6 +715097,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -717355,6 +715341,19 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -717415,27 +715414,10 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, + 0, 2, 0, 0, @@ -717454,48 +715436,43 @@ }, { "session": { - "id": "tackling-east-asias-population-decline-issues-with-local-coops-subsystem-for-local-governance", - "sourceId": "QKMVPC", - "title": "Tackling East Asia's Population Decline Issues with Local Coop's Subsystem for Local Governance", - "description": "Local Coop envisions a world beyond nation-states and capitalism, fostering mutual aid and co-creation. It promotes self-reliant community autonomy and public goods, targeting East Asia's declining population. The system includes digital resident IDs with NFTs, democratizes emissions trading, and manages resources sustainably. Partnerships with local governments facilitate transferring public goods and services to Local Coop, optimized through technology and resident participation.", - "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Local/SEA", - "featured": false, + "id": "tending-the-infinite-garden-organizational-culture-in-the-ethereum-ecosystem", + "sourceId": "U7SNLQ", + "title": "Tending the Infinite Garden: Organizational Culture in the Ethereum Ecosystem", + "description": "This presentation will discuss the findings of the academic paper \"Tending the Infinite Garden: Organisational Culture in the Ethereum Ecosystem\" by Dr. Paul-Dylan-Ennis and Ann Brody. Our study examines the decision-making processes fundamental to Ethereum's protocol governance, drawing on interviews with Ethereum's core developers. 
We identify a central worldview in Ethereum known as the \"Infinite Garden\" and discuss how Ethereum's social layer is crucial for upholding cypherpunk values.", + "track": "Cypherpunk & Privacy", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Developer", + "featured": true, "doNotRecord": false, "tags": [ - "Public good", - "Local Impact", - "service", - "public", - "Autonomous World", - "Local Impact", - "Public good" + "value" ], "keywords": [ - "Population Decline", - "Local Government", - "NFT", - "Public Service" + "Ethereum", + "Core", + "Development;", + "Social", + "Layer;", + "Governance;", + "Values" ], - "duration": 558, + "duration": 1427, "language": "en", - "sources_swarmHash": "970e2579b6bacb4f94a244e5139a9f6388fba8e2a7198feced8dc071c16bd62d", - "sources_youtubeId": "pmWRDoV3ug4", + "sources_swarmHash": "6cd713334783bb75c3c9510a70b0a320d2f16dfb69b2c7e997c0f0a2504db504", + "sources_youtubeId": "GAAi4ysKV_c", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735bb619dbb7a90e19e862b", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735bb619dbb7a90e19e862b.vtt", - "transcript_text": " Thank you. I believe that the most important thing is to implement new ideas and mechanisms into society. If you ask me the way to start the implementation into real society, I will answer in rural area of Japan where the population is rapidly declining. And Japan's population peaked in 2018 and has continued to decline and it is fair to say that Japan is one of the countries in the world with the most declining aging population. And it will become a serious issue not only in Japan, but also in East Asia. Thailand is about to enter a time of population decline. The map of Japan is colored by red by local governments that predict it to likely disappear by 2050, but 40% of the more than 1,700 basic local governments are in danger of disappearing. We have decided to focus our attention on Japan's local community and local government to develop and implement a new community operating system, Local Coop. By taking back the laws previously left to government agencies and putting them in our own hands, we will build a foundation for a more flexible and fulfilling way of life. Numerous small communities will be created, and the people who move freely between them will solve problems and create new values through co-creation and mutual assistance. Local co-op is truly a mechanism for funding local commons whereby local assets such as vacant houses and best forests in a community can be made accessible and nurtured as commons while raising funded external resources. The goal is to optimally redistribute and invest common funds without the involvement of the former government or council in governance. Now let's describe the specific case study. The first case is Yamaguchi village, a type of depopulated area in Japan. Currently, Yamaguchi village has a population of less than 770 and is facing difficulty in maintaining the infrastructure that is essential for daily life. With a sense of crisis that their village will be disappeared if nothing is done, we are contacted by the villagers about three years ago and offered this proposal. Let's let people around the world know what is going on in Yamakoshi Village and get our friends. So let me show the video. This is the rural community of Yamakoshi, Japan. In 2004, a massive earthquake hit this part of Japan. Many left and never came back. 
If my parents and mother are gone, if the young people don't come back, this village will be destroyed. To help deal with this, Haruka's got some ideas in the digital space. To avoid losing our identity, I started using NFTs. When you buy an NFT, it's almost like your passport to be a digital villager. How many do you have now? There's 1,012 people. There's 1,000 and a half people that have bought these NFTs. Yes. Whether you live in Japan or in the world, anyone can buy an NFT and become a member of Yamakoshi. It's worth not underestimating the value of that legacy. And I hope Web3 technology can help bridge the divide between young generations and historical legacies. Currently the number of the digital villages both domestic and international exceeds 1,700 more than double the number of the real villages. The second is about a relationship between the sustainably regenerated nature in the sustainable local community. We have focused an abundant of nature in Japan. 70% of the country land is forested and the archipelago is surrounded by the sea. On the other hand, as a result of the decline of the forestry and the other industry planted as a result of the decline of forestry and other industries, planted forests have become desolate, creatures are disappearing, fish are harder to catch, and the risk of landslides and other disasters is increasing. It is important to guarantee economic feasibility while restoring the cycle of nature. SINLA makes it possible to raise funds for the maintaining of Japan's local forests and the oceans by allowing pre-holding rights for the carbon credit to be created to be exchanged on the blockchain. By using the funds raised to maintain natural resources, carbon credit can be created. The project is also promoting the democratization of the emission trading so that individuals as well as companies both domestic and international can participate in the trading. As our first site for creating a carbon credit we have a partnership with Oasis City. On the other hand, carbon credit are not only things generated from forest maintenance. We are working to create biodiversity forests and restore the nature that humans have destroyed. To create a watershed where diverse creatures can return and disasters are less likely to occur. is now in the position to generate carbon quality worth 330,000 US dollars per year. This model, which can be financed by caring for the local nature, is beginning to be deployed in other regions. Local coop, the practice is still in the process process but we have discussed with a specific example. We are forming the common fund with a steady stream of the funds as a resource through the multiple channel. The common funds are matched using methods such as a quadratic funding. In Japan, we are currently conducting demonstrations in four areas and plan to conduct demonstrations outside of Japan in the near future. Local coup is a strategy to start by creating a subsystem of local government in Japan, which is facing the rapid population decline, and it is an excellent opportunity to design the original radical self-government. We intend to develop local coop as the main system to implement a plural world where people freely move between multiple communities and help each other, creating the communities that are self-serving but necessary for survival. Thank you. Thank you. It's very interesting because some of the community members in Taiwan actually gather resources to buy one of the NFT. So we're technically one of the villagers. 
Any questions from the audience? Anyone? Okay, death. I'm going to throw it. Oh, oops. Very far. I'm going to throw it. Oops. Very far. Nice. Thank you so much for that very brief overview. I'm wondering if there's ways for people outside of Japan who maybe don't have a Japanese passport to participate in these kind of schemes. Is that part of the conversation? Is there any way to design in digital nomads to come in and experience even for short periods of time? In Japan, there's some kind of discussion about the digital nomad visa ID, but it's kind of separated. So now we are just for example, Nishikigoi NFT is just digital resident certificate NFT. So you can participate in a discussion and you can participate in voting. And also you can visit to the village and stay and talk with local residents and people and participate in a festival, something like that. And it's interesting. There are so many elderly people over 80, 90 years old, but they all realize digital villages. So if you're a digital village and and go to the Yamakoshi village, probably elderly, the little grandmother, grandpa, but they realize the relationship between the local resident and the outside of people, digital visitor. That's wonderful, I think. Thank you for your question. Thank you. I'm sorry, but we only have...", + "sources_streamethId": "67349bef9dbb7a90e120f3ee", "eventId": "devcon-7", - "slot_start": 1731573600000, - "slot_end": 1731574200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/105LJog6X4qLZc6Fr_TdY9gMTLhUukbrbE677s9fsW6E", - "resources_slides": null, + "slot_start": 1731495600000, + "slot_end": 1731497400000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1f-XpVYzA-AiFID7laGqTa-L6kAXqGezXQRCWQw-a-L4", + "resources_slides": "https://drive.google.com/file/d/1SSaeXlM1yqA642cTndIn_PNnqTFqbnC0/view", "speakers": [ - "atsushi-hayashiatsu" + "ann-brody" ] }, "vector": [ @@ -717504,7 +715481,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -718115,9 +716091,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -718359,8 +716335,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -718496,7 +716470,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -718624,6 +716597,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -718682,7 +716656,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -718710,7 +716683,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -718813,12 +716785,13 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, + 2, 0, 0, 0, @@ -718829,60 +716802,46 @@ 0, 0, 0, - 2, 0 ] }, { "session": { - "id": "tales-from-interop", - "sourceId": "UQPDPQ", - "title": "Tales from interop", - "description": "A deep dive into the interop process for Pectra and how it evolved over the year. Find out how 100 people can work on 3 forks at the same time and how we avoided the devops bottlenecks.", - "track": "Core Protocol", + "id": "thailands-talents-attraction-initiatives-ltr-smart-visa-program", + "sourceId": "ENXEC9", + "title": "Thailand’s talents attraction initiatives; LTR / Smart visa program", + "description": "In this session, we'll explore Thailand’s talent attraction initiatives, including the Long-Term Resident (LTR) Visa and Smart Visa, crafted to draw global talent and investment. 
Expect a comprehensive overview of visa categories, eligibility criteria, and exclusive benefits like reduced personal income tax rates for highly skilled professionals, streamlined work permissions, and extended reporting requirements.", + "track": "Real World Ethereum", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Developer", "featured": false, - "doNotRecord": false, - "tags": [ - "Core Protocol", - "Security", - "Testing", - "devops", - "Core Protocol", - "Security", - "Testing" - ], + "doNotRecord": true, "keywords": [ - "DevOps" + "Visa", + "work", + "permit" ], - "duration": 1433, + "tags": [], "language": "en", - "sources_swarmHash": "383122b77f86b227e151f74387c9f010ac758d64fca5abea34685147c14c417d", - "sources_youtubeId": "NHsi-lyOEUA", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731403800000, - "slot_end": 1731405600000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1EI6PvXpSa-LCMg1S_f31vrLcip8y61g5BqDRGaUIJe0", - "resources_slides": null, "speakers": [ - "parithosh-jayanthi" - ] + "kantarot-laopradith" + ], + "eventId": "devcon-7", + "slot_start": 1731645600000, + "slot_end": 1731647400000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1azLdMUNgvh3r_j9J8CLf4Esqc2Konko-l9riMWwdpNU", + "resources_slides": "" }, "vector": [ 0, 0, 0, 0, - 6, - 0, 0, 0, + 6, 0, 0, 0, @@ -719492,9 +717451,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -719626,7 +717585,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -719645,7 +717603,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -719870,7 +717827,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -720115,7 +718071,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -720193,9 +718148,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -720211,51 +718166,47 @@ }, { "session": { - "id": "tending-the-infinite-garden-organizational-culture-in-the-ethereum-ecosystem", - "sourceId": "U7SNLQ", - "title": "Tending the Infinite Garden: Organizational Culture in the Ethereum Ecosystem", - "description": "This presentation will discuss the findings of the academic paper \"Tending the Infinite Garden: Organisational Culture in the Ethereum Ecosystem\" by Dr. Paul-Dylan-Ennis and Ann Brody. Our study examines the decision-making processes fundamental to Ethereum's protocol governance, drawing on interviews with Ethereum's core developers. We identify a central worldview in Ethereum known as the \"Infinite Garden\" and discuss how Ethereum's social layer is crucial for upholding cypherpunk values.", - "track": "Cypherpunk & Privacy", - "type": "Talk", + "id": "the-10-most-common-vulnerabilities-found-in-audit-contests", + "sourceId": "LYFXZN", + "title": "The 10 Most Common Vulnerabilities Found in Audit Contests", + "description": "This lightning talk offers a quick survival guide for DApp developers and security experts, highlighting the most common vulnerabilities found in audit contests. As these contests are often the final step before mainnet, the identified vulnerabilities have typically been overlooked by multiple developers and auditors. 
The session includes a link to a guide on fixing each vulnerability and a 2-minute Q&A to explore any of the 10 vulnerabilities in more detail and discuss why they are often missed", + "track": "Security", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", - "featured": true, + "audience": "Research", + "featured": false, "doNotRecord": false, "tags": [ - "value" + "Security", + "Auditing", + "audit", + "contest", + "Auditing", + "Security" ], "keywords": [ - "Ethereum", - "Core", - "Development;", - "Social", - "Layer;", - "Governance;", - "Values" + "Vulnerabilities;", + "Audit", + "Contests" ], - "duration": 1427, + "duration": 595, "language": "en", - "sources_swarmHash": "6cd713334783bb75c3c9510a70b0a320d2f16dfb69b2c7e997c0f0a2504db504", - "sources_youtubeId": "GAAi4ysKV_c", + "sources_swarmHash": "3103f2e82576803c887da36c890760dec4bb346076f23924fe2e0ecaf42099a0", + "sources_youtubeId": "MT7mYhwgksI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67349bef9dbb7a90e120f3ee", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731495600000, - "slot_end": 1731497400000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1f-XpVYzA-AiFID7laGqTa-L6kAXqGezXQRCWQw-a-L4", - "resources_slides": null, + "slot_start": 1731408000000, + "slot_end": 1731408600000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1_iMeu-TIt6aOehgouo5xQOCb89l5Su5oE2WffTDcOII", + "resources_slides": "https://drive.google.com/file/d/1y7ExLB5m_41dHQpU0shgISHC5VGpS21i/view", "speakers": [ - "ann-brody" + "jack-sanford" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -720869,16 +718820,13 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -721007,6 +718955,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -721158,6 +719107,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -721283,6 +719233,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -721376,7 +719327,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -721494,6 +719444,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -721570,7 +719521,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -721582,36 +719532,54 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "thailands-talents-attraction-initiatives-ltr-smart-visa-program", - "sourceId": "ENXEC9", - "title": "Thailand’s talents attraction initiatives; LTR / Smart visa program", - "description": "In this session, we'll explore Thailand’s talent attraction initiatives, including the Long-Term Resident (LTR) Visa and Smart Visa, crafted to draw global talent and investment. Expect a comprehensive overview of visa categories, eligibility criteria, and exclusive benefits like reduced personal income tax rates for highly skilled professionals, streamlined work permissions, and extended reporting requirements.", - "track": "Real World Ethereum", - "type": "Talk", + "id": "the-age-of-account-abstraction-opportunities-and-challenges", + "sourceId": "EPN9S7", + "title": "The Age of Account Abstraction: Opportunities and Challenges", + "description": "In a world where the web3 user experience is streamlined through account abstraction, complexities like gas and multiple L1s/L2s are hidden from users. This talk explores the competitive dynamics likely to develop at each layer of the stack (layers, DeFi protocols, intent protocols) and the strategies that might be employed to succeed. 
Join me to delve into the transformative impact of making Web3 seamless and accessible, and understand how to navigate and thrive in this evolving landscape.", + "track": "Usability", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Business", "featured": false, - "doNotRecord": true, + "doNotRecord": false, "keywords": [ - "Visa", - "work", - "permit" + "Protocol competition", + "User growth", + "Layer specialisation" + ], + "tags": [ + "Layer 2s", + "Account Abstraction", + "Intents", + "specialisation", + "layer", + "Account Abstraction", + "Intents", + "Layer 2s" ], - "tags": [], "language": "en", + "sources_swarmHash": "7737be5eba2099cd02775d6c06240621bd43800d2d436c6647d2411b7f3cd8cc", + "sources_youtubeId": "dNeCG5Za_hM", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "kantarot-laopradith" + "daniel-yanev" ], "eventId": "devcon-7", - "slot_start": 1731645600000, - "slot_end": 1731647400000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1azLdMUNgvh3r_j9J8CLf4Esqc2Konko-l9riMWwdpNU" + "slot_start": 1731552300000, + "slot_end": 1731552900000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/17eyZChjX1qpt1_WuQIDXpXi06_RixZQtwAbNNS22vqU", + "resources_slides": "https://drive.google.com/file/d/1StaC6tHfqm3FvyU52I3Z1SfxsnSyWWwc/view" }, "vector": [ 0, @@ -721620,6 +719588,8 @@ 0, 0, 0, + 0, + 0, 6, 0, 0, @@ -722232,7 +720202,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -722412,9 +720381,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -722425,6 +720396,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -722717,6 +720689,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -722847,17 +720820,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -722934,8 +720897,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -722950,53 +720913,50 @@ }, { "session": { - "id": "the-10-most-common-vulnerabilities-found-in-audit-contests", - "sourceId": "LYFXZN", - "title": "The 10 Most Common Vulnerabilities Found in Audit Contests", - "description": "This lightning talk offers a quick survival guide for DApp developers and security experts, highlighting the most common vulnerabilities found in audit contests. As these contests are often the final step before mainnet, the identified vulnerabilities have typically been overlooked by multiple developers and auditors. The session includes a link to a guide on fixing each vulnerability and a 2-minute Q&A to explore any of the 10 vulnerabilities in more detail and discuss why they are often missed", - "track": "Security", - "type": "Lightning Talk", + "id": "the-age-of-aggregation", + "sourceId": "VVTWM7", + "title": "The Age Of AGGREGATION", + "description": "Aggregation plays a critical role in enhancing the usability and scalability of blockchain technology. In this session, we will explore the fundamental concepts of aggregation, debunk common myths, and discuss the necessity of aggregated blockchain systems for achieving real-world usage. 
Current scalability boundaries limit blockchain's potential, but through aggregation, we can optimize performance and usability, making blockchain technology accessible to a broader audience", + "track": "Layer 2", + "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Security", - "Auditing", - "audit", - "contest", - "Auditing", - "Security" + "Protocol Design", + "Scalability", + "Token bridging", + "User Experience", + "Protocol Design", + "Token bridging", + "User Experience" ], "keywords": [ - "Vulnerabilities;", - "Audit", - "Contests" + "Blockchain optimization", + "performance enhancement", + "scalability" ], - "duration": 595, + "duration": 1566, "language": "en", - "sources_swarmHash": "3103f2e82576803c887da36c890760dec4bb346076f23924fe2e0ecaf42099a0", - "sources_youtubeId": "MT7mYhwgksI", + "sources_swarmHash": "75f1b00bbf2a2c46b3ff71f60ca339198feaa063744cb3f1e87a5af850ddd94f", + "sources_youtubeId": "asPJJDIQaWY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6736e8da1b0f83434d32eac5", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736e8da1b0f83434d32eac5.vtt", + "transcript_text": " of Polygon Labs. For those of you who don't know Polygon Labs, or specifically who don't know what Polygon Labs does, because we do many things, we are core contributor to the AgLayer along with many others. We've also developed multiple blockchains being Polygon POS that pretty much everyone knows, as well as Polygon ZKVM. So I'm going to go through a little bit of history before actually attacking what exactly we mean by aggregation, and specifically what I want to look at. And people talk a lot about not looking at analogies, but if we think of what we want Web 3 to be, we really want it to eat as much of Web 2 up as possible. We should be able to scale Web 3 to the size of Web 2. And so the question then becomes, well, how did the Internet become the Internet? And if you think about it, the Internet actually started very fragmented. It was very disconnected with multiple networks, unable to communicate. And then new protocols came along. And specifically, things like TCP, IP came along. And what you suddenly had was this diversity in what could be built on the internet and the infrastructure underlying it, but with some common protocols. And specifically what you get there is TCP IP. And it's very important to actually understand this because a lot of people say, like, how are we going to bring together all of Web 3 when you've got so much disparate technology in Web3? You've got different stacks, you've got different VMs, like, everything looks different. But we actually have an example of bringing together different technology in a way that, like, unifies it and actually feels like one. So let's talk about, like, specific problems we see in Web3 and why this is actually very relevant. We've got fragmentation as an issue. I think this is an obvious one for everyone. Scalability, security, and frankly, just poor UX. If we look at each of these one by one, what do we have right now? We've got a bunch of chains, and liquidity is fragmented across all of these chains. We really have no good way of bringing them together at all. Similarly, we've got a scalability problem. If we want Web3 to scale to the size of the internet, then we should probably compare to what currently is using the internet. 
And when we look at that, what we see things is MasterCard, Visa, Nasdaq. And I do not believe we actually have a network that can handle the load of any of those. I think we've heard many state that they want blockchains to be able to handle, you know, a load like Nasdaq. And that is 250,000 transactions per second, which no network is even close to being able to handle in any way whatsoever. I actually just as a little side note, I'll give you a few side notes during this presentation, but this is a little interesting fact is let's just assume that we could actually scale a blockchain to the size of Nasdaq. Nasdaq can execute transactions with a latency of 50 microseconds. You are not going to execute any transaction on a blockchain in 50 microseconds. So I think our dreams of bringing Nasdaq on-chain should probably recognize the fact that there's some things that are actually better in a centralized state, and there's some things that are better in a decentralized state. Obviously, this does not mean that we don't want decentralized exchanges. It means that there's specific technologies that serve specific purposes, and we should probably focus on more long-tail assets in a decentralized way when we want perfect performance execution. And then when we just want censorship resistance, then we should probably bring that on chain. Third one is security. Right now, when you look at many different networks and how they come together, we talk about concepts like shared sequencers and intents and bridges, and most of that is not actually very secure, even though it actually works decently in some cases. And lastly, the UX challenge. We've got a UX problem, whether it's across wallets, bridges and intents, chains. Basically, what you get is this fragmented feeling across the board. And so then the question becomes, what do you do about this? And the first answer that we tried to give, or many do still give is let's just use a monolithic chain Solana can solve this for you because something something something well, you know most of the time that something isn't going to be good enough because Literally we have seen that we can't even scale to some of the the smallest Networks that are good for payments in the world or the biggest ones. And so the idea that we're going to throw everything on one chain is not happening. And so we need to go look at another solution. The other solution was, why don't we just scale through modular chains, right? But the problem is modular chains increase fragmentation. We see it today. You've got, you know, Polygon ZKVM, Polygon POS. You've got ZK Sync Era. You've got, you know, Arbitrum 1. You've got OpenMainnet. You could keep going on and on. And, like, great, this is nice. Like, being able to choose how you want to spin up a chain is great. It actually solves the scalability issue, right? Because now when you need more block space, what do you do? You spin up a new chain. And with time, this is actually going to get easier and easier, right? Like right now, people want to sell you on the idea that spinning up a new chain is easy. Like frankly, it's actually not that easy. But if you fast forward 12 months from now and 24 months from now and 36 months from now, you're going to get to a point where spinning up a new chain is actually incredibly easy. And when you need to increase throughput and increase block space, then you're going to spin up a new chain to actually be able to do that. 
And so through kind of the modular approach, we can actually solve scalability. The problem is we still don't have one unified Web3 if we do that. And so the answer to this is just aggregate all the chains. And so what does aggregation actually allow for? It actually allows us to scale Web3 to the size of the internet. What are the things that we need to do this? First, we need practically infinite scalability. And that's what you get when you can spin up new chains very easily when more demand is needed. Because there's... When there is more demand, and so more block space is needed. And then the second one is you need to be able to unify state liquidity users. And if you really think of the North Star of what we should be targeting, we should really want to be able to have a Web3 and an Ethereum that scales as much as you need in a unified way, making you believe that you are using one chain while you are using 100,000 different chains at any point in time, and that that 100,000 chains can become 125,000 chains at any point in time, and it still feels like one internet. So when we've thought about this problem and how we want to solve this problem exactly, we started thinking about it and we realized that we needed to create a neutral platform. And so, this is the concept of the Ag layer. Polygon Labs is a core contributor, along with many others that I will show you a little bit later. But the general idea was that we need some kind of neutral cross-chain settlement platform that unifies liquidity users in state, and that ultimately has finality on Ethereum. Now, very importantly, the AgLayer is actually not intended to scale only Ethereum. And there's a reason for this, which is that as amazing as Ethereum is, what do people actually value in Web3? They value assets. And there's always going to be assets outside of Ethereum. And we need to be able to get people to come to Ethereum and stay on Ethereum. And the only way to do that is to allow them to use or to receive or send assets to another chain that is not Ethereum. And the only way to do that is to aggregate those chains along with Ethereum and all chains on them. And so that is a big part of kind of the AgL layer is really bringing everything together and ultimately having finality on Ethereum. Maybe let me talk to you about kind of like the different components of the Ag layer. Really think about it in like four different parts. A lot of times when people hear the Ag layer or anything that has to do with interoperability, they immediately default to it's a bridge. And it's nothing but a bridge. And this is simply because when we think of the forms of interoperability that we have had to go cross-chain over the last many years, we default to bridges. Layer zeros, the wormholes, things like that. And then more recently, people start thinking of cross-chain transactions as being things driven by intents. But people don't really think holistically about everything that you need to get safety when there's a fragmented Web3 that you want to bring together. So let me talk through these four different components of the ag layer. First one is the pessimistic proof. So the most important thing, if you want to bring chains together, is to ensure that no one chain can rug another chain. Right? Like, if I have gone to a chain, I have decided this is a chain that I trust. Or like many, frankly, haven't paid attention, but I'm still assuming trust on that chain. 
And when I send a transaction cross-chain or I'm receiving an asset from another chain, I need to be 100% sure that the only thing I am trusting is that chain that I am on. And this is basically what the pessimistic proof does. What it does is it looks at all assets that come into a chain. It looks at all assets that are leaving a chain. And we call this chain level accounting. And basically what the pessimistic proof does, it then ensures that when an asset wants to leave a chain, it is not an asset that is in excess of any assets that have come into it. So if you've had, you know had 10 ETH come into a chain, then you can't have more than 10 ETH leave that chain. And this is what the pessimistic proof enforces. Now, the interesting thing about this pessimistic proof is that it is not just enforcing this for chains that have proofs. So a lot of times people think, okay, the ag layer or anything that Polygon is working on requires ZK technology. So you look at Polygon ZK VM or X layer and you're like, hey, this uses execution proofs. And so the ag layer requires execution proofs. The reality is the ag layer is actually completely agnostic to that. You could have execution proofs, you could have fraud proofs, you can have some form of consensus that can be proven, or you can straight up have a database. Pessimistic proof doesn't care. It's going to look at what assets came in, what assets have left, and whether an asset that wants to leave or be removed from there can. And so that's kind of like the guarantee you get with pessimistic proof, is that regardless of what it chain, like the form of a chain, you can prove facts about that chain. And specifically on something like a Polygon POS that has a validator set, you can prove the consensus of Polygon POS so that you can confidently actually have transactions go cross-chain. Next one is proof aggregation. A lot of people think the proof aggregation is what allows the interoperability in the AgLayer. All proof aggregation does is it lowers the cost of transacting on chains connected to the AgLayer and using the AgLayer. Specifically what it does is you get a ZK proof, and this could be the pessimistic proof or it could be an execution proof on a chain. And what it does is it wraps those proofs together and then wraps those proofs together and then wraps those proofs together and then submits them to Ethereum. If you were to submit each of those proofs to Ethereum on their own, it would be incredibly expensive. And so this is how you're able to lower the cost so that it is incredibly cheap to execute these cross-chain transactions. Next thing is there's a unified bridge. This is a bridge on Ethereum, and that bridge is the bridge used to create canonical assets. That allows for all transactions going across the ag layer to be done in fungible assets, meaning that you don't have wrapped assets and then unwrapped assets. And then lastly, there's this concept of fast interrupt. The ag layer with nothing else going between two chains, especially two L2s, is going to settle at the speed of Ethereum settlement, and so we're going to be looking at 15-minute cross-chain transactions, which isn't good enough. And so the goal with fast interrupt is to lower that latency to just a few seconds rather than minutes So like what are the benefits of this? First of all is you get these native tokens Look, I think cosmos has done many correct things. 
But when you go when you use cosmos the experience of having many, many unwrapped tokens and moving those around and figuring out what these assets are, and the same has become true within the Ethereum ecosystem as well, that is not a good user experience. Second is we need safe cross-chain transactions. As of right now, when we use bridges that we use, most of the time these are not actually very safe for interoperability, and we actually need something to add safety. Sometimes we talk about shared sequencers as providing atomic composability. Again, you need something to actually make that safe and trustless. And importantly, this is where you get the concept of compatibility. The ag layer often is viewed as competitive with all of these different options that people hear about for interoperability purposes. But it actually really isn't. It's a very low-level base layer that is compatible with most of these. So if you want to do an asynchronous transaction, you use the ag layer. But if you want a synchronous transaction, then you would actually use a shared sequencer with the added security of the AgLayer. If you want to use intents, those are expensive. They require capital in two different pools. You need to rebalance that capital. Rebalancing that capital has risk and time associated with it. You can lower that risk and time, allowing for rebalancing much quicker, allowing for users to get transactions at a cheaper cost. And so good experience there. And so the goal when we're thinking about building the AgLayer with the other contributors is really like how do we make this the best experience possible? And that includes from a cost perspective when working with other solutions out there. Next, like the AgLayer, if you think about it, is really like an asset-first protocol. You're actually passing assets from one chain to another. And this is different from other interoperability solutions that are generally passing messages. But the AgLayer does allow for passing messages as well, which is necessary and something that can be done. Next is it actually enables the concept of, like, chain abstraction. I think chain abstraction is a very nebulous concept that if you ask 10 people what is chain abstraction, 10 people will give you different answers. The way that I often think about it is like the Ag layer allows for easy use of chain abstraction. And like one example of this is a library that we've built that we refer to as Bridge and Call. And so this is a library that allows for users to basically execute one transaction themselves, but they're actually executing multiple transactions across chains. So imagine that you wanted to bridge funds from Ethereum to Polygon, ZKVM, and then you wanted to swap those assets into different assets and then transfer them over to X layer and then buy an NFT. With the bridge and call function, you can do that. And so you can imagine a wallet that abstracts that all the way and basically says, hey, do you want to do this bridge transfer and then this swap and then transfer it and then buy this NFT, click one button and it swap, and then transfer it, and then buy this NFT, click one button, and it's calling multiple functions in the background, and you've abstracted away the entire kind of cross-chain experience. And then lastly, it provides for this low interoperability, low latency in terms of these cross-chain transactions. Again, it doesn't provide synchronous interoperability. 
That's something that by working with shared sequencers secured by the AgLayer, you can receive, but it's not something that the AgLayer gives natively. This is the AgLayer ecosystem, multiple contributors. We're always looking to work with more folks on this. One thing you'll notice is the AgLayer is not called the Polygon AgLayer. This is very intentional. We've talked to every big team in the Ethereum ecosystem and most outside of it. The goal is to get everybody to contribute so that we can have a non-fragmented Web3 as a whole, allowing for Ethereum to grow and be used without actually needing to leave Ethereum. A very important point is that the AgLayer is not rent-seeking. If you look at a lot of different interoperability solutions and a lot of different other ecosystems building out interoperability, what you end up seeing is some form of fee that is placed on every transaction. And it really looks like a middleman form of rent-seeking. The AgLayer has no fee for joining the AgLayer. There's actually no fee per transaction on the AgLayer. Instead, what it is, is that chains are going to create a proof with as many transactions as they can include in their consensus, or that are part of the consensus being satisfied, or as part of an execution proof, and they're going to submit that to the AgLayer. They could choose to submit that once a month. They could choose to submit that every two seconds. They get to choose how often they actually want to submit a proof to the AgLayer. And that allows for chains to continue to remain sovereign. For example, you could think of like a gaming chain. A gaming chain is going to say, hey, our users don't actually care how often we finalize this chain, or they're going to accept lower security requirements for some period of time. And so we're going to submit proofs much less frequently than like a DeFi chain that's going to say, hey, I want to submit proofs every two seconds. And so by having this flexibility in the AgLayer, it allows for people to be able to execute these transactions basically freely. And so basically what we see here is that we've solved fragmented liquidity. You've got fungible assets across the ecosystem. You've solved scalability because you have chains that can be spun up at any point in time. You've solved security with the pessimistic proofs, so that you can safely go between chains. And you've solved the UX issue because you can now seamlessly interoperate between these chains with some of the greatest technologies around shared sequencing and intents that are going to be part of this. And what you end up with is really a unified kind of Web3, allowing for users to stay in Ethereum, benefit from assets across every ecosystem. And what you see is a Web3 that ends up actually being united under some common protocol. Thank you. Thank you, Mark. That was quite insightful. We do have a few questions here. So who will aggregate the aggregators? That's a good question. I don't know. We see this all the time, of course. I have not been able to have anyone point to me to another team that is actually trying to aggregate all of Web3. And so there is nothing that prevents the AgLayer from being connected to a chain in the Superchain. You will actually see that happen. There is nothing that prevents the AgLayer from being connected to a chain on the Elastic Chain. There is nothing that prevents it from connecting to an Orbit chain. And so given that it's a low-level solution, you can call it the aggregator of aggregators. 
And so that's what the AgLayer provides. All right. And what is the difference with CCIP? Yeah, that's a good question. So I would think of CCIP more on the messaging side of things than on the asset transfer side of things. And so specifically, like when you think about the bridges that I was referring to, that's kind of what currently exists with CCIP. One of the things that we're actually working on right now on the AgLayer is trying to bring kind of bridge standards within the AgLayer. There's actually nothing that prevents the AgLayer from using some of the bridge standards that we currently see and adding to it a level of security that currently doesn't actually exist. And so the goal is actually to work with something like CCIP rather than compete with it. All right. Where does the AgLayer run and who are the actors? Yeah, that's a good question. So the AgLayer is going to be live in three to four weeks. And in its initial form, it's run in a centralized way. The nice thing, though, is that with ZK technology, you can be running a centralized system in a trustless manner. And that's kind of what the AgLayer is going to look like in its initial form. But I fundamentally believe that notwithstanding how centralized we're seeing everything in the space right now, pretty much everything is going to decentralize. And this is either going to happen because it's going to be forced by some government actors in some way, or it's going to happen because critical issues are going to happen in centralized systems, and we're all going to be reminded why it is that we actually have decentralized systems. And so our goal, and it's on the roadmap, and it's actually a lot of work that's already being done, is to decentralize the AgLayer and not keep it in a centralized form. All right. Is there any hope of optimistic networks to get aggregated into the AgLayer? Their optimistic nature, that is waiting periods, etc., seems to be fundamentally incompatible with composability. Yes. This is a good question and something we've spent a lot of time on. So I got two answers for you on that. Answer number one is all optimistic roll-ups will be ZK roll-ups. It's just a matter of time. Everybody knows it. All optimistic roll-ups are working on that already. It's just a question of time. And that, frankly, is what will create the ideal user experience. Another alternative would be to actually just prove the fraud proof. Okay, that's the exact question. Like, you can't wait seven days to go cross-chain. That's horrible. But what I was also saying is that we don't just need to prove fraud proofs. We can prove other things. For example, you know, the centralized sequencers that we have right now do reach consensus in a centralized way, and you can actually prove that consensus using the pessimistic proof. That's why I was saying that even in this current state where you've got fraud proofs on chains, that we will still see them on the AgLayer because we can protect against the risk of anything happening within the fraud-proof window using the pessimistic proof. All right. We only have time for one more question. Is my understanding correct that existing L2s need to migrate over all their assets sitting in their native bridges to the unified bridge? Yeah, that is a very good question. It's something we spend a lot of time on. So I would say there's the ideal state and there's the less-than-ideal state. 
As I've mentioned a few times, we can currently connect any existing chain and you will see existing chains get connected without migrating assets over to the Ag layer. And what will happen when that happens is basically you'll start issuing new assets on that new canonical bridge being the AgLayer bridge. For any chains that want the what I'll call ideal user experience, you would actually want to have them migrate all the assets over. This is actually something that you will see happening with Polygon POS. We have 6,500 or so assets on Polygon POS. They will all get migrated over to the unified bridge. And therefore, we're going to be the first example of probably one of the biggest chains, definitely the biggest chain in the world from an assets perspective, actually migrating all of those assets over. All right. Thank you again so much. Can we get a round of applause for Mark, please?", "eventId": "devcon-7", - "slot_start": 1731408000000, - "slot_end": 1731408600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1_iMeu-TIt6aOehgouo5xQOCb89l5Su5oE2WffTDcOII", - "resources_slides": null, + "slot_start": 1731645000000, + "slot_end": 1731646800000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/19GjAOPnXoMBNpAerM--poOFpPMM-IeprVNBtTrgK-UA", + "resources_slides": "https://drive.google.com/file/d/1upW5srNCAMB9Wvg7J5vx9Kck4ZM8Yem9/view", "speakers": [ - "jack-sanford" + "marc-boiron" ] }, "vector": [ - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -723004,6 +720964,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -723611,13 +721572,13 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -723742,7 +721703,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -723761,6 +721721,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -723790,6 +721751,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -723883,6 +721845,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -723894,7 +721857,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -723946,6 +721908,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -724021,7 +721984,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -724232,7 +722194,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -724310,10 +722271,10 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, 0, 0, 0, @@ -724327,40 +722288,47 @@ }, { "session": { - "id": "the-age-of-account-abstraction-opportunities-and-challenges", - "sourceId": "EPN9S7", - "title": "The Age of Account Abstraction: Opportunities and Challenges", - "description": "In a world where the web3 user experience is streamlined through account abstraction, complexities like gas and multiple L1s/L2s are hidden from users. This talk explores the competitive dynamics likely to develop at each layer of the stack (layers, DeFi protocols, intent protocols) and the strategies that might be employed to succeed. Join me to delve into the transformative impact of making Web3 seamless and accessible, and understand how to navigate and thrive in this evolving landscape.", - "track": "Usability", + "id": "the-blind-mans-elephant-a-product-vision-towards-private-identities", + "sourceId": "GSZKVK", + "title": "The Blind Man's Elephant: a product vision towards private identities", + "description": "A short talk introducing the concepts of key properties we want to achieve in private ZK identities. 
Sparkling concepts like SSI and DIDs and why blockchains are the best way to ensure that.\r\n\r\nFinally it concludes with simple ZK and data-structure constructions and different alternatives that are seeking to provide this characteristics.\r\n\r\nIn short, this is a lightning overview of the space, it's desired features and different approaches to achieve them.", + "track": "Applied Cryptography", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Business", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Protocol competition", - "User growth", - "Layer specialisation" - ], "tags": [ - "Layer 2s", - "Account Abstraction", - "Intents", - "specialisation", - "layer", - "Account Abstraction", - "Intents", - "Layer 2s" + "Privacy", + "Identity", + "ZKP", + "Use Cases", + "selective", + "disclosure", + "Identity", + "Privacy", + "Use Cases", + "ZKP" ], - "language": "en", - "speakers": [ - "daniel-yanev" + "keywords": [ + "Selective-disclosure" ], + "duration": 706, + "language": "en", + "sources_swarmHash": "849d3e4fd5ed45afc927a10bae59624aead23e6e86dad6d8ff724046c4df13b9", + "sources_youtubeId": "-BESF3MUM20", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731552300000, - "slot_end": 1731552900000, + "slot_start": 1731395400000, + "slot_end": 1731396000000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/17eyZChjX1qpt1_WuQIDXpXi06_RixZQtwAbNNS22vqU" + "resources_presentation": "https://docs.google.com/presentation/d/1OM2zZQsD8haiBnMdAS98Oz90Cmk3F2nH7dY0H_hjKTA", + "resources_slides": "https://drive.google.com/file/d/1cBG-vGfpn9lGNM3Q02luBsmrhfOuSU3h/view", + "speakers": [ + "andy" + ] }, "vector": [ 0, @@ -724371,14 +722339,9 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -724984,13 +722947,13 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -725169,8 +723132,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -725180,7 +723141,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -725194,9 +723154,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -725233,6 +723195,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -725474,7 +723437,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -725605,10 +723567,11 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -725681,10 +723644,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -725699,47 +723662,53 @@ }, { "session": { - "id": "the-age-of-aggregation", - "sourceId": "VVTWM7", - "title": "The Age Of AGGREGATION", - "description": "Aggregation plays a critical role in enhancing the usability and scalability of blockchain technology. In this session, we will explore the fundamental concepts of aggregation, debunk common myths, and discuss the necessity of aggregated blockchain systems for achieving real-world usage. Current scalability boundaries limit blockchain's potential, but through aggregation, we can optimize performance and usability, making blockchain technology accessible to a broader audience", - "track": "Layer 2", + "id": "the-chain-abstraction-master-plan", + "sourceId": "DCSCA7", + "title": "The Chain Abstraction Master Plan", + "description": "Chain abstraction is vital for Ethereum’s future competitiveness and interoperability. This talk will dive into why Ethereum apps need chain abstraction to avoid fragmentation and ensure open, trustless, and modular systems. 
We’ll explore approaches to abstraction, the importance of open standards, and a roadmap for upgrading the ecosystem’s core infrastructure—spanning JSON-RPC API improvements, resource locks, and intent settlement—to unlock new layers of usability and decentralization.", + "track": "Usability", "type": "Talk", "expertise": "Intermediate", "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Protocol Design", - "Scalability", - "Token bridging", - "User Experience", - "Protocol Design", + "Account Abstraction", + "Cross-L2", + "Developer Infrastructure", + "DevEx", + "Ethereum Roadmap", + "Gas", + "Intents", + "MEV", + "Paymaster", + "Rollups", "Token bridging", + "Transaction fees mechanisms", "User Experience" ], "keywords": [ - "Blockchain optimization", - "performance enhancement", - "scalability" + "Chain Abstraction", + "OneBalance", + "Resource Locks" ], - "duration": 1566, + "duration": 883, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "asPJJDIQaWY", + "sources_swarmHash": "41949c931075e883c80aba1313f8f7f87470af99f2053e0e96485b9145d4b4bf", + "sources_youtubeId": "9fH-de8v53g", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736e8da1b0f83434d32eac5", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736e8da1b0f83434d32eac5.vtt", - "transcript_text": " of Polygon Labs. For those of you who don't know Polygon Labs, or specifically who don't know what Polygon Labs does, because we do many things, we are core contributor to the AgLayer along with many others. We've also developed multiple blockchains being Polygon POS that pretty much everyone knows, as well as Polygon ZKVM. So I'm going to go through a little bit of history before actually attacking what exactly we mean by aggregation, and specifically what I want to look at. And people talk a lot about not looking at analogies, but if we think of what we want Web 3 to be, we really want it to eat as much of Web 2 up as possible. We should be able to scale Web 3 to the size of Web 2. And so the question then becomes, well, how did the Internet become the Internet? And if you think about it, the Internet actually started very fragmented. It was very disconnected with multiple networks, unable to communicate. And then new protocols came along. And specifically, things like TCP, IP came along. And what you suddenly had was this diversity in what could be built on the internet and the infrastructure underlying it, but with some common protocols. And specifically what you get there is TCP IP. And it's very important to actually understand this because a lot of people say, like, how are we going to bring together all of Web 3 when you've got so much disparate technology in Web3? You've got different stacks, you've got different VMs, like, everything looks different. But we actually have an example of bringing together different technology in a way that, like, unifies it and actually feels like one. So let's talk about, like, specific problems we see in Web3 and why this is actually very relevant. We've got fragmentation as an issue. I think this is an obvious one for everyone. Scalability, security, and frankly, just poor UX. If we look at each of these one by one, what do we have right now? We've got a bunch of chains, and liquidity is fragmented across all of these chains. We really have no good way of bringing them together at all. Similarly, we've got a scalability problem. 
If we want Web3 to scale to the size of the internet, then we should probably compare to what is currently using the internet. And when we look at that, what we see is things like MasterCard, Visa, Nasdaq. And I do not believe we actually have a network that can handle the load of any of those. I think we've heard many state that they want blockchains to be able to handle, you know, a load like Nasdaq. And that is 250,000 transactions per second, which no network is even close to being able to handle in any way whatsoever. Actually, just as a little side note, and I'll give you a few side notes during this presentation, but here is a little interesting fact: let's just assume that we could actually scale a blockchain to the size of Nasdaq. Nasdaq can execute transactions with a latency of 50 microseconds. You are not going to execute any transaction on a blockchain in 50 microseconds. So I think our dreams of bringing Nasdaq on-chain should probably recognize the fact that there's some things that are actually better in a centralized state, and there's some things that are better in a decentralized state. Obviously, this does not mean that we don't want decentralized exchanges. It means that there's specific technologies that serve specific purposes, and we should probably focus on more long-tail assets in a decentralized way when we want perfect performance execution. And then when we just want censorship resistance, then we should probably bring that on-chain. Third one is security. Right now, when you look at many different networks and how they come together, we talk about concepts like shared sequencers and intents and bridges, and most of that is not actually very secure, even though it actually works decently in some cases. And lastly, the UX challenge. We've got a UX problem, whether it's across wallets, bridges and intents, chains. Basically, what you get is this fragmented feeling across the board. And so then the question becomes, what do you do about this? And the first answer that we tried to give, or many do still give, is let's just use a monolithic chain. Solana can solve this for you because something, something, something. Well, you know, most of the time that something isn't going to be good enough, because literally we have seen that we can't even scale to some of the smallest networks that are good for payments in the world, or the biggest ones. And so the idea that we're going to throw everything on one chain is not happening. And so we need to go look at another solution. The other solution was, why don't we just scale through modular chains, right? But the problem is modular chains increase fragmentation. We see it today. You've got, you know, Polygon ZKVM, Polygon POS. You've got zkSync Era. You've got, you know, Arbitrum One. You've got OP Mainnet. You could keep going on and on. And, like, great, this is nice. Like, being able to choose how you want to spin up a chain is great. It actually solves the scalability issue, right? Because now when you need more block space, what do you do? You spin up a new chain. And with time, this is actually going to get easier and easier, right? Like right now, people want to sell you on the idea that spinning up a new chain is easy. Like frankly, it's actually not that easy. But if you fast forward 12 months from now and 24 months from now and 36 months from now, you're going to get to a point where spinning up a new chain is actually incredibly easy. 
And when you need to increase throughput and increase block space, then you're going to spin up a new chain to actually be able to do that. And so through kind of the modular approach, we can actually solve scalability. The problem is we still don't have one unified Web3 if we do that. And so the answer to this is just aggregate all the chains. And so what does aggregation actually allow for? It actually allows us to scale Web3 to the size of the internet. What are the things that we need to do this? First, we need practically infinite scalability. And that's what you get when you can spin up new chains very easily when more demand is needed, because when there is more demand, more block space is needed. And then the second one is you need to be able to unify state, liquidity, users. And if you really think of the North Star of what we should be targeting, we should really want to be able to have a Web3 and an Ethereum that scales as much as you need in a unified way, making you believe that you are using one chain while you are using 100,000 different chains at any point in time, and that those 100,000 chains can become 125,000 chains at any point in time, and it still feels like one internet. So when we've thought about this problem and how we want to solve this problem exactly, we started thinking about it and we realized that we needed to create a neutral platform. And so, this is the concept of the AgLayer. Polygon Labs is a core contributor, along with many others that I will show you a little bit later. But the general idea was that we need some kind of neutral cross-chain settlement platform that unifies liquidity, users, and state, and that ultimately has finality on Ethereum. Now, very importantly, the AgLayer is actually not intended to scale only Ethereum. And there's a reason for this, which is that as amazing as Ethereum is, what do people actually value in Web3? They value assets. And there's always going to be assets outside of Ethereum. And we need to be able to get people to come to Ethereum and stay on Ethereum. And the only way to do that is to allow them to use or to receive or send assets to another chain that is not Ethereum. And the only way to do that is to aggregate those chains along with Ethereum and all chains on them. And so a big part of kind of the AgLayer is really bringing everything together and ultimately having finality on Ethereum. Maybe let me talk to you about kind of like the different components of the AgLayer. Really think about it in like four different parts. A lot of times when people hear the AgLayer or anything that has to do with interoperability, they immediately default to: it's a bridge, and it's nothing but a bridge. And this is simply because when we think of the forms of interoperability that we have had to go cross-chain over the last many years, we default to bridges. The LayerZeros, the Wormholes, things like that. And then more recently, people start thinking of cross-chain transactions as being things driven by intents. But people don't really think holistically about everything that you need to get safety when there's a fragmented Web3 that you want to bring together. So let me talk through these four different components of the AgLayer. First one is the pessimistic proof. So the most important thing, if you want to bring chains together, is to ensure that no one chain can rug another chain. Right? Like, if I have gone to a chain, I have decided this is a chain that I trust. 
Or, like many, frankly, I haven't paid attention, but I'm still assuming trust on that chain. And when I send a transaction cross-chain or I'm receiving an asset from another chain, I need to be 100% sure that the only thing I am trusting is that chain that I am on. And this is basically what the pessimistic proof does. What it does is it looks at all assets that come into a chain. It looks at all assets that are leaving a chain. And we call this chain-level accounting. And basically what the pessimistic proof does, it then ensures that when an asset wants to leave a chain, it is not an asset that is in excess of any assets that have come into it. So if you've had, you know, 10 ETH come into a chain, then you can't have more than 10 ETH leave that chain. And this is what the pessimistic proof enforces. Now, the interesting thing about this pessimistic proof is that it is not just enforcing this for chains that have proofs. So a lot of times people think, okay, the AgLayer or anything that Polygon is working on requires ZK technology. So you look at Polygon ZKVM or X Layer and you're like, hey, this uses execution proofs. And so the AgLayer requires execution proofs. The reality is the AgLayer is actually completely agnostic to that. You could have execution proofs, you could have fraud proofs, you can have some form of consensus that can be proven, or you can straight up have a database. The pessimistic proof doesn't care. It's going to look at what assets came in, what assets have left, and whether an asset that wants to leave or be removed from there can. And so that's kind of like the guarantee you get with the pessimistic proof, is that regardless of what kind of chain it is, like the form of a chain, you can prove facts about that chain. And specifically on something like a Polygon POS that has a validator set, you can prove the consensus of Polygon POS so that you can confidently actually have transactions go cross-chain. Next one is proof aggregation. A lot of people think the proof aggregation is what allows the interoperability in the AgLayer. All proof aggregation does is it lowers the cost of transacting on chains connected to the AgLayer and using the AgLayer. Specifically what it does is you get a ZK proof, and this could be the pessimistic proof or it could be an execution proof on a chain. And what it does is it wraps those proofs together and then wraps those proofs together and then wraps those proofs together and then submits them to Ethereum. If you were to submit each of those proofs to Ethereum on their own, it would be incredibly expensive. And so this is how you're able to lower the cost so that it is incredibly cheap to execute these cross-chain transactions. Next thing is there's a unified bridge. This is a bridge on Ethereum, and that bridge is the bridge used to create canonical assets. That allows for all transactions going across the AgLayer to be done in fungible assets, meaning that you don't have wrapped assets and then unwrapped assets. And then lastly, there's this concept of fast interop. The AgLayer with nothing else going between two chains, especially two L2s, is going to settle at the speed of Ethereum settlement, and so we're going to be looking at 15-minute cross-chain transactions, which isn't good enough. And so the goal with fast interop is to lower that latency to just a few seconds rather than minutes. So like what are the benefits of this? First of all is you get these native tokens. Look, I think Cosmos has done many correct things. 
But when you go, when you use Cosmos, the experience of having many, many unwrapped tokens and moving those around and figuring out what these assets are, and the same has become true within the Ethereum ecosystem as well, that is not a good user experience. Second is we need safe cross-chain transactions. As of right now, when we use the bridges that we use, most of the time these are not actually very safe for interoperability, and we actually need something to add safety. Sometimes we talk about shared sequencers as providing atomic composability. Again, you need something to actually make that safe and trustless. And importantly, this is where you get the concept of compatibility. The AgLayer often is viewed as competitive with all of these different options that people hear about for interoperability purposes. But it actually really isn't. It's a very low-level base layer that is compatible with most of these. So if you want to do an asynchronous transaction, you use the AgLayer. But if you want a synchronous transaction, then you would actually use a shared sequencer with the added security of the AgLayer. If you want to use intents, those are expensive. They require capital in two different pools. You need to rebalance that capital. Rebalancing that capital has risk and time associated with it. You can lower that risk and time, allowing for rebalancing much quicker, allowing for users to get transactions at a cheaper cost. And so good experience there. And so the goal when we're thinking about building the AgLayer with the other contributors is really like how do we make this the best experience possible? And that includes from a cost perspective when working with other solutions out there. Next, like the AgLayer, if you think about it, is really like an asset-first protocol. You're actually passing assets from one chain to another. And this is different from other interoperability solutions that are generally passing messages. But the AgLayer does allow for passing messages as well, which is necessary and something that can be done. Next is it actually enables the concept of, like, chain abstraction. I think chain abstraction is a very nebulous concept, that if you ask 10 people what is chain abstraction, 10 people will give you different answers. The way that I often think about it is like the AgLayer allows for easy use of chain abstraction. And like one example of this is a library that we've built that we refer to as Bridge and Call. And so this is a library that allows for users to basically execute one transaction themselves, but they're actually executing multiple transactions across chains. So imagine that you wanted to bridge funds from Ethereum to Polygon ZKVM, and then you wanted to swap those assets into different assets and then transfer them over to X Layer and then buy an NFT. With the bridge-and-call function, you can do that. And so you can imagine a wallet that abstracts that all away and basically says, hey, do you want to do this bridge transfer, and then this swap, and then transfer it, and then buy this NFT? Click one button, and it's calling multiple functions in the background, and you've abstracted away the entire kind of cross-chain experience. And then lastly, it provides for this low-latency interoperability in terms of these cross-chain transactions. Again, it doesn't provide synchronous interoperability. 
That's something that by working with shared sequencers secured by the AgLayer, you can receive, but it's not something that the AgLayer gives natively. This is the AgLayer ecosystem, multiple contributors. We're always looking to work with more folks on this. One thing you'll notice is the AgLayer is not called the Polygon AgLayer. This is very intentional. We've talked to every big team in the Ethereum ecosystem and most outside of it. The goal is to get everybody to contribute so that we can have a non-fragmented Web3 as a whole, allowing for Ethereum to grow and be used without actually needing to leave Ethereum. A very important point is that the AgLayer is not rent-seeking. If you look at a lot of different interoperability solutions and a lot of different other ecosystems building out interoperability, what you end up seeing is some form of fee that is placed on every transaction. And it really looks like a middleman form of rent-seeking. The AgLayer has no fee for joining the AgLayer. There's actually no fee per transaction on the AgLayer. Instead, what it is, is that chains are going to create a proof with as many transactions as they can include in their consensus, or that are part of the consensus being satisfied, or as part of an execution proof, and they're going to submit that to the AgLayer. They could choose to submit that once a month. They could choose to submit that every two seconds. They get to choose how often they actually want to submit a proof to the AgLayer. And that allows for chains to continue to remain sovereign. For example, you could think of like a gaming chain. A gaming chain is going to say, hey, our users don't actually care how often we finalize this chain, or they're going to accept lower security requirements for some period of time. And so we're going to submit proofs much less frequently than like a DeFi chain that's going to say, hey, I want to submit proofs every two seconds. And so by having this flexibility in the AgLayer, it allows for people to be able to execute these transactions basically freely. And so basically what we see here is that we've solved fragmented liquidity. You've got fungible assets across the ecosystem. You've solved scalability because you have chains that can be spun up at any point in time. You've solved security with the pessimistic proofs, so that you can safely go between chains. And you've solved the UX issue because you can now seamlessly interoperate between these chains with some of the greatest technologies around shared sequencing and intents that are going to be part of this. And what you end up with is really a unified kind of Web3, allowing for users to stay in Ethereum, benefit from assets across every ecosystem. And what you see is a Web3 that ends up actually being united under some common protocol. Thank you. Thank you, Mark. That was quite insightful. We do have a few questions here. So who will aggregate the aggregators? That's a good question. I don't know. We see this all the time, of course. I have not been able to have anyone point to me to another team that is actually trying to aggregate all of Web3. And so there is nothing that prevents the AgLayer from being connected to a chain in the Superchain. You will actually see that happen. There is nothing that prevents the AgLayer from being connected to a chain on the Elastic Chain. There is nothing that prevents it from connecting to an Orbit chain. And so given that it's a low-level solution, you can call it the aggregator of aggregators. 
And so that's what the AgLayer provides. All right. And what is the difference with CCIP? Yeah, that's a good question. So I would think of CCIP more on the messaging side of things than on the asset transfer side of things. And so specifically, like when you think about the bridges that I was referring to, that's kind of what currently exists with CCIP. One of the things that we're actually working on right now on the AgLayer is trying to bring kind of bridge standards within the AgLayer. There's actually nothing that prevents the AgLayer from using some of the bridge standards that we currently see and adding to it a level of security that currently doesn't actually exist. And so the goal is actually to work with something like CCIP rather than compete with it. All right. Where does the AgLayer run and who are the actors? Yeah, that's a good question. So the AgLayer is going to be live in three to four weeks. And in its initial form, it's run in a centralized way. The nice thing, though, is that with ZK technology, you can be running a centralized system in a trustless manner. And that's kind of what the AgLayer is going to look like in its initial form. But I fundamentally believe that notwithstanding how centralized we're seeing everything in the space right now, pretty much everything is going to decentralize. And this is either going to happen because it's going to be forced by some government actors in some way, or it's going to happen because critical issues are going to happen in centralized systems, and we're all going to be reminded why it is that we actually have decentralized systems. And so our goal, and it's on the roadmap, and it's actually a lot of work that's already being done, is to decentralize the AgLayer and not keep it in a centralized form. All right. Is there any hope of optimistic networks to get aggregated into the AgLayer? Their optimistic nature, that is waiting periods, etc., seems to be fundamentally incompatible with composability. Yes. This is a good question and something we've spent a lot of time on. So I got two answers for you on that. Answer number one is all optimistic roll-ups will be ZK roll-ups. It's just a matter of time. Everybody knows it. All optimistic roll-ups are working on that already. It's just a question of time. And that, frankly, is what will create the ideal user experience. Another alternative would be to actually just prove the fraud proof. Okay, that's the exact question. Like, you can't wait seven days to go cross-chain. That's horrible. But what I was also saying is that we don't just need to prove fraud proofs. We can prove other things. For example, you know, the centralized sequencers that we have right now do reach consensus in a centralized way, and you can actually prove that consensus using the pessimistic proof. That's why I was saying that even in this current state where you've got fraud proofs on chains, that we will still see them on the AgLayer because we can protect against the risk of anything happening within the fraud-proof window using the pessimistic proof. All right. We only have time for one more question. Is my understanding correct that existing L2s need to migrate over all their assets sitting in their native bridges to the unified bridge? Yeah, that is a very good question. It's something we spend a lot of time on. So I would say there's the ideal state and there's the less-than-ideal state. 
As I've mentioned a few times, we can currently connect any existing chain and you will see existing chains get connected without migrating assets over to the Ag layer. And what will happen when that happens is basically you'll start issuing new assets on that new canonical bridge being the AgLayer bridge. For any chains that want the what I'll call ideal user experience, you would actually want to have them migrate all the assets over. This is actually something that you will see happening with Polygon POS. We have 6,500 or so assets on Polygon POS. They will all get migrated over to the unified bridge. And therefore, we're going to be the first example of probably one of the biggest chains, definitely the biggest chain in the world from an assets perspective, actually migrating all of those assets over. All right. Thank you again so much. Can we get a round of applause for Mark, please?", + "sources_streamethId": "6735e4739dbb7a90e18580fb", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731645000000, - "slot_end": 1731646800000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/19GjAOPnXoMBNpAerM--poOFpPMM-IeprVNBtTrgK-UA", - "resources_slides": null, + "slot_start": 1731576600000, + "slot_end": 1731577800000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1aMlbfC7Va_bqN5fI43BFPOB0iIennWgUwyiQxb7D3q0", + "resources_slides": "https://drive.google.com/file/d/10EkGiZcp3_B5sTR_FMEgG2WFbJ1DXC56/view", "speakers": [ - "marc-boiron" + "stephane-gosselin" ] }, "vector": [ @@ -725750,6 +723719,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -726492,11 +724462,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, + 6, 0, 0, 0, @@ -726524,13 +724490,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -726542,6 +724502,7 @@ 0, 2, 0, + 2, 0, 0, 0, @@ -726550,8 +724511,11 @@ 0, 0, 0, + 2, 0, 0, + 2, + 2, 0, 0, 0, @@ -726634,15 +724598,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -726705,8 +724660,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -726730,6 +724687,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -726927,6 +724885,24 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -727045,18 +725021,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -727079,46 +725043,43 @@ }, { "session": { - "id": "the-blind-mans-elephant-a-product-vision-towards-private-identities", - "sourceId": "GSZKVK", - "title": "The Blind Man's Elephant: a product vision towards private identities", - "description": "A short talk introducing the concepts of key properties we want to achieve in private ZK identities. 
Sparkling concepts like SSI and DIDs and why blockchains are the best way to ensure that.\r\n\r\nFinally it concludes with simple ZK and data-structure constructions and different alternatives that are seeking to provide this characteristics.\r\n\r\nIn short, this is a lightning overview of the space, it's desired features and different approaches to achieve them.", - "track": "Applied Cryptography", + "id": "the-chain-mail-gaze", + "sourceId": "73SKE9", + "title": "The Chain Mail Gaze", + "description": "With their dreams of new ‘Network State’ empires, resource extraction, and colonial domination, today’s tech overlords are the descendants of Europe’s mediaeval Crusaders: well-financed, zealous fanatics remaking the world in the name of their greater good. Through a psycho-political reading of scarcity, chauvinism, and colonialism, The Chain Mail Gaze connects Crusader ideologues’ desire for blood, land, and booty, to emerging ‘frontiers’ mediated by contemporary technologies.", + "track": "Coordination", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "expertise": "Beginner", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Privacy", - "Identity", - "ZKP", - "Use Cases", - "selective", - "disclosure", - "Identity", - "Privacy", - "Use Cases", - "ZKP" + "Governance", + "Network State", + "decolonial", + "Governance", + "Network State" ], "keywords": [ - "Selective-disclosure" + "decolonial" ], - "duration": 706, + "duration": 449, "language": "en", - "sources_swarmHash": "849d3e4fd5ed45afc927a10bae59624aead23e6e86dad6d8ff724046c4df13b9", - "sources_youtubeId": "-BESF3MUM20", + "sources_swarmHash": "422f0d629401b9f6bd9d6e38fd02fe8770da00b13ef29d52d9871a214bff2fc1", + "sources_youtubeId": "zhsmBcFnDsE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "67382f841b0f83434da7d8bf", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67382f841b0f83434da7d8bf.vtt", + "transcript_text": " Jane Mailgaze by Waseem Z. Alcindi. Please, give him a warm welcome. Hello everybody. It's slightly out of place to have such cheerful music introducing this talk, perhaps. So the title of it is called The Chainmail Gaze, and hopefully the reasons for that will become apparent as we go on. But I'm sure you've all heard of the mail gaze and the kind of externalities and, you know, unfavorable things that come with that but what about the chain mail gaze well hopefully we'll find out not got very long so I'll try to give you a survey of what's actually quite extensive anthropological and philosophical research project some pictures of big strong men and their swords looking into the distance perhaps thinking about lands to conquer. A short poetic refrain, the church and the network, zeal and time, death and money, all sides of the same coin. I want to talk to you about an interim project that led up to where we're going to, that's called Profit Motives, looking at the false divinity of capital, how it animates motivations and desires in the technology space that might not be immediately apparent. 
As long as there's been financial capital, risk and speculation have orbited, manipulated, and harnessed it as narrative feedback machines, simultaneously reading and rewriting realities. Markets exist as a distributed conversation amongst speculators, driven by profit motives and appetite for divination and prophecy. Today, new strains of techno-colonialism are emerging, which are the latest of a series of echoes throughout Western history. An ascendant cabal of technology elites are attempting to reshape the world in their favor whilst hiding in plain sight behind the technologies that have enriched them. Theirs is a Promethean zealotry without faith, affecting an aura of divine sanction for the purposes of elevating the ego, enriching the chosen ones, and creating empires of various stripes. But was it not always so? I want to read you a short story, fictional speculation, about where we might be going in the future. It's called Seething Like a State. Decentralization has a cost, the price of anarchy. The price is always due, but the rewards weren't cheap to reap. So solid CCRU. What most did not expect was that payment would become due at the grandest scales of governance. The West failure state, forged under the fire of peer pressure, was ambushed by upstart modes of power, opening new vistas of communication, commodification, and communion. The orientations that nation-states had used to enshrine their power only made it easier to undermine them. The bigger they were, the harder they fell. Brextopia, the European Union, NATO's cave, the United State machine, all returned to dust. The decline of the nation state in the roaring 20s became a canon of canonicity for an entire generation of sole traders. It wasn't even just the bit-lievers. In those days, there were many networks, many messiahs, many ideologics, all with their own profit motives. So, many of you have doubtless heard of this concept of the network state that has emerged in the last few years. Exit fantasies, arguably fueled by the failure or the shortcomings, the shortfall of the full promise and the dream of Web3. I'm going to read you a short section from Balaji Srinivasan's self-titled book in 2022. A network state is a social network with a moral innovation, a sense of national consciousness, a recognized founder, a capacity for collective action, an in-person level of civility, an integrated cryptocurrency, a consensual government limited by a social smart contract, an archipelago of crowdfunded physical territories, a virtual capital, and an on-chain census that proves a large enough population, income, and real estate footprint to attain a measure of diplomatic recognition. The magic words in here are recognized founder and diplomatic recognition, and territory, I would say. So these projects carry echoes, in my mind at least, of some of the earliest expeditions set out from the West in search of new territories to conquer and subjugate. So many of you have doubtless heard of things like Liberland, Sealand, Seasteading Institutes. This is the Zaha Hadid-designed metaverse HQ of Liberland on screen here. There's been so many attempts to make crypto islands, Bitcoin cruise ships, and they've all failed. It's quite interesting. 
But what seems to be different now is the level of capital on hand as a result of market success of these technologies to the people that want to reshape the map of the world in their own, you know, to their own preferences, into their own cause, we know that leaders of nation states, we even have leaders of nation states that are cheerleading some of these technologies. Nayib Bukeri, the authoritarian strongman in El Salvador, would like to build a Bitcoin city financed by volcano bonds in his country. He's not done it yet. But meanwhile, he's removed the judiciary, removed term limits, and instituted one-party rule. And most of the Bitcoins are cheering this on. So I ask myself, are we still in this for the freedom? And now, made possible by Ethereum, DAOs, and all the rest of it, we have projects like Aleph, Urbit, and even Praxis. Praxis on the left, and Prospera on the right. So these projects are trying to build physical cities, private territories, network states, or at least what may develop into the Bellagio concept of the nation state. And it's at this point I want to introduce the chainmail gaze. Today's tech overlords are the descendants of europe's crusaders well-financed zealous fanatics wreaking destruction on the planetary other in the name of their greater good the vatican sponsored waves of levantine invasions that began in the late 11th century with the midwife of capitalism colonialism and technology as we know it today. With the network state organizational concept, a cadre of powerful ideologues blessed with tokenized wealth, a toying with the prospect of reshaping national frontiers, mirroring the desires of Frankish noblemen and their knightly orders in the Levant a thousand years ago. And that's all the time I have. Seven minutes for a thousand years of Western history. Thank you very much.", "eventId": "devcon-7", - "slot_start": 1731395400000, - "slot_end": 1731396000000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1OM2zZQsD8haiBnMdAS98Oz90Cmk3F2nH7dY0H_hjKTA", - "resources_slides": null, + "slot_start": 1731409800000, + "slot_end": 1731410400000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/17RnVgqUzy-db9C_X4-QKgghgKSZ40O-5PtTPVJladMk", + "resources_slides": "https://drive.google.com/file/d/1iUoQ4ZdzG69XCZPrnta5uZd5c0RAkUuc/view", "speakers": [ - "andy" + "wassim-z-alsindi" ] }, "vector": [ @@ -727132,6 +725093,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -727916,14 +725878,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -727948,11 +725909,9 @@ 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, @@ -727968,6 +725927,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -727989,7 +725949,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -728366,7 +726325,6 @@ 0, 0, 2, - 2, 0, 0, 0, @@ -728439,7 +726397,6 @@ 2, 0, 0, - 0, 2, 0, 0, @@ -728452,62 +726409,45 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "the-chain-abstraction-master-plan", - "sourceId": "DCSCA7", - "title": "The Chain Abstraction Master Plan", - "description": "Chain abstraction is vital for Ethereum’s future competitiveness and interoperability. This talk will dive into why Ethereum apps need chain abstraction to avoid fragmentation and ensure open, trustless, and modular systems. 
We’ll explore approaches to abstraction, the importance of open standards, and a roadmap for upgrading the ecosystem’s core infrastructure—spanning JSON-RPC API improvements, resource locks, and intent settlement—to unlock new layers of usability and decentralization.", - "track": "Usability", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Product", + "id": "the-challenges-of-leaving-laboratory-outbreaks-to-scientists", + "sourceId": "TPLHFG", + "title": "The challenges of leaving laboratory outbreaks to scientists", + "description": "NA", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", + "expertise": "Expert", + "audience": "Academic", "featured": false, "doNotRecord": false, - "tags": [ - "Account Abstraction", - "Cross-L2", - "Developer Infrastructure", - "DevEx", - "Ethereum Roadmap", - "Gas", - "Intents", - "MEV", - "Paymaster", - "Rollups", - "Token bridging", - "Transaction fees mechanisms", - "User Experience" - ], - "keywords": [ - "Chain Abstraction", - "OneBalance", - "Resource Locks" - ], - "duration": 883, + "tags": [], + "keywords": [], + "duration": 661, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "9fH-de8v53g", + "sources_swarmHash": "fa87bb835a351d6d4df9119d32643e69d0b3dc68a8b607fdeba54852e233647d", + "sources_youtubeId": "vwTDtUELw3g", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735e4739dbb7a90e18580fb", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6735abb99dbb7a90e1a6f0a9", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735abb99dbb7a90e1a6f0a9.vtt", + "transcript_text": " Hi, thank you for inviting me to give a talk and thank you Vitalik for that perfect opening to my talk. So who am I and what am I doing here? In early 2020, I was pushing for the lab leak hypothesis to be taken very seriously. And as a scientist from a reputable institute, that gave me a lot of credibility. And once the lab leak hypothesis was taken seriously by experts broadly, that's when I went to the Bulletin of Atomic Scientists and asked them to please convene international experts to think of new frameworks around risky research that could cause pandemics. And so in the next 10 minutes, I'll tell you about ways that I think we should structure a future effective bio-risk and bio-safety approach. So as Vitalik mentioned, the number one takeaway that most of us had from the COVID-19 pandemic is that actually experts are not very reliable during times of crisis and that they themselves can be a source of misinformation. And we saw that in the COVID-19 pandemic, this ranged from human-to-human transmission, whether masks are protective, how the virus spreads, whether it's airborne, how to protect yourself, what is herd immunity, and whether the vaccines would prevent infection. And most of all, whether the pandemic might have started in a lab. And so what do you do about the situation, right, where the experts are not even able to tell you what is happening in real time? So fast forward five years since the pandemic began, we now know that experts broadly around the world agree that both natural and lab leak hypotheses are on the table. 
And briefly, this ranges all the way from an animal at the market infecting people to perhaps a villager getting infected and then traveling to Wuhan, or maybe a scientist was infected in the field while collecting samples and then went to Wuhan, or that there was an accident in a lab where this virus was being handled. Ultimately, everyone agrees that the original virus, the precursor of this virus, must have come from bats, but we just do not agree on how it made its way to Wuhan. And so all the evidence today remains circumstantial. No natural or lab source has been found or reported. And surprisingly, five years later, no external investigation has been conducted. And what we know now is that there were influential scientists who sought to squash the theory, calling it a conspiracy theory, and this started all the way at the beginning of the pandemic. So I was really fortunate to publish my analysis of why I think the evidence strongly points towards a lab origin in the New York Times this year. And just to sum this up, if this is the first time you've heard of this, I really recommend you check out the article. Why do I think that COVID-19 likely started in a lab? This pandemic, it really could have been caused by any of hundreds of different virus species, in any of tens of thousands of wildlife markets in this region, in any of thousands of large cities, and in any year. But it was a SARS-like coronavirus with a unique furin cleavage site. This is a gain-of-function feature that emerged in Wuhan less than two years after scientists there, sometimes working at low biosafety, proposed collecting and creating viruses of that very same design. And of the hundreds of other SARS-like viruses we know today, none of them have such a furin cleavage site. So this story, why haven't we found the origin of COVID-19? It's because an incredible amount of data continues to be withheld. So this concerns early cases, the transmission chain, the genome of the virus that was withheld in the early days, including early sequences still being withheld. We still do not have access to the activities and catalogs at both the wildlife market in the city as well as the laboratories in the city. So all of these were not put out in public. A lot of it was withheld in private communications, private databases, in scientific journals, and under peer review. Sometimes an editor would reject a paper and it would stay hidden for years. And private research documents as well as theses and grant proposals, we only know about all of these data today because they were unearthed by mostly internet sleuths, so independent vigilante sleuths, as well as scientists and journalists. And some of these other documents were leaked by people inside the US government, or others were sued for through the Freedom of Information Act. So these things had to be painstakingly pulled out of the sources. What the COVID-19 pandemic really showed us was this complete lack of interest in finding the origin of COVID-19. So if you look at the US collaborators of the Wuhan scientists, they admitted that their collaboration had never given them access to the pathogen samples. So these are samples from bats and even people, collected and stored in Wuhan. They could not reproduce the database of the samples or virus sequences. And they did not have access to the database in real time that was in Wuhan. 
So they didn't even know about viruses collected after 2015, even though the funding covered their work through 2019. So that's a four-year gap. They do not know about the viruses collected after 2015 in Wuhan. They did not ask their collaborators in Wuhan if they had started on experiments proposed in 2018. So this included experiments where they were inserting furin cleavage sites and other gain-of-function features into SARS-like viruses in Wuhan. So some would say this is strategic ignorance, right? Maybe they didn't ask because they didn't want to have information that would then put them in a difficult spot. But I would actually argue that it was structural, because they never structured their collaboration in a way that gave them full access in real time to the samples and even the sequences. And this problem really goes way beyond the EcoHealth Alliance to even upwards in government. Earlier this year, the Department of Defense in the U.S. said that they had not been closely tracking their funded projects. They could not even tell how much funding they had given to Chinese research that would enhance pathogens with pandemic potential. And unfortunately, the new US government policy on this sort of research that kicks in next May still leaves many loopholes open. In fact, the entire research pipeline that could have caused the COVID-19 pandemic remains not subject to oversight. So you can go anywhere, collect any novel pathogens, no oversight. You can serially passage them, so adapt them to human cells, even human airway cells, and even humanized animals, still not subject to oversight. So really nothing has been done here to deal with this risk of pandemics coming from labs in this manner. So unlike other technologies, you know, like nuclear, chemical, even in transportation, even flying planes, where you have a serious accident, there's no independent body here for this sort of pandemic risk research to really conduct external oversight. So there's no independent external organization that will have oversight of this work around the U.S., let's say. And when there's an incident, there's no investigation to go in by this independent body. It's usually all self-investigation, self-reporting. So this is a real problem because we are now truly in an era of pandemics starting from laboratories. Unlike 20 years ago, today research for pandemic risk is proliferating due to many technological advances in gene sequencing and synthesis as well as dropping costs. So now more and more labs are gaining access to this ability to create viruses in the lab from scratch, just based on the sequences. It's not very expensive. And unfortunately, a lot of this is happening at low biosafety in ways that cannot contain airborne viruses like COVID-19, let's say. And it's importantly not being systematically tracked. So there's no one really systematically tracking it, even at a national level in the U.S. So why is this the case? And I think that there are two problems here that really underlie this challenge. So the first is that there are many penalties and fear of retaliation for virologists to be calling out their peers. So imagine if you start pointing fingers at your colleagues for doing risky research, you could immediately find yourself alienated and shut out of funding applications as well as publications. So this would essentially destroy your career.
And those who are for gain of function, so they want gain of function to happen, they say that you can't put regulation on us, you can't put all this oversight on us, because then the US will lose the competitive edge. So the problem with this is that even these scientists, these leading experts, as seen in the case of COVID-19, cannot accurately predict the risk of their cutting-edge experiments. It's really hard for them to know what will happen when they're working with novel pathogens and putting in new features. So the second problem here is that we are kind of stuck in this 9-11 dichotomy of outbreaks being either natural or deliberate. And this is a problem because I think that the risk of pandemics coming from lab accidents is much higher than the risk of a pandemic coming from a deliberate bioterror incident. So unfortunately, this story of "let's defeat the bad guys, let's defeat the terrorists" gets much more interest and funding. It's much more sexy than telling people, "let's just stop people from having accidents, let's protect the good guys from having accidents in their labs." So this whole field of accidental lab leaks has been really neglected, in my opinion, and it's compounded by the fact that many virologists are afraid to call out their peers. So very little is being done. And I have this quote that really exemplifies this problem. So early on in the pandemic, an influential virologist said, it really bothered me that there was some suggestion this was a lab construct, so the virus that was causing the pandemic. If it turned out to be true, it would really bother the hell out of me. Not just because of people dying and so forth. It's an indictment of the field, right? So these virologists, and not just him, they think that if COVID came from a lab, it would indict the entire field of virology. So this is a major conflict of interest. So where do we go from here? Clearly, we should investigate the origin of COVID-19 so that we can restore public trust. Imagine if, you know, a few decades down the road, it's found that this pandemic started in a lab. We need to be able to look back and see our public leaders, as well as our scientists, leading the charge on investigating and finding the lab origin. We don't want to look back and see them being complicit in the cover-up, suppressing an investigation. So there are three things that I think should inform a viable biosafety strategy. So the first thing is that OSINT is necessary, but during a crisis, as we have seen, it is challenged by mis- and disinformation, and a great amount of information is actually kept private, not even for malicious reasons. We need a system to counter misinformation from experts, and journalists especially should be much more skeptical and seek diverse input so that they don't find themselves amplifying the misinformation. And finally, we really truly need an independent organization and regulation that focuses on reducing the risk of accidental pandemics.
So thank you again for inviting me to give this talk.", "eventId": "devcon-7", - "slot_start": 1731576600000, - "slot_end": 1731577800000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1aMlbfC7Va_bqN5fI43BFPOB0iIennWgUwyiQxb7D3q0", - "resources_slides": null, + "slot_start": 1731568200000, + "slot_end": 1731568800000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1p9hSMYlq5ABHla4brR0sibxE7RLsOTyxT95WWe9_UTQ", + "resources_slides": "", "speakers": [ - "stephane-gosselin" + "alina-chan" ] }, "vector": [ + 0, + 6, 0, 0, 0, @@ -728516,7 +726456,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -729261,7 +727200,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -729275,7 +727213,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -729289,7 +727226,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -729299,9 +727235,7 @@ 0, 0, 0, - 2, 0, - 2, 0, 0, 0, @@ -729310,11 +727244,8 @@ 0, 0, 0, - 2, 0, 0, - 2, - 2, 0, 0, 0, @@ -729451,7 +727382,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -729459,10 +727389,8 @@ 0, 0, 0, - 2, 0, 0, - 2, 0, 0, 0, @@ -729486,7 +727414,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -729685,7 +727612,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -729822,7 +727748,9 @@ 0, 0, 0, - 2, + 0, + 0, + 0, 0, 0, 0, @@ -729839,48 +727767,51 @@ 0, 0, 0, + 0, + 2, + 0, + 0, + 0, 0 ] }, { "session": { "id": "the-chain-mail-gaze", "sourceId": "73SKE9", "title": "The Chain Mail Gaze", "description": "With their dreams of new ‘Network State’ empires, resource extraction, and colonial domination, today’s tech overlords are the descendants of Europe’s mediaeval Crusaders: well-financed, zealous fanatics remaking the world in the name of their greater good. Through a psycho-political reading of scarcity, chauvinism, and colonialism, The Chain Mail Gaze connects Crusader ideologues’ desire for blood, land, and booty, to emerging ‘frontiers’ mediated by contemporary technologies.", "track": "Coordination", "type": "Lightning Talk", "expertise": "Beginner", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ "Governance", "Network State", "decolonial", "Governance", "Network State" ], "keywords": [ "decolonial" ], "duration": 449, "language": "en", "sources_swarmHash": "422f0d629401b9f6bd9d6e38fd02fe8770da00b13ef29d52d9871a214bff2fc1", "sources_youtubeId": "zhsmBcFnDsE", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67382f841b0f83434da7d8bf", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67382f841b0f83434da7d8bf.vtt", "transcript_text": " The Chain Mail Gaze, by Wassim Z. Alsindi. Please, give him a warm welcome. Hello everybody. It's slightly out of place to have such cheerful music introducing this talk, perhaps. So the title of it is called The Chain Mail Gaze, and hopefully the reasons for that will become apparent as we go on.
But I'm sure you've all heard of the male gaze and the kind of externalities and, you know, unfavorable things that come with that. But what about the chain mail gaze? Well, hopefully we'll find out. I've not got very long, so I'll try to give you a survey of what's actually quite an extensive anthropological and philosophical research project. Some pictures of big strong men and their swords, looking into the distance, perhaps thinking about lands to conquer. A short poetic refrain: the church and the network, zeal and time, death and money, all sides of the same coin. I want to talk to you about an interim project that led up to where we're going to, that's called Profit Motives, looking at the false divinity of capital, how it animates motivations and desires in the technology space that might not be immediately apparent. As long as there's been financial capital, risk and speculation have orbited, manipulated and harnessed it as narrative feedback machines, simultaneously reading and rewriting realities. Markets exist as a distributed conversation amongst speculators, driven by profit motives and appetite for divination and prophecy. Today new strains of techno-colonialism are emerging, which are the latest of a series of echoes throughout Western history. An ascendant cabal of technology elites are attempting to reshape the world in their favor whilst hiding in plain sight behind the technologies that have enriched them. Theirs is a Promethean zealotry without faith, affecting an aura of divine sanction for the purposes of elevating the ego, enriching the chosen ones, and creating empires of various stripes. But was it not always so? I want to read you a short story, fictional speculation, about where we might be going in the future. It's called Seething Like a State. Decentralization has a cost, the price of anarchy. The price is always due, but the rewards weren't cheap to reap. So solid CCRU. What most did not expect was that payment would become due at the grandest scales of governance. The West failure state, forged under the fire of peer pressure, was ambushed by upstart modes of power, opening new vistas of communication, commodification, and communion. The orientations that nation-states had used to enshrine their power only made it easier to undermine them. The bigger they were, the harder they fell. Brextopia, the European Union, NATO's cave, the United State machine, all returned to dust. The decline of the nation state in the roaring 20s became a canon of canonicity for an entire generation of sole traders. It wasn't even just the bit leavers. In those days, there were many networks, many messiahs, many ideologics, all with their own profit motives. So, many of you have doubtless heard of this concept of the network state that has emerged in the last few years. Exit fantasies, arguably fueled by the failure or the shortcomings, the shortfall of the full promise and the dream of Web3. I'm going to read you a short section from Balaji Srinivasan's self-titled book in 2022. A network state is a social network with a moral innovation, a sense of national consciousness, a recognized founder, a capacity for collective action, an in-person level of civility, an integrated cryptocurrency, a consensual government limited by a social smart contract, an archipelago of crowdfunded physical territories, a virtual capital, and an on-chain census that proves a large enough population, income, and real estate footprint to attain a measure of diplomatic recognition.
The magic words in here are recognized founder and diplomatic recognition, and territory, I would say. So these projects carry echoes, in my mind at least, of some of the earliest expeditions set out from the West in search of new territories to conquer and subjugate. So many of you have doubtless heard of things like Liberland, Sealand, Seasteading Institutes. This is the Zaha Hadid-designed metaverse HQ of Liberland on screen here. There's been so many attempts to make crypto islands, Bitcoin cruise ships, and they've all failed. It's quite interesting. But what seems to be different now is the level of capital on hand, as a result of the market success of these technologies, to the people that want to reshape the map of the world to their own preferences, to their own cause. We even have leaders of nation states that are cheerleading some of these technologies. Nayib Bukele, the authoritarian strongman in El Salvador, would like to build a Bitcoin city financed by volcano bonds in his country. He's not done it yet. But meanwhile, he's removed the judiciary, removed term limits, and instituted one-party rule. And most of the Bitcoiners are cheering this on. So I ask myself, are we still in this for the freedom? And now, made possible by Ethereum, DAOs, and all the rest of it, we have projects like Aleph, Urbit, and even Praxis. Praxis on the left, and Prospera on the right. So these projects are trying to build physical cities, private territories, network states, or at least what may develop into the Balaji concept of the network state. And it's at this point I want to introduce the chainmail gaze. Today's tech overlords are the descendants of Europe's Crusaders: well-financed, zealous fanatics wreaking destruction on the planetary other in the name of their greater good. The Vatican sponsored waves of Levantine invasions that began in the late 11th century, the midwife of capitalism, colonialism and technology as we know it today. With the network state organizational concept, a cadre of powerful ideologues blessed with tokenized wealth are toying with the prospect of reshaping national frontiers, mirroring the desires of Frankish noblemen and their knightly orders in the Levant a thousand years ago. And that's all the time I have. Seven minutes for a thousand years of Western history.
Thank you very much.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731409800000, - "slot_end": 1731410400000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/17RnVgqUzy-db9C_X4-QKgghgKSZ40O-5PtTPVJladMk", - "resources_slides": null, + "slot_start": 1731390000000, + "slot_end": 1731390600000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1iRVxAm1tYqEBlFNUqErTPQ1GCnhG1txvgCWdfQbSgpk", + "resources_slides": "https://drive.google.com/file/d/1-64x8IsSYka-eRl4bwL--25gw7uB38Y9/view", "speakers": [ - "wassim-z-alsindi" + "giacomo" ] }, "vector": [ @@ -729894,7 +727825,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -730509,13 +728439,9 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -730682,7 +728608,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -730713,6 +728638,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -730726,12 +728652,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -731000,6 +728926,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -731129,7 +729056,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -731203,8 +729129,8 @@ 2, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -731220,35 +729146,35 @@ }, { "session": { - "id": "the-challenges-of-leaving-laboratory-outbreaks-to-scientists", - "sourceId": "TPLHFG", - "title": "The challenges of leaving laboratory outbreaks to scientists", - "description": "NA", + "id": "the-dacc-vision-balancing-progress-and-protection", + "sourceId": "AA8SRQ", + "title": "The d/acc Vision: Balancing Progress and Protection", + "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", - "expertise": "Expert", - "audience": "Academic", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [], "keywords": [], - "duration": 661, + "duration": 594, "language": "en", - "sources_swarmHash": "fa87bb835a351d6d4df9119d32643e69d0b3dc68a8b607fdeba54852e233647d", - "sources_youtubeId": "vwTDtUELw3g", + "sources_swarmHash": "b77c636e49391f4c713aa60f1572f8527d82b94618ed7b10dd27f83b94af084c", + "sources_youtubeId": "pTpURVxj9hk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735abb99dbb7a90e1a6f0a9", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735abb99dbb7a90e1a6f0a9.vtt", - "transcript_text": " Hi, thank you for inviting me to give a talk and thank you Vitalik for that perfect opening to my talk. So who am I and what am I doing here? In early 2020, I was pushing for the lab leak hypothesis to be taken very seriously. And as a scientist from a reputable institute, that gave me a lot of credibility. And once the lab leak hypothesis was taken seriously by experts broadly, that's when I went to the Bulletin of Atomic Scientists and asked them to please convene international experts to think of new frameworks around risky research that could cause pandemics. And so in the next 10 minutes, I'll tell you about ways that I think we should structure a future effective bio-risk and bio-safety approach. 
So as Vitalik mentioned, the number one takeaway that most of us had from the COVID-19 pandemic is that actually experts are not very reliable during times of crisis and that they themselves can be a source of misinformation. And we saw that in the COVID-19 pandemic, this ranged from human-to-human transmission, whether masks are protective, how the virus spreads, whether it's airborne, how to protect yourself, what is herd immunity, and whether the vaccines would prevent infection. And most of all, whether the pandemic might have started in a lab. And so what do you do about the situation, right, where the experts are not even able to tell you what is happening in real time? So fast forward five years since the pandemic began, we now know that experts broadly around the world agree that both natural and lab leak hypotheses are on the table. And briefly, this ranges all the way from an animal at the market infecting people to perhaps a villager getting infected and then traveling to Wuhan, or maybe a scientist was infected in the field while collecting samples and then went to Wuhan, or that there was an accident in a lab where this virus was being handled. Ultimately, everyone agrees that the original virus, the precursor of this virus, must have come from bats, but we just do not agree on how it made its way to Wuhan. And so all the evidence today remains circumstantial. No natural or lab source has been found or reported. And surprisingly, five years later, no external investigation has been conducted, and what we know now is that there were influential scientists who sought to squash the theory, calling it a conspiracy theory, and this started all the way at the beginning of the pandemic. So I was really fortunate to publish my analysis of why I think the evidence strongly points towards a lab origin in the New York Times this year. And just to sum this up: if this is the first time you've heard of this, I really recommend you check out the article. Why do I think that COVID-19 likely started in a lab? This pandemic, it really could have been caused by any of hundreds of different virus species, in any of tens of thousands of wildlife markets in this region, in any of thousands of large cities, and in any year. But it was a SARS-like coronavirus with a unique furin cleavage site. This is a gain-of-function feature that emerged in Wuhan less than two years after scientists there, sometimes working at low biosafety, proposed collecting and creating viruses of that very same design. And of the hundreds of other SARS-like viruses we know today, none of them have such a furin cleavage site. So this story, why haven't we found the origin of COVID-19? It's because an incredible amount of data continues to be withheld. So this concerns early cases, the transmission chain, the genome of the virus that was withheld in the early days, including early sequences still being withheld. We still do not have access to the activities and catalogs at both the wildlife market in the city as well as the laboratories in the city. So all of these were not put out in the public. A lot of it was withheld in private communications, private databases, in scientific journals, and under peer review. Sometimes an editor would reject a paper and it would stay hidden for years.
And private research documents as well as theses and grant proposals, we only know about all of these data today because they were unearthed by mostly internet sleuths, so independent vigilante sleuths, as well as scientists and journalists. And some of these other documents were leaked by people inside the US government or others were sued for through the Freedom of Information Act. So these things had to be painstakingly pulled out of the sources. What the COVID-19 pandemic really showed us was this complete lack of interest in finding the origin of COVID-19. So if you look at the US collaborators of the Wuhan scientists, they admitted that their collaboration had never given them access to the pathogen samples. So these are samples from bats and even people collected and stored in Wuhan. They could not reproduce the database of the samples or virus sequences. And they did not have access to the database in real time that was in Wuhan. So they didn't even know about viruses collected after 2015, even though the funding covered their work through 2019. So that's a four-year gap. They do not know about the viruses collected after 2015 in Wuhan. They did not ask their collaborators in Wuhan if they had started on experiments proposed in 2018. So this included experiments where they were inserting furin cleavage sites and other gain-of-function features into SARS-like viruses in Wuhan. So some would say this is strategic ignorance, right? Maybe they didn't ask because they didn't want to have information that would then put them in a difficult spot. But I would actually argue that it was structural, because they never structured their collaboration in a way that gave them full access in real time to the samples and even the sequences. And this problem really goes way beyond the EcoHealth Alliance to even upwards in government. Earlier this year, the Department of Defense in the U.S. said that they had not been closely tracking their funded projects. They could not even tell how much funding they had given to Chinese research that would enhance pathogens with pandemic potential. And unfortunately, the new US government policy on this sort of research that kicks in next May still leaves many loopholes open. In fact, the entire research pipeline that could have caused the COVID-19 pandemic remains not subject to oversight. So you can go anywhere, collect any novel pathogens, no oversight. You can serially passage them, so adapt them to human cells, even human airway cells, and even humanized animals, still not subject to oversight. So really nothing has been done here to deal with this risk of pandemics coming from labs in this manner. So unlike other technologies, you know, like nuclear, chemical, even in transportation, even flying planes, where you have a serious accident, there's no independent body here for this sort of pandemic risk research to really conduct external oversight. So there's no independent external organization that will have oversight of this work around the U.S., let's say. And when there's an incident, there's no investigation to go in by this independent body. It's usually all self-investigation, self-reporting. So this is a real problem because we are now truly in an era of pandemics starting from laboratories. Unlike 20 years ago, today research for pandemic risk is proliferating due to many technological advances in gene sequencing and synthesis as well as dropping costs.
So now more and more labs are gaining access to this ability to create viruses in the lab from scratch, just based on the sequences. It's not very expensive. And unfortunately, a lot of this is happening at low biosafety in ways that cannot contain airborne viruses like COVID-19, let's say. And it's importantly not being systematically tracked. So there's no one really systematically tracking it, even at a national level in the U.S. So why is this the case? And I think that there are two problems here that really underlie this challenge. So the first is that there are many penalties and fear of retaliation for virologists to be calling out their peers. So imagine if you start pointing fingers at your colleagues for doing risky research, you could immediately find yourself alienated and shut out of funding applications as well as publications. So this would essentially destroy your career. And those who are for gain of function, so they want gain of function to happen, they say that you can't put regulation on us, you can't put all this oversight on us, because then the US will lose the competitive edge. So the problem with this is that even these scientists, these leading experts, as seen in the case of COVID-19, cannot accurately predict the risk of their cutting-edge experiments. It's really hard for them to know what will happen when they're working with novel pathogens and putting in new features. So the second problem here is that we are kind of stuck in this 9-11 dichotomy of outbreaks being either natural or deliberate. And this is a problem because I think that the risk of pandemics coming from lab accidents is much higher than the risk of a pandemic coming from a deliberate bioterror incident. So unfortunately, this story of "let's defeat the bad guys, let's defeat the terrorists" gets much more interest and funding. It's much more sexy than telling people, "let's just stop people from having accidents, let's protect the good guys from having accidents in their labs." So this whole field of accidental lab leaks has been really neglected, in my opinion, and it's compounded by the fact that many virologists are afraid to call out their peers. So very little is being done. And I have this quote that really exemplifies this problem. So early on in the pandemic, an influential virologist said, it really bothered me that there was some suggestion this was a lab construct, so the virus that was causing the pandemic. If it turned out to be true, it would really bother the hell out of me. Not just because of people dying and so forth. It's an indictment of the field, right? So these virologists, and not just him, they think that if COVID came from a lab, it would indict the entire field of virology. So this is a major conflict of interest. So where do we go from here? Clearly, we should investigate the origin of COVID-19 so that we can restore public trust. Imagine if, you know, a few decades down the road, it's found that this pandemic started in a lab. We need to be able to look back and see our public leaders, as well as our scientists, leading the charge on investigating and finding the lab origin. We don't want to look back and see them being complicit in the cover-up, suppressing an investigation. So there are three things that I think should inform a viable biosafety strategy.
So the first thing is that OSINT is necessary, but during a crisis, as we have seen, it is challenged by mis- and disinformation, and a great amount of information is actually kept private, not even for malicious reasons. We need a system to counter misinformation from experts, and journalists especially should be much more skeptical and seek diverse input so that they don't find themselves amplifying the misinformation. And finally, we really truly need an independent organization and regulation that focuses on reducing the risk of accidental pandemics. So thank you again for inviting me to give this talk.", + "sources_streamethId": "67356cb99dbb7a90e15f86d1", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67356cb99dbb7a90e15f86d1.vtt", + "transcript_text": " I will get straight into the one slide that I have today. So last year I wrote this post that introduced this concept of d/acc and in general was basically responding to a lot of the discussion that was happening at the time around techno-optimism, the need to be more optimistic about technology and to appreciate the very positive impact that technology actually has had over the past 10 millennia, and also the extremely positive potential that it has over the next century. And at the same time take seriously some of the risks and also really focus on which technologies are going to do the best possible job of giving us the kind of future that we want while at the same time minimizing and even actively fighting against the risks. One of the arguments that I made is basically that there is this important phenomenon which is the offense-defense balance, right? Basically is it easier to attack or is it easier to defend? And if you have an environment where it's easy to attack, then you almost inevitably have this kind of dark Hobbesian choice between a very powerful sovereign and a very destructive state of anarchy. And it just inherently creates all kinds of very bad political effects on top of creating constant ongoing risk of suffering, right? And at the same time, this is all happening under this backdrop of this discussion about is AI itself, and particularly super intelligent AI, this very big and massive risk? If it is, should we try to slow it down? But then if we do slow it down, then slowing it down forever basically just creates a world that becomes more and more unstable. And so what is the actually resilient and actually long-term stable future that we could be aiming for? And I think this to me is a near and mid-term part of the answer. So in the post that I made last year, I split up defensive technology into four categories. So the first split was the split between the world of atoms and the world of bits. So very famous distinction, might as well just grab it and use it. And within the world of atoms, split it up between macro and micro. And so macro defense, basically you think about physical resilience. And we'll have some very interesting talks later today, including topics on how to massively reduce casualties if some kind of extremely terrible disaster happens to humanity. Micro-defense basically means biodefense, so anti-pandemics.
And both myself and a huge number of other really fascinating people will be talking about how we can make our environments vastly more resilient against actually existing and even potential pandemics, all without requiring significant changes in individual behavior. So you might be pleased to know that this room is actually already passively airborne-disease resistant. So for example, over here you have this, I mean, box, and the box is a HEPA filter. We have a bunch of these all around the room. And so basically this room becomes vastly more safe against COVID, against any kind of future airborne virus that comes up, without requiring anyone to actually notice. And this is one example of the kinds of open and widely accessible, easily deployable, and freedom-preserving technologies that we can deploy that can give humanity an orders of magnitude boost in its resistance against these kinds of threats. These are things that we are not doing today, and these are things that with a surprisingly low amount of investment we could actually do a much better job at. So then on the other side we have the world of bits. And the world of bits, this is a distinction that is I believe unique to me. But I talk about the distinction between cyber defense and info defense. Cyber defense is defense against threats where all reasonable people agree who the attacker is, right? So if a DeFi protocol gets hacked, then obviously the DeFi smart contract doesn't agree that it got hacked, because if it did, it wouldn't have sent any money out. But all reasonable people looking at the situation will agree that, well, yes, there is a hacker, or at the very least, someone who used a very unintended mechanism to get coins out of that system. And so we can talk about cryptography, we can talk about formal verification, blockchains, zero-knowledge proofs, what I call the Egyptian God Protocols, FHE and obfuscation, and also another important thing, hardware security, and actually deploying all of those technologies and actually even applying them at the level of operating systems and making sure that things are much safer than they would be today. And then info defense. So this is where reasonable people can actually disagree on who the attacker is. So one man's misinformation is another man's unjustly suppressed valid point and we need technology to actually help filter through this information and help people identify what kinds of content are more likely to be actually positive and what kinds of content are more likely to be misinformative, misleading, things that even they themselves, if they better understood it, would not want to see, and do so in a way that does not involve empowering a centralized elite that decides on behalf of everyone else what is good and true and what is bad and false. Now, that is all a vision of defense. I think one thing that is also important to talk about here is I added a third dimension in the year since then, right? And I call this the survive-thrive dimension. And here we talk about not just the technologies to protect against the bad stuff, but also actually enabling the positive future, right? So in the bio side, we have longevity. Who here is excited about longevity? So now longevity, and then beside longevity, we also have BCI, and BCI is kind of conveniently beside longevity and open decentralized compute, right? So on the left, or at the top, is making sure compute is safe. At the bottom is making sure compute is amazing.
And compute and biology together, we get BCI acceleration. And I think one of the really important points that we're going to make is that there's actually a lot of ties between these different spaces. It's all one very big integrated field. A lot of the viral persistence research that's being done in the context of long COVID right now actually has a lot of tie-ins that are very applicable to aging, right? So there's recent new theories that viral persistence actually is a thing that's a very big contributor to Alzheimer's. And then BCI, better medical technology, it contributes to fighting diseases, it can contribute to living longer under, quote, normal conditions, and it's also a BCI accelerator. Now, physical abundance. So we want the big solarpunk cities, we want housing to be affordable. We want, you know, housing to follow Moore's Law instead of following Eroom's Law, for those who know what those are. And then, you know, we want to go to space. We want everything about our physical environments to not just be resilient but also be affordable and amazing. And finally, collaboration technology. So even in situations where people are not attacking each other, where you actually do have reasonable people and communities that have high trust between each other, but they do want to be able to more quickly and more effectively agree on things and come to new consensus. If we want decentralized collectives to, like, actually be able to act like live players and make bold choices and keep adapting themselves to rapidly changing circumstances without that collapsing into a dictatorship, then we need much more powerful collaboration technology. So this includes all of the stuff we've been doing around public goods funding. It includes quadratic voting, other forms of voting, futarchy, all kinds of different governance ideas. So basically I think there are natural tie-ins at all sides of this between technologies that can create an environment where everything is much more safe and much more resilient by default and an environment where we have open, distributed and widely available progress for everyone. And so this is what all of the speeches that we're going to see today are going to be about. And I'm very excited to be with you and listen to them. Thank you.
And so now I'll introduce our next speaker, Eli Dourado.", "eventId": "devcon-7", "slot_start": 1731553200000, "slot_end": 1731553800000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/105T9qheqDS91uBB6zsLjkTZKkqteIemZL0l9pkz8eJo", "resources_slides": "https://drive.google.com/file/d/1GnSqMMj86XIKIrrDYqp1bTaWUvTFJ3uR/view", "speakers": [ "vitalik-buterin" ] }, "vector": [ @@ -731445,6 +729371,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -731878,12 +729805,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -732567,6 +730488,7 @@ 0, 0, 0, + 2, 0, 0, 2, @@ -732579,7 +730501,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -732588,41 +730509,44 @@ }, { "session": { - "id": "the-combination-of-zkp-mpc-fhe", - "sourceId": "XPLVT8", - "title": "The combination of ZKP +/- MPC +/- FHE", - "description": "This talk will provide you with the necessary intuition to understand when you should use ZKP, MPC or FHE, or any combination of them.", - "track": "Applied Cryptography", - "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Developer", + "id": "the-daos-of-the-east", + "sourceId": "BUKGLV", + "title": "The DAOs of the East", + "description": "DAOs are growing fast in East Asia, and they work very differently from DAOs in the West. From regional revitalization in Japan to Taiwan's digital ministry to the Chinese diaspora, I'll cover many examples and what they mean for the global community of DAOs.", + "track": "Coordination", + "type": "Talk", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "ZKP", - "MPC", - "fhe", - "MPC", - "ZKP" + "DAO", + "Collective Intelligence", + "Regulation", + "asia", + "Collective Intelligence", + "DAO" ], "keywords": [ - "FHE" + "Asia" ], - "duration": 521, + "duration": 1454, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "33043f047e26dd63e0e3f70d83d15ed508047a0e835ef5a62f8ff974ed11ca5f", + "sources_youtubeId": "hJ2bIUH6_VA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673481e19dbb7a90e1d36174", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731390000000, - "slot_end": 1731390600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1iRVxAm1tYqEBlFNUqErTPQ1GCnhG1txvgCWdfQbSgpk", - "resources_slides": null, + "slot_start": 1731492000000, + "slot_end": 1731493800000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/185nuWRZn9PaXkbj3mmudjiul9XhVrRireCzXcJBlu4Y", + "resources_slides": "https://drive.google.com/file/d/1J_r5wl8YGbGtaLxHKFm4LNZoRVCyC0QK/view", "speakers": [ - "giacomo" + "joshua-tan" ] }, "vector": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -732636,8 +730560,8 @@ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -733408,6 +731332,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -733452,13 +731377,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -733466,7 +731384,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -733482,11 +731399,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -733741,7 +731660,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -733831,6 +731749,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@
-733946,10 +731865,10 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, 0, 0, 0, @@ -733962,47 +731881,47 @@ }, { "session": { - "id": "the-dacc-vision-balancing-progress-and-protection", - "sourceId": "AA8SRQ", - "title": "The d/acc Vision: Balancing Progress and Protection", - "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "", - "audience": "Engineering", + "id": "the-dave-fraud-proof-algorithm-triumphing-over-sybils-with-a-laptop-and-a-small-collateral", + "sourceId": "C7ZFH3", + "title": "The Dave fraud-proof algorithm — triumphing over Sybils with a laptop and a small collateral", + "description": "Current fraud-proof algorithms are susceptible to Sybil attacks, impacting security, decentralization, and (settlement) liveness. This presentation introduces _Dave_, a novel algorithm that offers an unprecedented combination of these three properties. We demonstrate that there's no realistic Sybil attack capable of exhausting defenders' resources or causing significant delays, even with minimal bond requirements.", + "track": "Layer 2", + "type": "Talk", + "expertise": "Expert", + "audience": "Research", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 594, + "tags": [ "Optimistic rollups", "fraud", "proof", "Optimistic", "rollups" ], + "keywords": [ "Interactive", "fraud", "proofs" ], + "duration": 1393, "language": "en", - "sources_swarmHash": "b77c636e49391f4c713aa60f1572f8527d82b94618ed7b10dd27f83b94af084c", - "sources_youtubeId": "pTpURVxj9hk", + "sources_swarmHash": "f6b19521b73dd026fbdfe1a938aa2a58f1b3e9332c026eba6e6fdd0cc69e350a", + "sources_youtubeId": "dI_3neyXVl0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67356cb99dbb7a90e15f86d1", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67356cb99dbb7a90e15f86d1.vtt", - "transcript_text": " I will get straight into the one slide that I have today. So last year I wrote this post that introduced this concept of d/acc and in general was basically responding to a lot of the discussion that was happening at the time around techno-optimism, the need to be more optimistic about technology and to appreciate the very positive impact that technology actually has had over the past 10 millennia, and also the extremely positive potential that it has over the next century. And at the same time take seriously some of the risks and also really focus on which technologies are going to do the best possible job of giving us the kind of future that we want while at the same time minimizing and even actively fighting against the risks. One of the arguments that I made is basically that there is this important phenomenon which is the offense-defense balance, right? Basically is it easier to attack or is it easier to defend? And if you have an environment where it's easy to attack, then you almost inevitably have this kind of dark Hobbesian choice between a very powerful sovereign and a very destructive state of anarchy. And it just inherently creates all kinds of very bad political effects on top of creating constant ongoing risk of suffering, right?
And at the same time, this is all happening under this backdrop of this discussion about is AI itself, and particularly super intelligent AI, this very big and massive risk? If it is, should we try to slow it down? But then if we do slow it down, then slowing it down forever basically just creates a world that becomes more and more unstable. And so what is the actually resilient and actually long-term stable future that we could be aiming for? And I think this to me is a near and mid-term part of the answer. So in the post that I made last year, I split up defensive technology into four categories. So the first split was the split between the world of atoms and the world of bits. So very famous distinction, might as well just grab it and use it. And within the world of atoms, split it up between macro and micro. And so macro defense, basically you think about physical resilience. And we'll have some very interesting talks later today, including topics on how to massively reduce casualties if some kind of extremely terrible disaster happens to humanity. Micro-defense basically means biodefense, so anti-pandemics. And both myself and a huge number of other really fascinating people will be talking about how we can make our environments vastly more resilient against actually existing and even potential pandemics, all without requiring significant changes in individual behavior. So you might be pleased to know that this room is actually already passively airborne-disease resistant. So for example, over here you have this, I mean, box, and the box is a HEPA filter. We have a bunch of these all around the room. And so basically this room becomes vastly more safe against COVID, against any kind of future airborne virus that comes up, without requiring anyone to actually notice. And this is one example of the kinds of open and widely accessible, easily deployable, and freedom-preserving technologies that we can deploy that can give humanity an orders of magnitude boost in its resistance against these kinds of threats. These are things that we are not doing today, and these are things that with a surprisingly low amount of investment we could actually do a much better job at. So then on the other side we have the world of bits. And the world of bits, this is a distinction that is I believe unique to me. But I talk about the distinction between cyber defense and info defense. Cyber defense is defense against threats where all reasonable people agree who the attacker is, right? So if a DeFi protocol gets hacked, then obviously the DeFi smart contract doesn't agree that it got hacked, because if it did, it wouldn't have sent any money out. But all reasonable people looking at the situation will agree that, well, yes, there is a hacker, or at the very least, someone who used a very unintended mechanism to get coins out of that system. And so we can talk about cryptography, we can talk about formal verification, blockchains, zero-knowledge proofs, what I call the Egyptian God Protocols, FHE and obfuscation, and also another important thing, hardware security, and actually deploying all of those technologies and actually even applying them at the level of operating systems and making sure that things are much safer than they would be today. And then info defense. So this is where reasonable people can actually disagree on who the attacker is.
So one man's misinformation is another man's unjustly suppressed valid point and we need technology to actually help filter through this information and help people identify what kinds of content are more likely to be actually positive and what kinds of content are more likely to be misinformative, misleading, things that even they themselves, if they better understood it, would not want to see, and do so in a way that does not involve empowering a centralized elite that decides on behalf of everyone else what is good and true and what is bad and false. Now, that is all a vision of defense. I think one thing that is also important to talk about here is I added a third dimension in the year since then, right? And I call this the survive-thrive dimension. And here we talk about not just the technologies to protect against the bad stuff, but also actually enabling the positive future, right? So in the bio side, we have longevity. Who here is excited about longevity? So now longevity, and then beside longevity, we also have BCI, and BCI is kind of conveniently beside longevity and open decentralized compute, right? So on the left, or at the top, is making sure compute is safe. At the bottom is making sure compute is amazing. And compute and biology together, we get BCI acceleration. And I think one of the really important points that we're going to make is that there's actually a lot of ties between these different spaces. It's all one very big integrated field. A lot of the viral persistence research that's being done in the context of long COVID right now actually has a lot of tie-ins that are very applicable to aging, right? So there's recent new theories that viral persistence actually is a thing that's a very big contributor to Alzheimer's. And then BCI, better medical technology, it contributes to fighting diseases, it can contribute to living longer under, quote, normal conditions, and it's also a BCI accelerator. Now, physical abundance. So we want the big solarpunk cities, we want housing to be affordable. We want, you know, housing to follow Moore's Law instead of following Eroom's Law, for those who know what those are. And then, you know, we want to go to space. We want everything about our physical environments to not just be resilient but also be affordable and amazing. And finally, collaboration technology. So even in situations where people are not attacking each other, where you actually do have reasonable people and communities that have high trust between each other, but they do want to be able to more quickly and more effectively agree on things and come to new consensus. If we want decentralized collectives to, like, actually be able to act like live players and make bold choices and keep adapting themselves to rapidly changing circumstances without that collapsing into a dictatorship, then we need much more powerful collaboration technology. So this includes all of the stuff we've been doing around public goods funding. It includes quadratic voting, other forms of voting, futarchy, all kinds of different governance ideas. So basically I think there are natural tie-ins at all sides of this between technologies that can create an environment where everything is much more safe and much more resilient by default and an environment where we have open, distributed and widely available progress for everyone. And so this is what all of the speeches that we're going to see today are going to be about.
And I'm very excited to be with you and listen to them. Thank you. And so now I'll introduce our next speaker, Eli Dourado.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731553200000, - "slot_end": 1731553800000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/105T9qheqDS91uBB6zsLjkTZKkqteIemZL0l9pkz8eJo", - "resources_slides": null, + "slot_start": 1731470400000, + "slot_end": 1731472200000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1GhOQePXCr0xuShvpJcgSNAMhIC_wT2B34JYiogZJB7s", + "resources_slides": "https://drive.google.com/file/d/1ulLAN8HrVwwBhQFbRhpx3KGT14CNJ0nZ/view", "speakers": [ - "vitalik-buterin" + "gabriel-coutinho-de-paula", + "augusto-teixeira" ] }, "vector": [ - 0, - 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -734010,6 +731929,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -734187,8 +732107,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -734632,6 +732550,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -734818,6 +732738,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -734973,6 +732894,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -735093,6 +733015,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -735241,6 +733164,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -735311,7 +733236,6 @@ 0, 2, 0, - 0, 2, 0, 0, @@ -735324,50 +733248,47 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "the-end-game-wallet-when-does-abstraction-go-too-far", - "sourceId": "ZTMLMQ", - "title": "The End Game Wallet: When Does Abstraction Go Too Far?", - "description": "Chain abstraction has taken the front seat. As innovations continue, it's becoming increasingly stark that we will eventually approach a world where third-party solvers fulfill most transactions. The core protocol is also changing to cater to further abstractions even at the validator level.
The question remains, how far are we willing to go in the name of efficiency, and optimizations, to which a user can't use Ethereum without third parties?", + "track": "Usability", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "DAO", - "Collective Intelligence", - "Regulation", - "asia", - "Collective Intelligence", - "DAO" + "Values", + "UI/UX", + "UI/UX", + "Values" ], "keywords": [ - "Asia" + "n/a" ], - "duration": 1454, + "duration": 602, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "bbf4c10d801934b671c122c84ecccd4d41134ab3102920d95762ec1e24fd214a", + "sources_youtubeId": "FBB5YWMQ56s", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673481e19dbb7a90e1d36174", + "sources_streamethId": "673579d19dbb7a90e1d1825d", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731492000000, - "slot_end": 1731493800000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/185nuWRZn9PaXkbj3mmudjiul9XhVrRireCzXcJBlu4Y", - "resources_slides": null, + "slot_start": 1731556800000, + "slot_end": 1731557400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1Yvp0nywauCOnCqYI14BUwqz77qWUB-SScBhTcicFGtg", + "resources_slides": "https://drive.google.com/file/d/17ukjHSYJPsyATV_bLOgr9PxLFKpdeebW/view", "speakers": [ - "joshua-tan" + "gregthegreek" ] }, "vector": [ @@ -735379,9 +733300,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -735999,12 +733917,12 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -736156,7 +734074,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -736182,6 +734099,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -736223,13 +734142,11 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -736247,6 +734164,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -736574,7 +734492,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -736684,6 +734601,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -736694,8 +734612,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -736707,44 +734623,47 @@ }, { "session": { - "id": "the-dave-fraud-proof-algorithm-triumphing-over-sybils-with-a-laptop-and-a-small-collateral", - "sourceId": "C7ZFH3", - "title": "The Dave fraud-proof algorithm — triumphing over Sybils with a laptop and a small collateral", - "description": "Current fraud-proof algorithms are susceptible to Sybil attacks, impacting security, decentralization, and (settlement) liveness. This presentation introduces _Dave_, a novel algorithm that offers an unprecedented combination of these three properties. 
We demonstrate that there's no realistic Sybil attack capable of exhausting defenders' resources or causing significant delays, even with minimal bond requirements.", - "track": "Layer 2", - "type": "Talk", - "expertise": "Expert", - "audience": "Research", + "id": "the-end-of-self-custodial-wallets", + "sourceId": "KDUNLM", + "title": "The end of self-custodial wallets", + "description": "This talk provides a quick overview of how countries worldwide restrict or plan to ban the self-custodial ownership model, which is the foundation of cryptocurrencies.\r\n\r\n- What kind of laws, regulations and guidance countries have passed to restrict self-custodial\r\n- What kind of areas are being targeted: ownership of cryptocurrencies, wallets, developers, interfaces\r\n- Who are the driving forces behind opposing self-custodial\r\n- How to counteract this development", + "track": "Cypherpunk & Privacy", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Business", "featured": false, "doNotRecord": false, "tags": [ - "Optimistic rollups", - "fraud", - "proof", - "Optimistic", - "rollups" + "Free Speech", + "Censorship Resistance", + "Regulation", + "fatf", + "Censorship Resistance", + "Free Speech", + "Regulation" ], "keywords": [ - "Interactive", - "fraud", - "proofs" + "Self custodial", + "FATF", + "wallet" ], - "duration": 1393, + "duration": 594, "language": "en", - "sources_swarmHash": "f6b19521b73dd026fbdfe1a938aa2a58f1b3e9332c026eba6e6fdd0cc69e350a", - "sources_youtubeId": "dI_3neyXVl0", + "sources_swarmHash": "9a46ddf422d25723e072d0ee0fd70a3bb5735f6e1210c767aaa352e247b6d9ff", + "sources_youtubeId": "Cwn42afQZ3k", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6736db4874749a4b8945b981", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736db4874749a4b8945b981.vtt", + "transcript_text": " Tämä video on tehty Yleisölle. Yleisölle on tehty Yleisölle. Yleisölle on tehty Yleisölle. Yleisölle on tehty Yleisölle. Yleisölle on tehty Yleisölle. Yleisölle on tehty Yleisölle. Yleisölle on tehty Yleisölle. Hyvä. Tänään olen täällä puhumaan selviämisestä ja miten hallitsevat ja ystävät yrittävät heitä pannaan. Tämä on järjestelmä, joten jos haluat nähdä täysin esityksen, niin seuraa minua Twitterissä, koska laitan lisää. Why does this matter? If and when the government are going to restrict the use of self-custodial wallets, it's so called bad ending. At that point it doesn't matter what you do there in the audience, because you can all go home and work in a bank or a McDonald's. So it should be obvious for you why this is important. pankissa tai McDonald'sissa. Se on tärkeää, että se on tärkeää. Ensimmäisenä omaa kastoria-korttia. Ensimmäinen on, että me olemme asettamme. Se on meidän kontroliimme. Ei ole riskejä, että pankissa tai keskusteluissa he rikkovat sen ja sinä menet. Jos menet, se on vain o screw it up and you lose. If you lose, it's only your own fault. The other important point is no vendor login, so you can change wallets. You are not locked in a single wallet provider, but you can have a competition between those, and it's very good for cost efficiency, because the cost will be very, very low for us, and you all know that we are not Kosko-efficiönsä, koska kosteus on hyvin vähäinen meille. Ja kaikki tiedätte, että emme palaa valitsemme. Ja valitsema on ei valitsema, vaan krypto, joka vahvistaa kryptovaluutteiden luonnon luonnon. 
At this point, it's no longer feasible to ban the cryptocurrencies themselves. Instead, what governments and regulators do is restrict their use, and there are mainly three ways to do it: don't allow transfers in and out of wallets, regulate developers, or regulate interactions with the wallets, like a front end such as Uniswap. And this is how regulation around the world is done. The worry with US policy is that these economic restrictions, once they are in place, tend to become permanent. So national regulators keep following what the FATF is doing, and the FATF is doing what the US says them to do. And there's also a compliance industry that is making money out of this, so for them it's very beneficial that the use of wallets gets restricted, and they will vote for more regulation every time. And the first case we have is Estonia. In 2021 it was already almost there that they banned this: a private-sector party lobbied the government that people shouldn't really be able to hold their cryptocurrencies in self-custodial wallets at all. The next case is the European Union. The new EU anti-money-laundering regulation says that self-custodial wallets themselves remain allowed, but their use is restricted; I think it comes into force next year or in 2026. After that, European Union citizens can't use self-custodial wallets for transactions of more than 1,000 euros. Then there's the Seychelles. It was the first country where this is not even guidance but its own law. Someone, I don't know who, wrote in the law that non-custodial services, including wallets, should be regulated, and it means that the developers should sign up with the local regulator if you are developing a wallet. It didn't make it into the final law text, but it was explicitly also said that this request came from FATF, and they wanted to close this so-called loophole for the users. And then there's Denmark, where they said that Uniswap and others can be identified, so that's why developers like these should also be registered in Denmark as a VASP, because they can provide services to Danish users. And what can you do? Support organizations that are pro-crypto; I think in the US that has gone quite well. And the most important thing is that you talk to local media in your own country, and try to get people there who can lobby for privacy and for your rights. Thank you, and now on to questions. Thank you very much. So again, if somebody has any questions, please raise your hand. I will toss the mic to you. Yep, we have one. Yeah, better. It's a bit far. So, of course, you can ban alcohol, as they tried to in the US, obviously, but it's an enforcement issue. Do you think that's going to give us any sort of ‑‑ that example, does that give us any reason to have hopium?
I think your talk is great, by the way. I think it's so important. I'm not trying to dumb this down. It's really important. But is there hopium out there that this isn't going to be enforceable? I think we have to work a bit harder than that. I mean, it depends, because when they banned alcohol, it was like 100 years ago, and the surveillance state didn't exist. Today, governments can follow your actions, and especially the actions of the software companies and developers very closely, so they can knock on your door if you have a GitHub account. So that's why I'm quite pessimistic that this can fly under the radar, so to speak. So there are bad people out there who do bad things. And while you can argue with how the regulations are laid out, there's not much argument that terrorist financing and money laundering should be allowed. So how do we... I think we all in this room believe in self-sovereignty, but many of us also probably think that zero knowledge is a potential solution. How do we get the governments to try it? Because it's not even in the Overton window of discussing these new technologies. How do we get them to actually approve a test case or some way of trying it? I mean, I think we already fixed it in one country by having our friends in the U.S. elect Trump. So that's a step forward. But also, I think the correct way to go about it is to talk with the media, so that we have proof, we have real scientific research, that for anti-money-laundering work the current regulations restricting centralized exchanges are enough, and they are already stopping most terrorist funding. We know that the terrorists, yes, they use USDT and Bitcoin and so on, and especially North Korea is kind of a pain in the butt, but it's still not very significant. Crypto has grown, but it's still super small compared to, you know, all the other gas and oil and whatever there is going around. Yeah, we have one there. Last question. What do you think about companies like Chainalysis and others? Do you think they make a positive impact? Some yes, some no. Some of them are pro-crypto, but some of them are part of this compliance-industry complex, and they only see how they could seek rents. You can often tell whether a company is pro-crypto or just here for the money if you read their reports. If they do so-called fear selling, they make reports saying that so many terrorists are coming and that blockchain is only used for money laundering and drugs and so on, and they show the curves of how the bad usage is growing, which by the way is not growing anymore. It means that they are selling the fear to the governments, who then buy their services. But there are some good companies; I think Chainalysis is one of the good ones. They create reports that are balanced, and they show that yeah, some things are bad, but some things are actually very good.
Thank you very much.", "eventId": "devcon-7", - "slot_start": 1731470400000, - "slot_end": 1731472200000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1GhOQePXCr0xuShvpJcgSNAMhIC_wT2B34JYiogZJB7s", - "resources_slides": null, + "slot_start": 1731647400000, + "slot_end": 1731648000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1Ap05BLrc25kR-WdwGvInSGF6oehwIIAg82A0vs0Krrg", + "resources_slides": "https://drive.google.com/file/d/1ckKhhAIZuV4WVwkWwUdDUpbYfGiRsmUD/view", "speakers": [ - "gabriel-coutinho-de-paula", - "augusto-teixeira" + "mikko-ohtamaa" ] }, "vector": [ @@ -736753,9 +734672,9 @@ 0, 0, 0, + 6, 0, 0, - 6, 0, 0, 0, @@ -737377,10 +735296,9 @@ 0, 0, 0, - 6, - 6, 0, 0, + 6, 0, 0, 0, @@ -737524,6 +735442,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -737567,7 +735486,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -737604,6 +735522,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -737644,6 +735563,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -737723,7 +735643,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -737845,7 +735764,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -737993,8 +735911,6 @@ 0, 0, 0, - 0, - 2, 2, 0, 0, @@ -738062,10 +735978,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 2, 0, @@ -738077,58 +735993,50 @@ 0, 0, 0, - 0, - 0, 0 ] }, { "session": { - "id": "the-end-game-wallet-when-does-abstraction-go-too-far", - "sourceId": "ZTMLMQ", - "title": "The End Game Wallet: When Does Abstraction Go Too Far?", - "description": "Chain abstraction has taken the front seat. As innovations continue, it's becoming increasingly stark that we will eventually approach a world where third-party solvers fulfill most transactions. The core protocol is also changing to cater to further abstractions even at the validator level. The question remains, how far are we willing to go in the name of efficiency, and optimizations, to which a user can't use Ethereum without third parties?", - "track": "Usability", - "type": "Lightning Talk", + "id": "the-fixed-rate-flywheel", + "sourceId": "WYWLXV", + "title": "The Fixed Rate Flywheel", + "description": "In the rapidly evolving landscape of modern DeFi, fixed-rate protocols have emerged as a critical component, bridging the gap between traditional finance stability and DeFi innovation. This panel introduces \"The Fixed Rate Flywheel,\" a powerful concept illustrating how fixed rate markets fuel variable lending, create hedging opportunities, and generate high-yield products. 
Join us to hear experts from DELV Tech, Morpho Labs, Phoenix Labs, and Gauntlet talk about the next evolution of DeFi.", + "track": "Cryptoeconomics", + "type": "Panel", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Values", - "UI/UX", - "UI/UX", - "Values" + "fixed", + "rate" ], "keywords": [ - "n/a" + "DeFi", + "Fixed Rates" ], - "duration": 602, + "duration": 3282, "language": "en", - "sources_swarmHash": "bbf4c10d801934b671c122c84ecccd4d41134ab3102920d95762ec1e24fd214a", - "sources_youtubeId": "FBB5YWMQ56s", + "sources_swarmHash": "fb92f51e567f58d5275601ba18e31fd5273866fca5fa0ff9479ca09ee5036cdd", + "sources_youtubeId": "RLzBsBudpFA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673579d19dbb7a90e1d1825d", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "673485c89dbb7a90e106942c", "eventId": "devcon-7", - "slot_start": 1731556800000, - "slot_end": 1731557400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Yvp0nywauCOnCqYI14BUwqz77qWUB-SScBhTcicFGtg", - "resources_slides": null, + "slot_start": 1731491400000, + "slot_end": 1731495000000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1ng1HvT-kAE4r-IB_k-m3qkQnZ9PMYl3wwR_zkEmF4Fg", + "resources_slides": "https://drive.google.com/file/d/1KWVG3Uic3DfGAsnTkx4ZCMIcsBygR6oC/view", "speakers": [ - "gregthegreek" + "alex-towle", + "merlin-egalite", + "lucas-manuel", + "violet-vienhage" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 6, @@ -738754,9 +736662,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -738764,6 +736669,10 @@ 0, 0, 0, + 6, + 6, + 6, + 6, 0, 0, 0, @@ -738933,7 +736842,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -738998,7 +736906,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -739375,6 +737282,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -739459,47 +737368,41 @@ }, { "session": { - "id": "the-end-of-self-custodial-wallets", - "sourceId": "KDUNLM", - "title": "The end of self-custodial wallets", - "description": "This talk provides a quick overview of how countries worldwide restrict or plan to ban the self-custodial ownership model, which is the foundation of cryptocurrencies.\r\n\r\n- What kind of laws, regulations and guidance countries have passed to restrict self-custodial\r\n- What kind of areas are being targeted: ownership of cryptocurrencies, wallets, developers, interfaces\r\n- Who are the driving forces behind opposing self-custodial\r\n- How to counteract this development", - "track": "Cypherpunk & Privacy", + "id": "the-future-of-ai-why-we-need-private-uncensored-permissionless-ai", + "sourceId": "EK8T9X", + "title": "The Future of AI: Why We Need Private, Uncensored, Permissionless AI", + "description": "The current path of AI development leads to a future where a few powerful companies control this transformative technology, with the potential to become the arbiter of truth, manipulate and monetize private user data, and moderate who has access to the future of intelligence.\r\n\r\nNo entity, private or public, should have the power to monopolize or contextualize truth. 
Open-source, uncensored, and decentralised AI is impervious to political fancy and ideology, and offers a necessary alternative.", + "track": "Real World Ethereum", "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Business", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Free Speech", - "Censorship Resistance", - "Regulation", - "fatf", "Censorship Resistance", - "Free Speech", - "Regulation" + "Permissionless", + "Privacy" ], "keywords": [ - "Self custodial", - "FATF", - "wallet" + "AI" ], - "duration": 594, + "duration": 457, "language": "en", "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_youtubeId": "B_5wj6TfX8s", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736db4874749a4b8945b981", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736db4874749a4b8945b981.vtt", - "transcript_text": " Tämä video on tehty Yleisölle. Yleisölle on tehty Yleisölle. Yleisölle on tehty Yleisölle. Yleisölle on tehty Yleisölle. Yleisölle on tehty Yleisölle. Yleisölle on tehty Yleisölle. Yleisölle on tehty Yleisölle. Hyvä. Tänään olen täällä puhumaan selviämisestä ja miten hallitsevat ja ystävät yrittävät heitä pannaan. Tämä on järjestelmä, joten jos haluat nähdä täysin esityksen, niin seuraa minua Twitterissä, koska laitan lisää. Why does this matter? If and when the government are going to restrict the use of self-custodial wallets, it's so called bad ending. At that point it doesn't matter what you do there in the audience, because you can all go home and work in a bank or a McDonald's. So it should be obvious for you why this is important. pankissa tai McDonald'sissa. Se on tärkeää, että se on tärkeää. Ensimmäisenä omaa kastoria-korttia. Ensimmäinen on, että me olemme asettamme. Se on meidän kontroliimme. Ei ole riskejä, että pankissa tai keskusteluissa he rikkovat sen ja sinä menet. Jos menet, se on vain o screw it up and you lose. If you lose, it's only your own fault. The other important point is no vendor login, so you can change wallets. You are not locked in a single wallet provider, but you can have a competition between those, and it's very good for cost efficiency, because the cost will be very, very low for us, and you all know that we are not Kosko-efficiönsä, koska kosteus on hyvin vähäinen meille. Ja kaikki tiedätte, että emme palaa valitsemme. Ja valitsema on ei valitsema, vaan krypto, joka vahvistaa kryptovaluutteiden luonnon luonnon. Ja nyt ei ole vielä mahdollista syntyä kryptovaluutteita itse. It's no longer in this point feasible to ban the crypto themselves. Instead, what governments and regulators work is that they will restrict its use. And there are mainly three ways to do it. Don't allow to transfer in and out from wallets. Regulate developers or regulate interactions with the wallets like a front-end Uniswap. And this is how the regulation in the world is done. ja sillä voidaan säädellä interaktioita valitseihin, kuten Uniswap. Ja tällä tavalla maailman säädellä on tehty. Meidän huolimme USA-politiikasta on se, että kaikki ekonomiset rikot ovat menneet pysyvään, mutta tämä ei ole rahaa, vaan se on vitsi. Joten kaikki kansalliset säädellä jatkuvat, mitä FITF is doing and they are doing what the US says them to do. And there's also compliance industry who is making money out of this. So for them it's very beneficial that we will restrict the use of the wallets. So they will vote for more regulation every time. And the first case we have in Estonia. 
So in 2021 it was already almost there that they banned this: a private-sector party lobbied the government that people shouldn't really be able to hold their cryptocurrencies in self-custodial wallets at all. The next case is the European Union. The new EU anti-money-laundering regulation says that self-custodial wallets themselves remain allowed, but their use is restricted; I think it comes into force next year or in 2026. After that, European Union citizens can't use self-custodial wallets for transactions of more than 1,000 euros. Then there's the Seychelles. It was the first country where this is not even guidance but its own law. Someone, I don't know who, wrote in the law that non-custodial services, including wallets, should be regulated, and it means that the developers should sign up with the local regulator if you are developing a wallet. It didn't make it into the final law text, but it was explicitly also said that this request came from FATF, and they wanted to close this so-called loophole for the users. And then there's Denmark, where they said that Uniswap and others can be identified, so that's why developers like these should also be registered in Denmark as a VASP, because they can provide services to Danish users. And what can you do? Support organizations that are pro-crypto; I think in the US that has gone quite well. And the most important thing is that you talk to local media in your own country, and try to get people there who can lobby for privacy and for your rights. Thank you, and now on to questions. Thank you very much. So again, if somebody has any questions, please raise your hand. I will toss the mic to you. Yep, we have one. Yeah, better. It's a bit far. So, of course, you can ban alcohol, as they tried to in the US, obviously, but it's an enforcement issue. Do you think that's going to give us any sort of ‑‑ that example, does that give us any reason to have hopium? I think your talk is great, by the way. I think it's so important. I'm not trying to dumb this down. It's really important. But is there hopium out there that this isn't going to be enforceable? I think we have to work a bit harder than that. I mean, it depends, because when they banned alcohol, it was like 100 years ago, and the surveillance state didn't exist. Today, governments can follow your actions, and especially the actions of the software companies and developers very closely, so they can knock on your door if you have a GitHub account. So that's why I'm quite pessimistic that this can fly under the radar, so to speak. So there are bad people out there who do bad things. And while you can argue with how the regulations are laid out, there's not much argument that terrorist financing and money laundering should be allowed. So how do we... I think we all in this room believe in self-sovereignty, but many of us also probably think that zero knowledge is a potential solution. How do we get the governments to try it?
Because it's not even in the Overton window of discussing these new technologies. How do we get them to actually approve a test case or some way of trying it? I mean, I think we already fixed it in one country by having our friends in the U.S. elect Trump. So that's a step forward. But also, I think the correct way to go about it is to talk with media, so that we have a proof, we have a real scientific research that for the anti-money laundering work, the current regulations restricting centralized exchanges is enough, and it's already stopping most of terrorist funding. We know that the terrorists, yes, they use USDT and Bitcoin and so on and especially North Korea is kind of a pain in the butt but it's not very significant still. Crypto has grown but still it's super small compared to you know all the other gas and oil and whatever there is going around. Yeah, we have one there. Last question. What do you think about companies like Chain Analysis and other days? Do you think they make a positive impact? Some yes, some no. Some of them are pro-crypto, but some of them are part of this compliance industry complex, and they only see how they could seek rents. nähdä, miten he voisivat saada lupaa. Ja usein voi tietää, onko yritys Pro Crypto tai onko se täällä vain rahaa, jos lukee raportteja. Ja jos he tekevät niin sanottua fear-saleja, kuten he tekevät raportteja, joissa sanotaan, että niin moni teröristi tulee ja blockchain on vain käyttöön rahallisuus ja rauhoja ja niin edelleen. chain is only used for money laundering and drugs and so on, and they show the curves like how it's bad usage is growing, which by the way is not anymore growing. It means that they are selling the fare to the governments who then buy their services. But some good companies, like I think chain analysis is one of the good ones. They create reports that are balanced and they show that yeah, some things are bad, but some things are actually very good. Thank you very much.", + "sources_streamethId": "", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67359da49dbb7a90e17463e3.vtt", + "transcript_text": " Hi, guys. I hope you're having a great DevCon. It's great to be here. I've got five minutes, so I'm going to go really fast. Bear with me. So imagine a small group of companies control access to the most powerful machine learning used by billions of people worldwide. They record all your AI conversations, monetize your data, and share it with governments on demand. Everything you share is attached to your identity forever. They record... Whoops. What's going on here? Okay. Sorry about that. Through politicized content policies, their AI models are trained to coddle and redirect you when you explore topics determined to be taboo. They restrict and redact information and influence your thinking based on their view of the truth. Does this sound like science fiction? It's not. It's already happening. The current AI development path leads to a few powerful companies controlling the technology and becoming the arbiter of truth. The race to dominate the consumer AI market is on. OpenAI and by proxy Microsoft have a head start. Their partnership puts AI in the hands of 1.5 billion iPhone users. The Biden administration's policies entrench AI development in the hands of a few powerful entities, accelerating centralization that favors incumbents. It's notable that the newly appointed U.S. 
Artificial Intelligence Safety and Security Board doesn't include any open source or decentralized AI leaders. I believe AI should be optionally private. Our interactions with AI are personal and intimate. This isn't the polished... This is not the right slide either. Sorry, guys. This isn't the polished social media version of us. It's raw, honest, intellectual exploration. Would you share your diary online? That's the level of vulnerability we expose when we use AI without privacy safeguards. Popular AI tools store your inputs and outputs, and in many cases, the platform owns the outputs you create. As I mentioned earlier, these inputs are attached to your identity. Platforms are vulnerable to hackers, which continually lead to breaches. Does anyone remember Equifax? Information held by governments is equally vulnerable, and the authorities making the privacy rules can't even protect their own data. A recent EU Parliament data breach exposed sensitive personal data of more than 8,000 staffers. Your data doesn't need to be leaked to expose you to manipulation. Cambridge Analytica demonstrated how information can change the tech giants, sorry, the information you share with tech giants can be scraped without your consent to create campaigns designed to influence your views. We need to stop volunteering our data and take control over what we share with AI. I believe AI should be uncensored. Centralized platforms censor according to often opaque content policies, influenced by the values of those who control the platform, hidden behind system prompts that are only revealed through jailbreaking. Users think they're interacting with true machine intelligence, a calculator programmed to do statistical inference on language. In reality, they're engaging with proprietary partiality within guardrails imposed by humans who have their own biases. At best, we receive nonsensical outputs like Gemini producing black vikings. At worst, human adulterated AI can perpetuate the silencing of public discourse. No entity, public or private, should monopolize or contextualize truth. Open access to AI is under threat. Biden's AI executive order requires licenses for large models that restrict the number of parameters allowed. California proposed an AI bill which includes criminalizing certain open source AI developments. The EU's AI Act is more permissive of open source but strict on large scale AI. The Act's author himself has raised concerns that the regulatory bar has been set too high. A stark example of excessive regulation backfiring. France-based Mistral and Meta both recently withheld their latest open source AI models from the EU, preventing an entire region from accessing advancing AI. Politics will play a role in AI. Some push for tight controls, others champion open source development. The back and forth highlights that AI's future should not hinge on political whims. When confidence in our political leaders is waning, do we really trust them to regulate intelligence? AI is being adopted rapidly. A recent Harvard study found that nearly 40% of all U.S. adults between 18 to 64 have used generative AI. But unlike money or social media, we don't need to change our existing behavior, but we do need to start as we intend to continue. We must seek out and use alternatives. Open source permissionless AI, evolved through thoughtful iteration, is impervious to political fancy, ideology, and the antidote to gate-kept, curated, and censored AI. 
It's pretty easy for a developer to run a small model privately, locally, using a Lama or a similar service. Open source models are rapidly becoming competitive, surpassing closed source models on many benchmarks. You can find a plethora of open source models on Hugging Face. I've listed a few here. For those who can't or don't want to run AI locally, we created Venice, a generative AI platform that embodies the principles of permissionlessness. You can chat with some of the leading open source models too large to run locally, generate images, create and interact with AI characters, and write and debug code, all in private. Venice uses decentralized infrastructure to run the platform. All of your Venice activity is stored only on your browser. Venice never shares your data with anyone. We simply can't because we never had it in the first place. Use it anonymously and for free. If you want greater amounts of inference or to access the API, you can upgrade to Pro. There's no doubt that AI would change humanity, and we should engage with it, but through mediums that are optionally private, uncensored, and permissionless. Thanks. Hello. Okay, that's amazing. This is time for question right now. We probably have time for one question. Anyone have any burning question at the moment about permission as AI? It's kind of an important part of the... Oh, we'll go. One question. How can you in that system run your own LORAS and your own, how was it called, tuned models. Can you do that? Since you say you do not have the data on your servers. Yeah. So we provide access to open source models. We host those on our service. We have a proxy service. So when you send in your prompt to Venice, it's encrypted, sent via proxy to a decentralized GPU. The response is also encrypted via proxy and sent back to you on your browser. So nothing persists on Venice servers. We don't see your prompt, we don't see the response, and it's never stored. So I cannot upload my own LORAs after I did my own training? You can't upload your own LORuras for training, but just this week, hopefully later today, so I might be front-running myself, we are going live with image-to-image. 
So you'll be able to upload your own images.", "eventId": "devcon-7", - "slot_start": 1731647400000, - "slot_end": 1731648000000, + "slot_start": 1731564000000, + "slot_end": 1731564600000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Ap05BLrc25kR-WdwGvInSGF6oehwIIAg82A0vs0Krrg", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1kklsZ1YE71cdtzZNkgKNXlsh133eDOoZO3-I29W9u9s", + "resources_slides": "https://drive.google.com/file/d/1oduMhD9MDwrPtDQAGfBx888DIF9nj_f7/view", "speakers": [ - "mikko-ohtamaa" + "teana-baker-taylor" ] }, "vector": [ @@ -739508,10 +737411,8 @@ 0, 0, 0, - 6, - 0, - 0, 0, + 6, 0, 0, 0, @@ -740135,13 +738036,13 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -740281,7 +738182,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -740361,7 +738261,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -740371,6 +738270,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -740521,6 +738421,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -740751,9 +738652,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -740824,8 +738722,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -740839,43 +738737,49 @@ }, { "session": { - "id": "the-evolution-of-zk-from-1985-2013", - "sourceId": "FGXMGH", - "title": "The Evolution of ZK from 1985-2013", - "description": "This session delves into the rich history of zero-knowledge proofs (ZKPs), tracing key milestones from their inception in 1985 to groundbreaking advancements like simulation extractability and the first non-interactive zero-knowledge protocol (NIZK), the first SNARK protocol, etc. While many advances happened within the crypto space, it is beneficial to be aware about the evolution of ZK prior to us inheriting it from the theoretical world.", - "track": "Applied Cryptography", + "id": "the-future-of-eof-layer-1-layer-2-and-beyond", + "sourceId": "9EBQ3H", + "title": "The Future of EOF: Layer 1, Layer 2, and Beyond!", + "description": "While the EVM Object Format provides a mechanism to modernize the EVM, the container format itself provides a stable path for innovation and experimentation within the base and rollup layers of ethereum, as well as rollup layers, and even chain free execution.\r\n\r\nIn this presentation we will show how the structure of the EOF container may be adapted to support these potential use cases.", + "track": "Core Protocol", "type": "Talk", - "expertise": "Expert", - "audience": "Developer", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "history" - ], "tags": [ - "Zero-Knowledge", - "Cryptography", - "history", - "Cryptography", - "Zero-Knowledge" + "Layer 1", + "EVM-equivalent", + "Politics", + "EVM", + "EVM-equivalent", + "Layer 1", + "Politics" ], - "language": "en", - "speakers": [ - "vanishree-rao" + "keywords": [ + "EOF", + "EVM" ], + "duration": 1363, + "language": "en", + "sources_swarmHash": "0ffb833072a1d18d73796d3f2897b4dc1730db7e1b9afca32f958cb51ceb815b", + "sources_youtubeId": "NeKMerFPJoM", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673cc6b5982f234a120cec86", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731656400000, - "slot_end": 1731658200000, + "slot_start": 1731563400000, + "slot_end": 1731565200000, "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1sY_h2GBY4R5mcKYTqc0O1AuTzmygnIH1SdXhzmaDIyE" + "resources_presentation": 
"https://docs.google.com/presentation/d/1xsXLO6lk8scS1Bau7a1gPEtC1QKpw5GdJrAD2ZppNaI", + "resources_slides": "https://drive.google.com/file/d/1p4Rn8g0ziyzu5jLvAwnPS699M6WjJF_d/view", + "speakers": [ + "danno-ferrin" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -741214,6 +739118,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -741503,7 +739408,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -741633,14 +739537,13 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -741832,6 +739735,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -741949,6 +739853,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -741959,6 +739864,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -742119,7 +740025,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -742184,13 +740089,13 @@ 0, 0, 0, + 2, 0, 0, 0, 2, 0, 0, - 2, 0, 0, 0, @@ -742206,62 +740111,44 @@ }, { "session": { - "id": "the-fixed-rate-flywheel", - "sourceId": "WYWLXV", - "title": "The Fixed Rate Flywheel", - "description": "In the rapidly evolving landscape of modern DeFi, fixed-rate protocols have emerged as a critical component, bridging the gap between traditional finance stability and DeFi innovation. This panel introduces \"The Fixed Rate Flywheel,\" a powerful concept illustrating how fixed rate markets fuel variable lending, create hedging opportunities, and generate high-yield products. Join us to hear experts from DELV Tech, Morpho Labs, Phoenix Labs, and Gauntlet talk about the next evolution of DeFi.", - "track": "Cryptoeconomics", - "type": "Panel", + "id": "the-future-of-layer-2-research-development-and-next-gen-technologies", + "sourceId": "PJQQSR", + "title": "The Future of Layer 2: Research, Development, and Next-Gen Technologies", + "description": "Discussion around L2 blockchain research and development. What are the major challenges for L2s to advance, and what solutions are being explored? What will the L2 space look like next year and beyond? 
The talk will be illustrated with examples from Arbitrum’s research and development.", + "track": "Layer 2", + "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Developper", "featured": false, "doNotRecord": false, "tags": [ - "fixed", - "rate" + "Layer 2s", + "Scalability", + "arbitrum", + "Layer 2s", + "Scalability" ], "keywords": [ - "DeFi", - "Fixed Rates" + "Arbitrum" ], - "duration": 3282, + "duration": 1539, "language": "en", - "sources_swarmHash": "fb92f51e567f58d5275601ba18e31fd5273866fca5fa0ff9479ca09ee5036cdd", - "sources_youtubeId": "RLzBsBudpFA", + "sources_swarmHash": "04837782db7800ae0149069f9ac27bfc65f9d8593413bf75bb9314b9c9604a2f", + "sources_youtubeId": "6GHjgjD9Va8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673485c89dbb7a90e106942c", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731491400000, - "slot_end": 1731495000000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1ng1HvT-kAE4r-IB_k-m3qkQnZ9PMYl3wwR_zkEmF4Fg", - "resources_slides": null, + "slot_start": 1731492000000, + "slot_end": 1731493800000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1j5n0blTsDLltg5bxumMOQ0zvAqbfL-faBMhuzsnBX3k", + "resources_slides": "https://drive.google.com/file/d/1bBY4w4qCHkXs8_-PjrCuA-rFAxny5H8A/view", "speakers": [ - "alex-towle", - "merlin-egalite", - "lucas-manuel", - "violet-vienhage" + "ed-felten" ] }, "vector": [ - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -742269,6 +740156,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -742879,10 +740767,6 @@ 0, 0, 0, - 6, - 6, - 6, - 6, 0, 0, 0, @@ -742902,6 +740786,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -743078,6 +740963,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -743151,6 +741037,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -743495,8 +741382,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -743511,6 +741396,17 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -743559,7 +741455,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -743575,47 +741470,46 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0 ] }, { "session": { - "id": "the-future-of-ai-why-we-need-private-uncensored-permissionless-ai", - "sourceId": "EK8T9X", - "title": "The Future of AI: Why We Need Private, Uncensored, Permissionless AI", - "description": "The current path of AI development leads to a future where a few powerful companies control this transformative technology, with the potential to become the arbiter of truth, manipulate and monetize private user data, and moderate who has access to the future of intelligence.\r\n\r\nNo entity, private or public, should have the power to monopolize or contextualize truth. Open-source, uncensored, and decentralised AI is impervious to political fancy and ideology, and offers a necessary alternative.", - "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "the-future-of-light-clients", + "sourceId": "UL8U8B", + "title": "The Future of Light Clients", + "description": "Ethereum has achieved a remarkable feat: production-ready light clients. There are now at least seven light client projects active on Ethereum today.\r\n\r\nHowever, light clients have kept up with Ethereum’s future, Layer 2s. Implementations for layer 2s have been mostly overlooked. This is due to both the low prioritization of work on light clients and significant technical challenges. 
In this talk, we will discuss the path to layer 2 light clients and our work to bring them to production in Helios.", + "track": "Layer 2", + "type": "Talk", + "expertise": "Expert", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Censorship Resistance", - "Permissionless", - "Privacy" - ], - "keywords": [ - "AI" + "Layer 2s", + "Light Clients" ], - "duration": 457, + "keywords": [], + "duration": 1503, "language": "en", - "sources_swarmHash": "c5bb2e821c903b3a3b35c03901a6a4ecd4fbfb64f461cc95cf25ec2dcd983d8a", - "sources_youtubeId": "2rVhw5W3kaE", + "sources_swarmHash": "ae42d7faa5d49909983fcbdfc21c6fb48c8961506f5fd90a364aa222c9eea601", + "sources_youtubeId": "SrZBfwnjf7M", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67359da49dbb7a90e17463e3", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67359da49dbb7a90e17463e3.vtt", - "transcript_text": " Hi, guys. I hope you're having a great DevCon. It's great to be here. I've got five minutes, so I'm going to go really fast. Bear with me. So imagine a small group of companies control access to the most powerful machine learning used by billions of people worldwide. They record all your AI conversations, monetize your data, and share it with governments on demand. Everything you share is attached to your identity forever. They record... Whoops. What's going on here? Okay. Sorry about that. Through politicized content policies, their AI models are trained to coddle and redirect you when you explore topics determined to be taboo. They restrict and redact information and influence your thinking based on their view of the truth. Does this sound like science fiction? It's not. It's already happening. The current AI development path leads to a few powerful companies controlling the technology and becoming the arbiter of truth. The race to dominate the consumer AI market is on. OpenAI and by proxy Microsoft have a head start. Their partnership puts AI in the hands of 1.5 billion iPhone users. The Biden administration's policies entrench AI development in the hands of a few powerful entities, accelerating centralization that favors incumbents. It's notable that the newly appointed U.S. Artificial Intelligence Safety and Security Board doesn't include any open source or decentralized AI leaders. I believe AI should be optionally private. Our interactions with AI are personal and intimate. This isn't the polished... This is not the right slide either. Sorry, guys. This isn't the polished social media version of us. It's raw, honest, intellectual exploration. Would you share your diary online? That's the level of vulnerability we expose when we use AI without privacy safeguards. Popular AI tools store your inputs and outputs, and in many cases, the platform owns the outputs you create. As I mentioned earlier, these inputs are attached to your identity. Platforms are vulnerable to hackers, which continually lead to breaches. Does anyone remember Equifax? Information held by governments is equally vulnerable, and the authorities making the privacy rules can't even protect their own data. A recent EU Parliament data breach exposed sensitive personal data of more than 8,000 staffers. Your data doesn't need to be leaked to expose you to manipulation. 
Cambridge Analytica demonstrated how information can change the tech giants, sorry, the information you share with tech giants can be scraped without your consent to create campaigns designed to influence your views. We need to stop volunteering our data and take control over what we share with AI. I believe AI should be uncensored. Centralized platforms censor according to often opaque content policies, influenced by the values of those who control the platform, hidden behind system prompts that are only revealed through jailbreaking. Users think they're interacting with true machine intelligence, a calculator programmed to do statistical inference on language. In reality, they're engaging with proprietary partiality within guardrails imposed by humans who have their own biases. At best, we receive nonsensical outputs like Gemini producing black vikings. At worst, human adulterated AI can perpetuate the silencing of public discourse. No entity, public or private, should monopolize or contextualize truth. Open access to AI is under threat. Biden's AI executive order requires licenses for large models that restrict the number of parameters allowed. California proposed an AI bill which includes criminalizing certain open source AI developments. The EU's AI Act is more permissive of open source but strict on large scale AI. The Act's author himself has raised concerns that the regulatory bar has been set too high. A stark example of excessive regulation backfiring. France-based Mistral and Meta both recently withheld their latest open source AI models from the EU, preventing an entire region from accessing advancing AI. Politics will play a role in AI. Some push for tight controls, others champion open source development. The back and forth highlights that AI's future should not hinge on political whims. When confidence in our political leaders is waning, do we really trust them to regulate intelligence? AI is being adopted rapidly. A recent Harvard study found that nearly 40% of all U.S. adults between 18 to 64 have used generative AI. But unlike money or social media, we don't need to change our existing behavior, but we do need to start as we intend to continue. We must seek out and use alternatives. Open source permissionless AI, evolved through thoughtful iteration, is impervious to political fancy, ideology, and the antidote to gate-kept, curated, and censored AI. It's pretty easy for a developer to run a small model privately, locally, using a Lama or a similar service. Open source models are rapidly becoming competitive, surpassing closed source models on many benchmarks. You can find a plethora of open source models on Hugging Face. I've listed a few here. For those who can't or don't want to run AI locally, we created Venice, a generative AI platform that embodies the principles of permissionlessness. You can chat with some of the leading open source models too large to run locally, generate images, create and interact with AI characters, and write and debug code, all in private. Venice uses decentralized infrastructure to run the platform. All of your Venice activity is stored only on your browser. Venice never shares your data with anyone. We simply can't because we never had it in the first place. Use it anonymously and for free. If you want greater amounts of inference or to access the API, you can upgrade to Pro. There's no doubt that AI would change humanity, and we should engage with it, but through mediums that are optionally private, uncensored, and permissionless. Thanks. 
Hello. Okay, that's amazing. This is time for question right now. We probably have time for one question. Anyone have any burning question at the moment about permission as AI? It's kind of an important part of the... Oh, we'll go. One question. How can you in that system run your own LORAS and your own, how was it called, tuned models. Can you do that? Since you say you do not have the data on your servers. Yeah. So we provide access to open source models. We host those on our service. We have a proxy service. So when you send in your prompt to Venice, it's encrypted, sent via proxy to a decentralized GPU. The response is also encrypted via proxy and sent back to you on your browser. So nothing persists on Venice servers. We don't see your prompt, we don't see the response, and it's never stored. So I cannot upload my own LORAs after I did my own training? You can't upload your own LORuras for training, but just this week, hopefully later today, so I might be front-running myself, we are going live with image-to-image. So you'll be able to upload your own images.", + "sources_streamethId": "673486b89dbb7a90e1136426", "eventId": "devcon-7", - "slot_start": 1731564000000, - "slot_end": 1731564600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1kklsZ1YE71cdtzZNkgKNXlsh133eDOoZO3-I29W9u9s", - "resources_slides": null, + "slot_start": 1731493800000, + "slot_end": 1731495600000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/11L_sO6Usnx1os7aiKFPC2mNm1diDnV9Hlo7PETnsic8", + "resources_slides": "https://drive.google.com/file/d/1jADpteHZL5COexS36m7GhgUqaR7Fa8rw/view", "speakers": [ - "teana-baker-taylor" + "noah-citron" ] }, "vector": [ @@ -743625,8 +741519,8 @@ 0, 0, 0, - 6, 0, + 6, 0, 0, 0, @@ -744387,6 +742281,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -744432,6 +742327,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -744486,10 +742382,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -744518,7 +742410,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -744638,7 +742529,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -744937,12 +742827,11 @@ 0, 2, 0, + 2, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -744955,46 +742844,41 @@ }, { "session": { - "id": "the-future-of-eof-layer-1-layer-2-and-beyond", - "sourceId": "9EBQ3H", - "title": "The Future of EOF: Layer 1, Layer 2, and Beyond!", - "description": "While the EVM Object Format provides a mechanism to modernize the EVM, the container format itself provides a stable path for innovation and experimentation within the base and rollup layers of ethereum, as well as rollup layers, and even chain free execution.\r\n\r\nIn this presentation we will show how the structure of the EOF container may be adapted to support these potential use cases.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "the-future-of-web3-grants-learnings-from-studying-30-programs", + "sourceId": "F9YCZY", + "title": "The Future of Web3 Grants: Learnings from Studying 30+ Programs", + "description": "This presentation will cover learnings from studying almost 3 dozen grant programs across multiple chains and ecosystems. I will present an overview of the state of grants across Ethereum as well as Bitcoin, Cardano, Solana, and other chains. 
I will present on the most pressing challenges for grant operators, feedback from grantees on their experiences, and will present a potential path forward in terms of collective priorities that can help all programs improve.", "track": "Coordination", "type": "Lightning Talk", "expertise": "Beginner", "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ "capital" ], "keywords": [ "Grant", "Allocation", "Capital" ], "duration": 437, "language": "en", "sources_swarmHash": "53db20665d6eb076c87dcfb4bd938bed30aa84c796cc95e39f06bdc9eff7351c", "sources_youtubeId": "Vp6ju5k3w3A", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736f1cc74749a4b8923840a", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736f1cc74749a4b8923840a.vtt", "transcript_text": " Hello and good morning, everyone. I'm Eugene Leventhal. I'm currently working on governance at Scroll and I'm a research director at MetaGov. Today I'll talk about some of the research that we've been working on over the last two years at MetaGov in trying to also get a qualitative understanding of the nature of grant programs in our space, so we can better interface with grant programs beyond Web3 as well. So today, I know we don't have a lot of time, so I'm going to run through some quick context, some takeaways, and kind of what I'm thinking might be happening as we go into 2025 and beyond. And so to start with some context, you know, as part of this research, as mentioned, we've covered a bunch of different grant programs. This is just a sampling of some of them. So you'll see it's, you know, a lot of the major programs in the Ethereum world, as well as some other L1 ecosystems as well. So we're lucky that we're getting a view well beyond any kind of single set of programs, and a lot of these folks have very different priorities, so it is really interesting to capture that. And this year as well we expanded to not just cover programs but we also got into covering grant tooling as well. And as you'll see here, we aren't covering any of the grant stacks themselves; we kind of chose to focus on tools around the ecosystem that can make it possible for grant operators to perform better, for grantees to better understand how they're performing relative to others, and so that we can all just have a better sense of the data infrastructure around the space. And so I wish I could just point to a slide with 100% confidence that this is the amount of grants shipped. And that's actually its own challenge, because a lot of the data is fractured and siloed. Not all programs are on chain. Even the ones that are need a little more sense-making. There aren't perfect databases. You know, from the programs that we've covered, we've seen over $1.2 billion USD go out. We haven't covered the formal RPGF programs and the Arbitrum and Taiko and some of the other very large programs out there.
So, you know, there are multiple efforts, including Open Source Observer and DAOIP-5 from MetaGov and DAOstar, where we are trying to improve this landscape. So I hope at DevCon 8, wherever we'll be in the world, whether it's me, Shinya, or anyone else working on these problems, someone will be able to show off this wonderful new database of clarified information. Let's get into the actual takeaways from studying these programs. One thing is on the mechanism side. We see that basically, you know, prospective grants, I'm giving you money to go try a new thing, and as of more recently, I'm putting in at least some milestones in there. That's kind of the norm right now. And in the last, you know, six to 12 months, we've definitely seen more and more experimentation. I feel like thanks to Optimism, the retro programs are definitely getting the most attention. But if anyone caught Rafa from zkSync presenting in the GovHub yesterday, you saw like 15 different little experiments running. There are a lot of cool mechanisms being created. I think Owocki in his book charted around 50 plus potential mechanisms to utilize. And we're going to definitely see a lot more experimentation in the coming months and years. Also, looking backwards, there's a recognition that we all want to better understand our impact, but what we actually mean by impact and how we go about studying it has been a huge challenge. And so at the very least, as a space, there's that recognition that we want to understand the impact. We don't feel like we really understand the value of all the grants shipped, and we don't know how to measure it. And related to that is also thinking about, well, great, you gave someone a grant. Was that actually enough to help them succeed? A lot of grantees actually need more than just money to ensure that they're able to accomplish their desired outcomes and impact. And as mentioned, we're seeing more tooling and infra. And as we start thinking about the future, you know, just the tools themselves are great, but they're not always enough. We need more documentation of what's happening. We need more of both qualitative and quantitative analyses. We want to see more experiments happening. And I personally think in this next year we're going to see a strong embrace of pluralistic grant programs, so a single funding entity might have five to ten separate programs running with various levels of coordination between these programs. And, you know, I think we will see kind of some data and operational coordination across programs, but then letting each program kind of run their own thing. In the context of impact, and if anyone had a chance to drop by the grant hub yesterday, you know, we had a whole day focused on impact because, again, there is this recognition. And so some groups are getting open to embracing this program evaluation model where we better understand what are the outcomes, right, the milestones you commit to, what outcomes do those produce, and over time, how does that play out into impact? But we need more accountability of the grant programs, of the grantees, and throughout. So definitely looking for more improvement and adoption of some of these tools and use cases. With that, unfortunately, the time is up for today, so I'll just shift over to questions, and if anyone wants to geek out on grant programs later, please always feel free to find me.
Thank you so much, Eugene. We see that there are no questions yet. So please scan the QR code to submit your questions. But while I have Eugene on stage, I'm really curious, are there any key takeaways from grant programs run by other chains that you think the grant programs in the Ethereum ecosystem can really learn a lot from? Yeah, I think the first thing, and this isn't exclusive to Web3 individually, you know, we have already run some events where we've had traditional foundations and re-granting non-profits show up. I think it's just the recognition that impact measurement is hard for organizations that have been doing this for a century. So we shouldn't feel bad that it's hard for us. And especially going back to one of the questions that I saw Shinya got, right? A lot of grant programs might only have one person running the entire program full time. That's not enough to both build really good systems, build relationships and figure out your impact. So if you actually are trying to deeply commit to doing grants, you have to be willing to commit to the operational overhead that inherently comes with doing it well. And I see the questions pouring in now. So look at the first upvoted one, tips and tricks to find good grants. Yeah, so I think for the most part, the days of free money are over. You know, for those who have been applying, you could just like show up, vaguely express interest, and then magically money appears in your wallet. At this point, you definitely need to focus on having a clear vision of what are you trying to accomplish and what does that mean for the ecosystem that you're applying to. Don't focus on spray and pray applying to every program you can. Pick one, maybe two or three that you could deeply commit with and work with and talk to them about, well, how does this help you? How can this be a two way relationship? Thank you so much, Eugene. Unfortunately, that's all the time we have. So let's put our hands together for him.", "eventId": "devcon-7", - "slot_start": 1731563400000, - "slot_end": 1731565200000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1xsXLO6lk8scS1Bau7a1gPEtC1QKpw5GdJrAD2ZppNaI", - "resources_slides": null, + "slot_start": 1731641400000, + "slot_end": 1731642000000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1kRi6qfFHeK8txYMq58KLUaOTV4stHccKNP0m-WyZWWg", + "resources_slides": "https://drive.google.com/file/d/1C0x3d-0uCFsO6Fi8OyoxrZyKvdMWX9Nl/view", "speakers": [ - "danno-ferrin" + "eugene-leventhal" ] }, "vector": [ @@ -745002,7 +742886,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -745010,6 +742893,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -745337,7 +743221,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -745638,6 +743521,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -745764,7 +743648,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -745956,7 +743839,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -746075,7 +743957,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -746086,7 +743967,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -746250,6 +744130,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -746316,12 +744197,10 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, + 2, 0, 0, 0, @@ -746334,41 +744213,49 @@ }, { "session": { - "id": "the-future-of-layer-2-research-development-and-next-gen-technologies", - "sourceId": "PJQQSR", - "title": "The Future of Layer 2: Research, Development, and Next-Gen Technologies", - "description": "Discussion around L2 blockchain research and development. What are the major challenges for L2s to advance, and what solutions are being explored? 
What will the L2 space look like next year and beyond? The talk will be illustrated with examples from Arbitrum’s research and development.", - "track": "Layer 2", + "id": "the-history-and-philosophy-of-cypherpunk", + "sourceId": "8JVYCQ", + "title": "The History and Philosophy of Cypherpunk", + "description": "Rather than bend the knee to Donald Trump, the goal of the cypherpunk movement is to abolish the state in order to maximize human freedom via privacy-enhancing decentralized technologies. After reviewing the history of this deviant group of programmers in the 1980s, what philosophical and technical lessons do the cypherpunks hold for Ethereum today? Censorship-resistant digital cash was only the start; the missing parts of their legacy are mixnets and anonymous credentials for identity.", + "track": "Cypherpunk & Privacy", "type": "Talk", - "expertise": "Intermediate", - "audience": "Developper", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", - "Scalability", - "arbitrum", - "Layer 2s", - "Scalability" + "Anonymity", + "Censorship Resistance", + "Digital Sovereignty", + "cypherpunk", + "mixnet", + "cryptoanarchy", + "Anonymity", + "Politics", + "Values" ], "keywords": [ - "Arbitrum" + "mixnets", + "cypherpunk", + "cryptoanarchist" ], - "duration": 1539, + "duration": 1555, "language": "en", - "sources_swarmHash": "04837782db7800ae0149069f9ac27bfc65f9d8593413bf75bb9314b9c9604a2f", - "sources_youtubeId": "6GHjgjD9Va8", + "sources_swarmHash": "89ddf65d60e9d080ec70f2820e3674c757d151a0741047ec721bd81ba034a27e", + "sources_youtubeId": "OZwG_Tx1hdA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673351433a168eb5355b5f3c", "eventId": "devcon-7", - "slot_start": 1731492000000, - "slot_end": 1731493800000, + "slot_start": 1731407400000, + "slot_end": 1731409200000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1j5n0blTsDLltg5bxumMOQ0zvAqbfL-faBMhuzsnBX3k", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1ovH3oyNrS_ZaZbKCeLkHxgPjrRCAzaWP7RVIf9TRkOo", + "resources_slides": "https://drive.google.com/file/d/1PuUJE7912QacTfZSG5XsYskVHL4tIxrY/view", "speakers": [ - "ed-felten" + "max-hampshire", + "harry-halpin", + "iness-ben-guirat" ] }, "vector": [ @@ -746377,8 +744264,6 @@ 0, 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -746738,6 +744623,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -747011,29 +744897,10 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -747166,6 +745033,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -747287,6 +745155,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -747474,6 +745343,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -747518,6 +745388,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -747624,45 +745495,38 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -747686,7 +745550,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -747698,47 +745561,77 @@ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, 2, 0, 0, 0, 0, + 0, + 0, + 0, 0 ] }, { "session": { "id": "the-future-of-light-clients", "sourceId":
"UL8U8B", - "title": "The Future of Light Clients", - "description": "Ethereum has achieved a remarkable feat: production-ready light clients. There are now at least seven light client projects active on Ethereum today.\r\n\r\nHowever, light clients have kept up with Ethereum’s future, Layer 2s. Implementations for layer 2s have been mostly overlooked. This is due to both the low prioritization of work on light clients and significant technical challenges. In this talk, we will discuss the path to layer 2 light clients and our work to bring them to production in Helios.", - "track": "Layer 2", + "id": "the-hunt-for-impactful-use-cases-from-the-crypto-for-good-fund-what-15-blockchain-pilots-revealed-in-emerging-markets", + "sourceId": "TV3QRD", + "title": "The Hunt for Impactful Use Cases from the Crypto For Good Fund: What 15 Blockchain Pilots Revealed in Emerging Markets", + "description": "* This talk will provide a snapshot of the some of most impactful real world uses of web3 in emerging markets covering the additionality added by blockchain. \r\n* Additionally, the talk will deep-dive into the insights and results of 3 web3 pilots funded by Mercy Corps Ventures in Africa & Latin America, showcasing how web3 is addressing the needs of financially underserved and climate vulnerable populations.", + "track": "Real World Ethereum", "type": "Talk", - "expertise": "Expert", - "audience": "Research", + "expertise": "Beginner", + "audience": "Product", "featured": false, "doNotRecord": false, + "keywords": [ + "Emerging Markets", + "Africa", + "Latin America" + ], "tags": [ - "Layer 2s", - "Light Clients" + "Use Cases", + "RWA", + "Ethereum for Good", + "latin", + "america", + "Ethereum for Good", + "RWA", + "Use Cases" ], - "keywords": [], - "duration": 1503, "language": "en", - "sources_swarmHash": "ae42d7faa5d49909983fcbdfc21c6fb48c8961506f5fd90a364aa222c9eea601", - "sources_youtubeId": "SrZBfwnjf7M", + "sources_swarmHash": "43e1e9395b8434ec533fbc43720e541e8b9bbd1ba59495a15958edbbc71e3873", + "sources_youtubeId": "180uJuutaYQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673486b89dbb7a90e1136426", - "eventId": "devcon-7", - "slot_start": 1731493800000, - "slot_end": 1731495600000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/11L_sO6Usnx1os7aiKFPC2mNm1diDnV9Hlo7PETnsic8", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "noah-citron" - ] + "timothy-asiimwe" + ], + "eventId": "devcon-7", + "slot_start": 1731641400000, + "slot_end": 1731643200000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1vwkrczNxrHXLNfycNjtYzjJo4jXX3Z2RUJ7NWPh4OMQ", + "resources_slides": "https://drive.google.com/file/d/1M_H16XPRyv40wQjr4Nz7RTEixXFZ95vf/view" }, "vector": [ 0, @@ -747747,7 +745640,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -748381,14 +746273,10 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, + 6, 0, 0, 0, @@ -748512,7 +746400,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -748558,7 +746445,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -748575,6 +746461,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -748621,6 +746508,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -748648,6 +746536,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -748996,6 +746885,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -749054,11 +746945,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, 0, 2, 0, @@ -749069,52 +746960,53 @@ 0, 0, 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": 
"the-future-of-web3-grants-learnings-from-studying-30-programs", - "sourceId": "F9YCZY", - "title": "The Future of Web3 Grants: Learnings from Studying 30+ Programs", - "description": "This presentation will cover learnings from studying almost 3 dozen grant programs across multiple chains and ecosystems. I will present an overview of the state of grants across Ethereum as well as Bitcoin, Cardano, Solana, and other chains. I will present on the most pressing challenges for grant operators, feedback from grantees on their experiences, and will present a potential path forward in terms of collective priorities that can help all programs improve.", - "track": "Coordination", - "type": "Lightning Talk", + "id": "the-long-con-pig-butchering-drainers-and-job-scams", + "sourceId": "STMCNZ", + "title": "The Long Con: Pig Butchering, Drainers, and Job Scams", + "description": "I'll discuss the different types of malicious actors from low-skilled script kiddies to government-sanctioned advanced persistent threats. This presentation will include an overview on drainer groups and how sophisticated scammers string along their victims, fattening them up before extracting as much value as they can, as well as the nefarious practices these operations employ. Finally, I'll focus on the recent rise of job scams that have been targeting builders and employers alike.", + "track": "Security", + "type": "Talk", "expertise": "Beginner", "audience": "Community", "featured": false, "doNotRecord": false, - "tags": [ - "capital" - ], "keywords": [ - "Grant", - "Allocation", - "Capital" + "threat", + "intelligence" + ], + "tags": [ + "Security", + "Custody", + "threat", + "intelligence", + "Custody", + "Security" ], - "duration": 437, "language": "en", - "sources_swarmHash": "53db20665d6eb076c87dcfb4bd938bed30aa84c796cc95e39f06bdc9eff7351c", - "sources_youtubeId": "Vp6ju5k3w3A", + "sources_swarmHash": "9b9842b85cbfb2249efb51ecd459e8c85442648743e788785f6907dc66ffa381", + "sources_youtubeId": "XS2YF2WgIfU", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736f1cc74749a4b8923840a", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736f1cc74749a4b8923840a.vtt", - "transcript_text": " Hello and good morning, everyone. I'm Eugene Leventhal. I'm currently working on governance at scroll and I'm a research director at MediGov. Today I'll talk about some of the research that we've been working on over the last two years at MetaGov in trying to also get a qualitative understanding of the nature of grant programs in our space. And so we can better interface with grant programs beyond Web3 as well. So today, I know we don't have a lot of time, so I'm going to run through some quick context, some takeaways, and kind of what I'm thinking might be happening as we go into 2025 and beyond. And so to start with some context, you know, as part of this research, as mentioned, we've covered a bunch of different grant programs. This is just a sampling of some of them. So you'll see it's, you know, a lot of the major programs in the Ethereum world, as well as some other L1 ecosystems as well. So we're lucky that we're getting a view well beyond any kind of single set of programs and a lot of these folks have very different priorities so it is really interesting to capture that. And this year as well we expanded to not just cover programs but we also got into covering grant tooling as well. 
And as you'll see here we aren't covering any of the grant stacks themselves, we kind of chose to focus on tools around the ecosystem that can make it possible for grant operators to perform better, for grantees to better understand how they're performing relative to others, and so that we can all just have a better sense of the data infrastructure around the space. And so I wish I could just point to a slide with a 100% confidence of this is the amount of grants shipped. And that's actually its own challenge is because a lot of the data is fractured and siloed. Not all programs are on chain. Even the ones that are need a little more sense making. There aren't perfect databases. You know, from the programs that we've covered, we've seen over $1.2 billion USD go out. We haven't covered the formal RPGF programs and the Arbitrum and Tyco and some of the other very large programs out there. So, you know, there are multiple efforts, including Open Source Observer and DAO IP5 from MetaGov and DAO Star Star where we are trying to improve this landscape. So I hope at DevCon8, wherever we'll be in the world, whether me, Shinya, or anyone else working on these problems, someone will be able to show off this wonderful new database of clarified information. Let's get into the actual takeaways from studying these programs. One thing is on the mechanism side. We see that basically, you know, perspective grants, I'm giving you money to go try a new thing, and as of more recently, I'm putting in at least some milestones in there. That's kind of the norm right now. And in the last, you know, six to 12 months, we've definitely seen more and more experimentation. I feel like thanks to Optimism, the retro programs are definitely getting the most attention. But if anyone caught Rafa from zk-sync presenting in the GovHub yesterday, you saw like 15 different little experiments running. There are a lot of cool mechanisms being created. I think Ohwaki in his book charted around 50 plus potential mechanisms to utilize. And we're going to definitely see a lot more experimentation in the coming months and years. Also looking backwards, recognizing that we all want to better understand our impact, but what we actually mean by impact and how we go about studying it has been a huge challenge. And so at the very least, as a space, there's that recognition that we want to understand the impact. We don't feel like we really understand the value of all the grants shipped, but we don't know how to do it. And related to that is also thinking about, well, great, you gave someone a grant. Was that actually enough to help them succeed? Right? A lot of grantees actually need more than just money to ensure that they're able to accomplish their desired outcomes and impact. And as mentioned, we're seeing more tooling and infra. And as we start thinking about the future, you know, just the tools themselves are great, but they're not always enough. We need more documentation of what's happening. We need more both qualitative and quantitative analyses. We want to see more experiments happening. And I personally think in this next year, we're going to see a strong embracement of pluralistic grant programs. So a single funding entity might have five to ten separate programs running with various levels of coordination between these programs. single funding entity might have five to ten separate programs running with various levels of coordination between these programs. 
And, you know, I think we will see kind of some data and operational coordination across programs, but then letting each program kind of run their own thing. In the context of impact, and if anyone had a chance to drop by the grant hub yesterday, you know, we had a whole day focused on impact because, again, there is this recognition. And so some groups are getting open to embracing this program evaluation model where we better understand what are the outcomes, right, the milestones you commit to, what outcomes do those produce, and over time, how does that play out into impact? But we need more accountability of the grant programs, of the grantees, and throughout. So definitely looking for more improvement and adoption of some of these tools and use cases. With that, unfortunately, the time is up for today, so I'll just shift over to questions, and if anyone wants to geek out on grant programs later, please always feel free to find me. Thank you so much, Eugene. We see that there are no questions yet. So please scan the QR code to submit your questions. But while I have Eugene on stage, I'm really curious, are there any key takeaways from grant programs run by other chains that you think the grant programs in the Ethereum ecosystem can really learn a lot from? Yeah, I think the first thing, and this isn't exclusive to Web3 individually, you know, we have already run some events where we've had traditional foundations and re-granting non-profits show up. I think it's just the recognition that impact measurement is hard for organizations that have been doing this for a century. So we shouldn't feel bad that it's hard for us. And especially going back to one of the questions that I saw Shinya got, right? A lot of grant programs might only have one person running the entire program full time. That's not enough to both build really good systems, build relationships and figure out your impact. So if you actually are trying to deeply commit to doing grants, you have to be willing to commit to the operational overhead that inherently comes with doing it well. And I see the questions pouring in now. So look at the first upvoted one, tips and tricks to find good grants. Yeah, so I think for the most part, the days of free money are over. You know, for those who have been applying, you could just like show up, vaguely express interest, and then magically money appears in your wallet. At this point, you definitely need to focus on having a clear vision of what are you trying to accomplish and what does that mean for the ecosystem that you're applying to. Don't focus on spray and pray applying to every program you can. Pick one, maybe two or three that you could deeply commit with and work with and talk to them about, well, how does this help you? How can this be a two way relationship? Thank you so much, Eugene. Unfortunately, that's all the time we have. 
So let's put our hands together for him.", - "eventId": "devcon-7", - "slot_start": 1731641400000, - "slot_end": 1731642000000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1kRi6qfFHeK8txYMq58KLUaOTV4stHccKNP0m-WyZWWg", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "eugene-leventhal" - ] + "luker" + ], + "eventId": "devcon-7", + "slot_start": 1731582000000, + "slot_end": 1731583800000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1dFgaih8CwwDPKj_GGRG-nwZ_b7MobKt9l-QDbYxwOPk", + "resources_slides": "https://drive.google.com/file/d/1ZLi23xoIspqqFivHaw5dxbXYpID30K83/view" }, "vector": [ + 6, 0, 0, 0, @@ -749126,15 +747018,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -749756,7 +747639,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -749767,6 +747649,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -749871,6 +747754,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -750126,6 +748010,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -750342,6 +748228,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -750368,7 +748255,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -750451,58 +748337,50 @@ }, { "session": { - "id": "the-history-and-philosophy-of-cypherpunk", - "sourceId": "8JVYCQ", - "title": "The History and Philosophy of Cypherpunk", - "description": "Rather than bend to knee to Donald Trump, the goal of the cypherpunk movement is to abolish the state in order to maximize human freedom via privacy-enhancing decentralized technologie. After reviewing the history of this deviant group of programmers in the 1980s, what philosophical and technical lessons do the cypherpunks hold for Ethereum today? Censorship-resistant digital cash was only one the start, and the missing parts of their legacy: mixnets and anonymous credentials for identity.", - "track": "Cypherpunk & Privacy", - "type": "Talk", - "expertise": "Beginner", - "audience": "Community", + "id": "the-longevity-acceleration-roadmap-a-technical-plan-to-solve-aging", + "sourceId": "V9BA8B", + "title": "The Longevity Acceleration Roadmap: A Technical Plan to Solve Aging", + "description": "The Longevity Acceleration Roadmap: A Technical Plan to Solve Aging", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Anonymity", - "Censorship Resistance", - "Digital Sovereignty", - "cypherpunk", - "mixnet", - "cryptoanarchy", - "Anonymity", - "Politics", - "Values" + "DeSci", + "e/acc" ], "keywords": [ - "mixnets", - "cypherpunk", - "cryptoanarchist" + "Longevity" ], - "duration": 1555, + "duration": 476, "language": "en", - "sources_swarmHash": "89ddf65d60e9d080ec70f2820e3674c757d151a0741047ec721bd81ba034a27e", - "sources_youtubeId": "OZwG_Tx1hdA", + "sources_swarmHash": "23706b226f61f01a6d2ee5fa74716b3f1521fb6b40769f99b9662a2e37344e20", + "sources_youtubeId": "yT7L3bPpbEw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673351433a168eb5355b5f3c", + "sources_streamethId": "67357e839dbb7a90e1096ef2", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67357e839dbb7a90e1096ef2.vtt", + "transcript_text": " Take it away. Hi, my name is Nathan Chang, and I'll be talking about the Longevity Acceleration Roadmap, a technical plan to solve aging. 
This roadmap is a foundational project at the LBF and it's intended to help guide newcomers to the longevity space. I can only do a speed run now, but if you're interested in the full thing, check out the link below. Okay, so the LBF is a non-profit that I co-founded to mobilize the world's top talent to work on solving aging. But why work on aging? Well, simply because life is good and death is bad. And if you ask what is the thing that causes the most death, it's aging. So by far, aging causes the most death. 75% of deaths worldwide are caused by aging. And this comes in the form of age-related diseases like heart disease, cancers, Alzheimer's, etc. And this is an emergency. All 8 billion humans are on an exponential trajectory towards physical and cognitive decline and ultimately death. But if we can solve aging, we can envision a future where people can live for as long as they want in peak health and function. But if you want to see this future personally, we need to do something about aging today. So where's the plan to solve aging? Well, when we first started, we couldn't find any clear plan to solve aging. So we had to make one on our own. And in our plan, we wanted to have three key features. So one, we wanted to exclusively focus on direct research and technology paths that could solve aging completely. And two, we wanted to focus on specific, well-defined key technical objectives. And three, we wanted it to be actionable. So have a good overview of projects in the space so that people could easily join or start them or fund them. Okay, so we got a lot of input from scientists, researchers, and entrepreneurs in the field and we identified three main strategies that could plausibly solve aging. So one is biostasis. This means pausing biological time and this is typically with cryopreservation at low temperature or chemical fixation. Two, replacement, so replacing old parts with young, and most promising, full body transplants plus gradual brain replacement. And three, advanced bioengineering, so understanding and modeling biology and aging and also developing genetic and cellular engineering tools. Okay. So one mental model to kind of understand these three technology paths is a two by two matrix. So on one axis, you're trying to solve aging or buy time. And on the other axis, you're either trying to understand aging or bypass the complexity of aging altogether. And just as a side note, some of these technologies can also be thought of as general defensive biomedical technologies. So for example, replacement could be useful for things beyond aging. So if you got in a car accident, replacement could be life-saving. Okay, so let's take a look at biostasis. So the strategy here is to pause biological time until the future where medical that, but also to prevent thermal fracturing. So when you're trying to bring a body or a brain from low temperature back to room temperature. So there's a number of different projects and startups in the space, but there still needs to be a lot of work to be done. Okay, let's talk about replacement. So this strategy is really divided into two parts. So there's replacing the body with a clone body and knockout of the brain. But then, two, for the brain, you gradually replace the brain with young neural tissue grafts, bit by bit, kind of like in the style of the ship of Theseus.
Now, in the body, this really has only become within reach since 2018, when Chinese scientists demonstrated the first successful cloning of primates. And it's plausible that this could be extended to humans. Okay, but you can create a clone body, but you don't want to create a brain, and with it a person, at the same time, so you'll need a genetic construct to ensure that the capacity to have consciousness never forms in the cloned embryo. And there's actually already a natural proof of concept for this. It's a birth defect called hydranencephaly, where cerebral hemispheres never form. And these bodies actually never have consciousness, but can develop to full maturity. And last, to put it together, you have to do some sort of head transplant. And these have been done experimentally in the past in mice, dogs, and monkeys, with survival in some cases lasting days to months, but this still needs to be improved. Okay, so let's move on to the brain. As I said before, you can gradually replace the brain with engineered neural tissue grafts made from a patient's own iPSC stem cells. And neuroplasticity of the brain allows migration of brain functions away from damaged areas if done slowly. And just recently, the US ARPA-H program hired the leading researcher in brain replacement, Jean Hébert, to lead a $100 million moonshot program in this strategy. There's a number of key technical objectives in our V1 roadmap. Most of the hard stuff is in the brain, but also in spinal cord reconnection. And there's a number of startups working in this field, but most are still in stealth. Okay, we also asked researchers to estimate the time and cost to get a reasonable attempt on this roadmap, and they came up with a figure of $3.6 billion in about 10 years. Okay, last, bioengineering. So this is the longest and most uncertain road. It will require the convergence of four different prongs. So one, large-scale data collection, something like the Protein Data Bank, but at a much larger scale. Two, computational modeling, which could include AI models. And then three, design of genetic or cellular interventions. And four, delivery of genetic interventions or cells. Okay, next steps. So clearly, there's a lot of work to be done to solve aging. But unfortunately, there's very few people or resources devoted to trying to solve aging. So if you feel an intense desire to defeat aging, definitely join the LBF. We're the biggest community focused exclusively on building for indefinite lifespan extension. We run highly selective intensive workshop retreats. Our next one is in March in Berkeley, California. But fighting aging is not just for biotechs and scientists. We need everyone to get society aligned on allocating Apollo program level resources to fighting aging. And so if this resonates with you, I'd encourage you to also join the vitalism community where we're building a social political movement to make fighting aging and death humanity's number one priority. And our community is organizing the biggest longevity event in the world.
It's going to be a two-month pop-up longevity city in Berkeley, California.", "eventId": "devcon-7", - "slot_start": 1731407400000, - "slot_end": 1731409200000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1ovH3oyNrS_ZaZbKCeLkHxgPjrRCAzaWP7RVIf9TRkOo", - "resources_slides": null, + "slot_start": 1731557580000, + "slot_end": 1731558000000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/160SSgpDZHkjg4YniAuH3mYD1hx7hZuv_Qp2ip0zoRso", + "resources_slides": "", "speakers": [ - "max-hampshire", - "harry-halpin", - "iness-ben-guirat" + "nathan-cheng" ] }, "vector": [ + 0, + 6, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -750862,7 +748740,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -751139,10 +749016,9 @@ 0, 0, 0, - 6, - 6, 0, 0, + 6, 0, 0, 0, @@ -751274,7 +749150,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -751298,7 +749173,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -751372,7 +749246,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -751383,6 +749256,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -751396,7 +749270,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -751405,6 +749278,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -751585,7 +749460,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -751630,7 +749504,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -751751,8 +749624,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -751816,11 +749687,12 @@ 2, 0, 0, + 2, + 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -751833,45 +749705,46 @@ }, { "session": { - "id": "the-hunt-for-impactful-use-cases-from-the-crypto-for-good-fund-what-15-blockchain-pilots-revealed-in-emerging-markets", - "sourceId": "TV3QRD", - "title": "The Hunt for Impactful Use Cases from the Crypto For Good Fund: What 15 Blockchain Pilots Revealed in Emerging Markets", - "description": "* This talk will provide a snapshot of the some of most impactful real world uses of web3 in emerging markets covering the additionality added by blockchain. \r\n* Additionally, the talk will deep-dive into the insights and results of 3 web3 pilots funded by Mercy Corps Ventures in Africa & Latin America, showcasing how web3 is addressing the needs of financially underserved and climate vulnerable populations.", - "track": "Real World Ethereum", + "id": "the-next-700-evm-languages", + "sourceId": "QE7RWH", + "title": "The Next 700 EVM Languages", + "description": "What is the role of programming languages in helping smart contracts become reliable and scalable technology? Are our current languages for the EVM up to the task? 
Has Ethereum lost the lead in this regard?\r\nThis talk explores these questions and proposes a roadmap for the development of the next generation of smart contract languages for the EVM.", + "track": "Developer Experience", "type": "Talk", - "expertise": "Beginner", - "audience": "Product", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "keywords": [ - "Emerging Markets", - "Africa", - "Latin America" + "programming languages", + "formal verification", + "smart contracts" ], "tags": [ - "Use Cases", - "RWA", - "Ethereum for Good", - "latin", - "america", - "Ethereum for Good", - "RWA", - "Use Cases" + "Languages", + "Formal Verification", + "smart", + "contracts" ], "language": "en", + "sources_swarmHash": "", + "sources_youtubeId": "", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "timothy-asiimwe" + "francisco-giordano" ], "eventId": "devcon-7", - "slot_start": 1731641400000, - "slot_end": 1731643200000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1vwkrczNxrHXLNfycNjtYzjJo4jXX3Z2RUJ7NWPh4OMQ" + "slot_start": 1731580200000, + "slot_end": 1731582000000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1xFEtAafqxxm1b1UAUHGb8bnoWg9x6qZQdGRk_3lPM8Y", + "resources_slides": "https://drive.google.com/file/d/1el0gFJAWBYjssHUS2rHQ_GupEbyJIkdK/view" }, "vector": [ - 0, - 0, - 0, 0, 0, 0, @@ -752513,11 +750386,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -752699,7 +750572,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -752718,6 +750590,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -752746,7 +750619,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -752774,7 +750646,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -752819,6 +750690,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -752883,6 +750755,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -753125,8 +750999,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -753182,6 +751054,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -753191,8 +751064,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -753205,40 +751076,46 @@ }, { "session": { - "id": "the-long-con-pig-butchering-drainers-and-job-scams", - "sourceId": "STMCNZ", - "title": "The Long Con: Pig Butchering, Drainers, and Job Scams", - "description": "I'll discuss the different types of malicious actors from low-skilled script kiddies to government-sanctioned advanced persistent threats. This presentation will include an overview on drainer groups and how sophisticated scammers string along their victims, fattening them up before extracting as much value as they can, as well as the nefarious practices these operations employ. Finally, I'll focus on the recent rise of job scams that have been targeting builders and employers alike.", - "track": "Security", + "id": "the-next-era-sequencing-and-its-real-impact-on-app-users", + "sourceId": "9M78AK", + "title": "The Next Era: Sequencing and Its Real Impact on App Users", + "description": "This talk will discuss app sequencing products which were developed to enhance decentralization and security via distributed transaction ordering with independent sequencing (native Mainnet L2 sequencers i.e. Base, OP) and the impact to end users and applications. 
It will also discuss the tradeoffs of LVR, shared sequencing, and app-specific sequencing.", + "track": "Usability", "type": "Talk", - "expertise": "Beginner", - "audience": "Community", + "expertise": "Intermediate", + "audience": "Product", "featured": false, "doNotRecord": false, - "keywords": [ - "threat", - "intelligence" - ], "tags": [ - "Security", - "Custody", - "threat", - "intelligence", - "Custody", - "Security" + "Layer 2s", + "User Experience", + "Transaction fees mechanisms", + "sequencer", + "Layer 2s", + "Transaction fees mechanisms", + "User Experience" ], - "language": "en", - "speakers": [ - "luker" + "keywords": [ + "Sequencing" ], + "duration": 975, + "language": "en", + "sources_swarmHash": "707cb042ec5704ebd467c773dedb087d6fc5a3c474c0c41441a2ed12ac9ec02d", + "sources_youtubeId": "-S2rlhSUHZY", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731582000000, - "slot_end": 1731583800000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1dFgaih8CwwDPKj_GGRG-nwZ_b7MobKt9l-QDbYxwOPk" + "slot_start": 1731405600000, + "slot_end": 1731407400000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1l63vZZz_0RN-aU0hwjhmdAat5Fq0OFy7UoMYiS3KJxc", + "resources_slides": "https://drive.google.com/file/d/1EZcCr_PB3hKeryX58xnCfgzh5KsXRnOK/view", + "speakers": [ + "tina-haibodi" + ] }, "vector": [ - 6, 0, 0, 0, @@ -753247,6 +751124,8 @@ 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -753989,8 +751868,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -754003,6 +751880,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -754026,6 +751904,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -754051,6 +751930,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -754096,6 +751976,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -754245,8 +752126,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -754464,7 +752343,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -754547,6 +752425,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -754561,12 +752440,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0 @@ -754574,46 +752447,39 @@ }, { "session": { - "id": "the-longevity-acceleration-roadmap-a-technical-plan-to-solve-aging", - "sourceId": "V9BA8B", - "title": "The Longevity Acceleration Roadmap: A Technical Plan to Solve Aging", - "description": "The Longevity Acceleration Roadmap: A Technical Plan to Solve Aging", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "", - "audience": "Engineering", + "id": "the-next-generation-of-decentralized-governance", + "sourceId": "WUSAHA", + "title": "The Next Generation of Decentralized Governance", + "description": "In this talk, tracheoptryx will share thoughts on what will define the next phase of decentralized governance and how that has informed the design of EigenGov, EigenLayer’s forthcoming governance system.", + "track": "Coordination", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, - "tags": [ - "DeSci", - "e/acc" - ], + "tags": [], "keywords": [ - "Longevity" + "see", + "doc" ], - "duration": 476, + "duration": 1629, "language": "en", - "sources_swarmHash": "23706b226f61f01a6d2ee5fa74716b3f1521fb6b40769f99b9662a2e37344e20", - "sources_youtubeId": "yT7L3bPpbEw", + "sources_swarmHash": "75a12cae9fadbaeaba434231a49a634d15b4251288154859b4667cd19622b603", + "sources_youtubeId": "VhkP2OIwIFY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67357e839dbb7a90e1096ef2", - "transcript_vtt": 
"https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67357e839dbb7a90e1096ef2.vtt", - "transcript_text": " Take it away. Hi, my name is Nathan Chang, and I'll be talking about the Longevity Acceleration Roadmap, a technical plan to solve aging. This roadmap is a foundational project at the LBF and it's intended to help guide newcomers to the longevity space. I can only do a speed run now, but if you're interested in the full thing, check out the link below. Okay, so the LBF is a non-profit that I co-founded to mobilize the world's top talent to work on solving aging. But why work on aging? Well, simply because life is good and death is bad. And if you ask what is the thing that causes the most death, it's aging. So by far, aging causes the most death. 75% of deaths worldwide are caused by aging. And this comes in the form of age-related diseases like heart disease, cancers, Alzheimer's, etc. And this is an emergency. All 8 billion humans are on an exponential trajectory towards physical and cognitive decline and ultimately death. But if we can solve aging, we can envision a future where people can live for as long as they want in peak health and function. But if you want to see this future personally, we need to do something about aging today. So where's the plan to solve aging? Well, when we first started, we couldn't find any clear plan to solve aging. So we had to make one on our own. And in our plan, we wanted to have three key features. So one, we wanted to exclusively focus on direct research and technology paths that could solve aging completely. And two, we wanted to focus on specific, well-defined key technical objectives. And three, we wanted it to be actionable. So have a good overview of projects in the space so that people could easily join or start them or fund them. Okay, so we got a lot of input from scientists, researchers, and entrepreneurs in the field and we identified three main strategies that could plausibly solve aging. So one is biostasis. This means pausing biological time and this is typically with cryopreservation at low temperature or chemical fixation. Two, replacement, so replacing old parts with young and most promising full body transplants plus gradual brain replacement. And three, advanced bioengineering, so understanding and modeling biology and aging and also developing genetic and cellular engineering tools. aging and also developing genetic and cellular engineering tools. Okay. So one mental model to kind of understand these three technology paths is a two by two matrix. So in one axis, you're trying to solve aging or by time. And the other axis, you're either trying to understand aging or bypass aging like the complexity of aging altogether. And just as a side note, some of these technologies can also be thought of general defensive biomedical technologies. So for example, in like replacement could be useful for things beyond aging. So if you got in a car accident, replacement could be life-saving. Okay, so let's take a look at biostasis. So the strategy here is to pause biological time until the future where medical that, but also to prevent thermal fracturing. So when you're trying to bring a body or a brain from low temperature back to room temperature. So there's a number of different projects and startups in the space, but there still needs to be a lot of work to be done. Okay, let's talk about replacement. So this strategy is really divided into two parts. 
So there's replacing the body with a clone body and knockout of the brain. But then, too, for the brain, you gradually replace the brain with young neural tissue graphs, bit by bit, kind of like in the style of the ship of Theseus. Now, in the body, this really has only become within reach since 2018, when Chinese scientists demonstrated the first successful cloning of primates. And it's plausible that this could be extended to humans. Okay, but you can create a clone body, but you don't want to create a brain at the same time a person so you'll need to construct a genetic construct to ensure that the capacity to have consciousness never forms in the cloned embryo. And there's actually already a natural proof of concept for this. It's a birth defect called hydra and encephaly, where cerebral hemispheres never form. And these bodies actually never have consciousness, but can develop to full maturity. And last, to put it together, you have to do some sort of head transplant. And these have been done experimentally in the past in mice, dogs, and monkeys, with survival in some cases lasting days to months, but still needs to be improved. Okay, so let's move on to the brain. As I said before, you can gradually replace the brain with engineered neural tissue graphs made from a patient's own iPSC stem cells. And neuroplasticity of the brain allows migration of brain functions away from damaged areas or if done slowly. And just recently, the US ARPA-H program hired the leading researcher in brain replacement, Jean Haber, to lead a $100 million moonshot program in this strategy. There's a number of key technical objectives in our V1 roadmap. Most of the hard stuff is in the brain, but also in spinal cord reconnection. And there's a number of startups working in this field, but most are still in stealth. Okay, we also asked researchers to estimate the time and cost to get a reasonable attempt on this roadmap, and they came up with a figure of $3.6 billion in about 10 years. they came up with a figure of $3.6 billion in about 10 years. Okay, last, bioengineering. So this is the longest and most uncertain road. It will require the convergence of four different prongs. So one, large-scale data collection, something like a protein data bank, but much larger scale. Two, computational modeling, which could include AI models. And then three, design of genetic or cellular interventions. And four, delivery of genetic interventions or cells. Okay, next steps. So clearly, there's a lot of work to be done to solve aging. But unfortunately, there's very few people or resources devoted to trying to solve aging. But unfortunately, there's very few people or resources devoted to trying to solve aging. So if you feel an intense desire to defeat aging, definitely join the LBF. We're the biggest community focused exclusively on building for indefinite lifespan extension. We run highly selective intensive workshop retreats. Our next one is in March in Berkeley, California. But fighting aging is not just for biotechs and scientists. We need everyone to get society aligned on allocating Apollo program level resources to fighting aging. And so if this resonates with you, I'd encourage you to also join the vitalism community where we're building a social political movement to make fighting aging and death humanity's number one priority. And our community is organizing the biggest longevity event in the world. 
It's going to be a two-month pop-up longevity city in Berkeley, California.", "eventId": "devcon-7", "slot_start": 1731557580000, "slot_end": 1731558000000, "slot_roomId": "breakout-3", "resources_presentation": "https://docs.google.com/presentation/d/160SSgpDZHkjg4YniAuH3mYD1hx7hZuv_Qp2ip0zoRso", "resources_slides": "", "speakers": [ "nathan-cheng" ] }, "vector": [ 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] }, { "session": { "id": "the-next-generation-of-governors-will-be-modular", "sourceId": "DEAUWE", "title": "The next generation of governors will be modular!", "description": "Onchain governance is one of the main non-financial use cases of Ethereum. Still, innovation in that space is slow, and deployed solutions are still very much tied to financial assets.
In order to move away from that situation, and build more powerful governance solutions, we need to build a more modular and evolvable approach.", "track": "Coordination", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "Governance", "Design", "modular", "Design", "Governance" ], "keywords": [ "Smart contracts", "modularity" ], "duration": 418, "language": "en", "sources_swarmHash": "712084596ebb0aeddfcee323eece11a1914339db0b1d9170b841199a20de0882", "sources_youtubeId": "iyWhVEouHn4", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6734849a9dbb7a90e1fb0634", "eventId": "devcon-7", "slot_start": 1731489600000, "slot_end": 1731490200000, "slot_roomId": "classroom-a", "resources_presentation": "https://docs.google.com/presentation/d/1DnvD2EnuiJkqkdlnAA1h6CZl0zqKU90ShcgX4KV0SrE", "resources_slides": "https://drive.google.com/file/d/1FJadPAlern7uyE_qNEQJQaA-HQPwNLpI/view", "speakers": [ "hadrien-croubois" ] }, "vector": [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ] }, { "session": { - "id": "the-next-era-sequencing-and-its-real-impact-on-app-users", - "sourceId": "9M78AK", - "title": "The Next Era: Sequencing and Its Real Impact on App Users", - "description": "This talk will discuss app sequencing products which were developed to enhance decentralization and security via distributed transaction ordering with independent sequencing (native Mainnet L2 sequencers i.e. Base, OP) and the impact to end users and applications.
It will also discuss the tradeoffs of LVR, shared sequencing, and app-specific sequencing.", - "track": "Usability", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Product", + "id": "the-open-source-orchestra", + "sourceId": "9PWLBV", + "title": "The Open Source Orchestra", + "description": "Member of the Open Source Orchestra", + "track": "Entertainment", + "type": "Music", + "expertise": "Expert", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Layer 2s", - "User Experience", - "Transaction fees mechanisms", - "sequencer", - "Layer 2s", - "Transaction fees mechanisms", - "User Experience" - ], - "keywords": [ - "Sequencing" - ], - "duration": 975, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "707cb042ec5704ebd467c773dedb087d6fc5a3c474c0c41441a2ed12ac9ec02d", - "sources_youtubeId": "-S2rlhSUHZY", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731405600000, - "slot_end": 1731407400000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1l63vZZz_0RN-aU0hwjhmdAat5Fq0OFy7UoMYiS3KJxc", - "resources_slides": null, "speakers": [ - "tina-haibodi" - ] + "sophia-spirlock" + ], + "eventId": "devcon-7", + "slot_start": 1731553200000, + "slot_end": 1731556800000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1MLErEiLaty6zwbafFEy3AROdYSwqpoEoEBnY5JL_9YY", + "resources_slides": "" }, "vector": [ 0, @@ -757363,6 +755213,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -758122,9 +755973,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -758146,7 +755994,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -758172,7 +756019,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -758218,7 +756064,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -758669,15 +756514,15 @@ 0, 0, 0, - 2, 0, 0, 0, + 2, + 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -758691,40 +756536,45 @@ }, { "session": { - "id": "the-next-generation-of-decentralized-governance", - "sourceId": "WUSAHA", - "title": "The Next Generation of Decentralized Governance", - "description": "In this talk, tracheoptryx will share thoughts on what will define the next phase of decentralized governance and how that has informed the design of EigenGov, EigenLayer’s forthcoming governance system.", - "track": "Coordination", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Research", + "id": "the-political-economy-of-dacc", + "sourceId": "AXX3JD", + "title": "The political economy of d/acc", + "description": "The dynamics behind d/acc are not new. Economic history is full of examples of the private provision of public goods. 
If we want to reduce AI risks while preserving freedom from centralized control, it's worth thinking carefully about the different ways humans have solved isomorphic problems in the past, and how the same tools could apply today.", "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", "expertise": "Beginner", "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ "Public", "good" ], "keywords": [ "d/acc" ], "duration": 1079, "language": "en", "sources_swarmHash": "44f39b5a61d0278c154c160191866ad38d028d9a4d0677cb0457ded12fe5dd30", "sources_youtubeId": "Ukm0tcoedeg", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67356f0f9dbb7a90e17febf4", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67356f0f9dbb7a90e17febf4.vtt", "transcript_text": " All right. Everybody, I'm thrilled to be here. I am Eli Dourado. I'm Chief Economist at the Abundance Institute, and it's a real pleasure to be here to talk about d/acc. I'm approaching this through my training as a sort of political economist, and I'm going to give a high-level overview of how I see this space. I want to start by talking about our instincts for both control and freedom. We all have these instincts. And in the last few years with AI innovation, we've seen this kind of like control narrative or this kind of control instinct be on display. Like we need an enforced pause on AI development. We need to shut down data centers. We need a kill switch. We need regulation. We need to restrict model releases or we need to impose bans on open source AI models or export controls. And sort of the counter reaction to that is like, no, we need freedom of speech. Are we really gonna regulate matrix multiplication? And this is a slippery slope to totalitarianism. And then I think that there's an interesting conclusion at the end that, therefore, I'm gonna not believe the premise. I'm not going to believe that there is any risk here. And I think that this conclusion is very natural. It's really bad epistemic hygiene, but it's common and we need to acknowledge it. And you see this in completely different contexts with things like climate. People will say oh, I just don't believe in it, because they don't want to, like, accept the sort of control-oriented beliefs of the other side. So just because freedom is good doesn't mean that the problem raised by the anti-freedomites is fake, right? So, d/acc approaches both AI safety and biosafety from the perspective that they are genuine public goods. But crucially, just because we acknowledge the existence of safety as a public good doesn't mean that we think the right approach is to generate it through top-down coercive means, right?
We're gonna take both freedom and public goods seriously, and I think that's the ethos behind d/acc and a lot of its theory in general. So that brings us to the next part of the talk, the private provision of public goods. So what is a public good? In classical economic terms, a public good is one that is neither excludable nor rivalrous. So the canonical example is the lighthouse. A lighthouse on a coastline provides guidance for all the ships, whether or not they pay, right? So a lighthouse is not an excludable good. And at the same time, one ship relying on the lighthouse for guidance does not diminish the ability of any other ship to rely on the lighthouse for guidance. So the lighthouse is non-rivalrous. So game theoretically here, we have a free rider problem, and we should not expect the lighthouse to be produced by the market, right? You must rely on governments to produce lighthouses. So one of the great economists of the 20th century, Ronald Coase, got interested in this example, and he decided to actually collect data on lighthouses in Britain. And astonishingly, he found that every single one of them was privately constructed. So we have a free rider problem. It's not supposed to happen. But every single lighthouse in Britain was privately constructed. So how can this be? What Coase found is that lighthouses were vertically integrated with ports. So the port operator would build a lighthouse so that people could come to their port. And then they would recoup the costs through port fees. And what this example shows is that creative structuring can turn a public good problem into a private good provision. Another closely related idea is a common pool resource, right? With the classic example being irrigation systems. So irrigation systems are arguably one of the things that led to the creation of states in the first place. Karl Wittfogel, in his book Oriental Despotism, advances the concept of a hydraulic empire, a powerful state, as in Egypt or Mesopotamia or China, that's built around irrigation. And maintenance of this common pool resource is a public good, and it's worth it to accept some coercion if it means you get the benefits of irrigation. So just like with the lighthouse, there's a twist. Later scholars, particularly Elinor Ostrom, figured out that many complex irrigation systems, like in the Philippines or in this one in Valencia, Spain, were cooperatively maintained without an autocrat. This one goes back a thousand years. And so she won a Nobel Prize for figuring out the conditions under which such cooperation is possible. Another example of non-coercive public good provision is standard setting. So standards are a public good, but because the value of the standard goes up when other people use it, there's an incentive to cooperate in standard production, standard development. So one of the explicit powers reserved for the government in the US Constitution is to develop standards, but it turns out that private standard setting happens all the time. This is an example from IETF, but we're here at DevCon, and Ethereum itself involves a lot of cooperatively produced standards. 
I'm not going to do a long spiel on each of these modalities, but there's a lot of different ways in which public good production can be incentivized without relying on sort of centralized top-down government coercion. We've talked about the first three already. You can think of a lot of open-source software development as scratching an itch. Sometimes people do things to accumulate prestige. Sometimes people give money to causes they believe in. My organization, the Abundance Institute, is a 501 nonprofit. Our budget is entirely funded by donors who get very little public recognition. Usually they don't want it. And they give just because they believe in the mission. Norms like politeness or not engaging in petty theft are also a source of public goods. And of course, mechanism design is an important one for Ethereum. We can think of consensus as a public good and the consensus protocol as a mechanism that incentivizes coming to consensus. So public goods, like AI safety and biosafety, can be provided non-coercively without government involvement, and it happens all the time, and it's possible that by some accountings, most public goods are in fact privately produced. And so now I want to show the flip side. When public goods, and particularly safety, are provided coercively top-down by the state, the results are often pretty bad. So let's look first at nuclear energy. In the 1950s and 60s, we experienced cost reductions as we deployed more nuclear plants. This is common in industry; it's called the learning rate. And then in the 1970s, the trend reversed. Costs started to explode. Initially it began in the early 1970s with environmental regulation, and then it accelerated in response to the Three Mile Island incident in 1979, which, by the way, nobody was harmed by at all. Another domain, which Vitalik referenced, in which safety regulation has had enormous costs is pharmaceutical development. And by the way, this is adjusted for inflation, just like the last chart was also. So these higher costs of bringing drugs to market represent thousands of missing drugs. We're missing thousands of drugs. And people die because of the lack of drugs. But these people are not identifiable victims. And my friend and former professor Alex Tabarrok calls it the invisible graveyard: people dying because of the lack of drugs brought to market. Aside from nuclear and pharmaceuticals, the other major industry that is regulated via pre-market approval is aviation. So everyone thought after World War II that there was going to be a growing personal aviation industry, that people coming back from World War II knew how to fly, they could buy surplus military airplanes, and we would all just have essentially flying cars, just personal aircraft that would take us wherever we wanted to go. And it could have happened, but we had increased regulation at a couple different points along the way here, in the mid-1960s and early 80s, and this strangled the industry. Fortunately, the FAA has recognized that they've gone too far with personal aircraft regulation. They're beginning to actually loosen the rules. And something that's super interesting is that they actually think that loosening the rules for a category called light sport aircraft will on net increase safety, through both innovation, so the more you innovate, the safer the aircraft get, but also through changing the composition of the fleet and sort of increasing the use of manufactured airplanes. 
This is a way that deregulation can lead to actually higher levels of safety. And it's interesting to see a major federal regulator recognize that. So there's a lot of reasons, a lot of different examples, in which coercive safety regulation doesn't actually make us safer. So we talked about nuclear, but let's compare it to coal. So nuclear is highly regulated, highly safe. And meanwhile, we have coal plants still operating and contributing to thousands and thousands of deaths. I estimate in the US we have about 16,000 deaths a year from coal emissions, which is like five 9-11s every year. And everyone's apparently okay with that, but not with lowering the bar for nuclear plants. You know, experimental drugs certainly can be unsafe, but a lot of times drugs are substitutes for other medical procedures. And if you have to have a surgery or a hospital stay, those are risky also. And we don't balance the risks in those kinds of... We don't make that comparison in our regulatory system. One thing that strikes me as really interesting is that in Europe, you can buy a contact lens from a vending machine. But in the U.S., the idea is that it's not safe to do that. So you have to go to an optometrist and get a prescription first before you're allowed to do that. So it's thinly veiled protectionism. There's research by an economist named Parker Rogers that shows that deregulated and down-regulated medical devices are actually safer than the more regulated device types. He looks at cases where the regulation changes on device types, and he finds that the less regulated types increase entry into the market, they increase competition, and ultimately they turn out to be both safer and cheaper. And I already mentioned that the over-regulation of manufactured aircraft means that people select for experimental aircraft, a lot of times amateur, home-built aircraft. So the background here, behind all this carnival of negative effects from safety regulation, is what's called the Great Stagnation. This is a period in the U.S. and much of the West known as the Great Stagnation. And if we hadn't experienced it, beginning around 1973, the U.S. would be about twice as rich today as it is. So it's a major economic event. And a lot of my work focuses on ending the Great Stagnation by enabling innovation in sort of these four sectors, health, housing, energy, transportation, which together make up about half of the economy in the U.S. And I think of stagnation as the result of sort of three separate pathologies, and one of them is the way that we do safety regulation in the West. I think the others are sort of more generalized vetocracy and then protectionism. But today we're talking about safety. And so, yeah, safety, this sort of top-down model of safety, is a major cause, I think. So it's worth thinking about an alternative to that, and that alternative, I believe, is to build. I think that the responsibility is on us to show that there is another way. If we don't have a good answer for how to deal with real or perceived risks in a decentralized and non-coercive way, people will continue to agitate for the coercive way, right? And, you know, I'm showing these modalities, again, for the different ways that we can sort of privately or non-coercively or non-top-down provide public goods, and we can think about how we can apply each of these to different aspects of the public goods problem, right? 
So, you know, maybe there are safety resources that can be created, that can be done just via firms that can internalize things. Maybe there's a risk that AI will be able to use sophisticated phishing attacks that need to be addressed. And we can do that through Better protocols new standards authentication authenticated communication standards Maybe air filtration systems as Vitalik mentioned can protect against Biological attacks, but also provide ancillary benefits in the form of reduced illness so that they don't require like mandates or, you know, top-down regulation. So I think another aspect of the solution, of the build solution, is to focus on concrete and near-term problems. I think a lot of attention is focused on the most fanciful scenarios, but a lot of the near-term solutions also have a bearing on those risks. And so working on things that are concrete and yield tangible benefits now can, you know, if those other more extreme scenarios ever materialize, we can be more prepared for them than we otherwise would be by focusing on stuff that's more near-term, concrete, tangible. And finally, I think a lot of times people think about, you know, what can go wrong with new technologies, AI and biotech in particular, right? And I would think that this needs to be balanced by what can go right. And I don't mean that in a trite way that shuts down thinking about risks, but as a counterweight to the panic that sometimes people feel and as a motivation to build the future and to build an exciting future that we all want.", "eventId": "devcon-7", - "slot_start": 1731403800000, - "slot_end": 1731405600000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/12GuPqjQk66_MOFYNzQAXdDgl9b2uXDcWEc4im_qwX7E", - "resources_slides": null, + "slot_start": 1731553800000, + "slot_end": 1731555000000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1Ark5gHHkzTiHgbw7rdgfM5t6pIra-jjXvX-Qq1FPlRk", + "resources_slides": "https://drive.google.com/file/d/1txb1RD4YC1AqdjMaBEM3TEpGSqu3BFac/view", "speakers": [ - "tracheopteryx" + "eli-dourado" ] }, "vector": [ 0, + 6, 0, 0, 0, @@ -758735,11 +756585,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -758942,6 +756787,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -759373,7 +757219,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -759981,6 +757826,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -760043,11 +757889,9 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, + 2, 0, 0, 0, @@ -760060,42 +757904,37 @@ }, { "session": { - "id": "the-next-generation-of-governors-will-be-modular", - "sourceId": "DEAUWE", - "title": "The next generation of governors will be modular!", - "description": "Onchain governance is one of the main non-financial usecases of ethereum. Still, innovation in that space is slow, and deployed solution are still very much tighted to financial assets. In order to move away from that situation, and build more powerfull governance solution, we need to build a more modular and evolutive approach.", - "track": "Coordination", + "id": "the-rated-list", + "sourceId": "QNYDCR", + "title": "The Rated List", + "description": "The Rated List construction aims to minimise the number of requests required to complete sampling in Data Availability Sampling (DAS) for Ethereum. This optimisation becomes especially critical in the context of Full DAS, as data production per slot is anticipated to far exceed the current Deneb-Cancun (Dencun) specifications. 
The Rated List attempts to improve rate of successful sampling against unfavourable network conditions there by reducing the bandwidth consumption of the overall network.", + "track": "[CLS] EPF Day", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Governance", - "Design", - "modular", - "Design", - "Governance" - ], - "keywords": [ - "Smart contracts", - "modularity" + "DAS", + "Data Availability" ], - "duration": 418, + "keywords": [], + "duration": 1052, "language": "en", - "sources_swarmHash": "712084596ebb0aeddfcee323eece11a1914339db0b1d9170b841199a20de0882", - "sources_youtubeId": "iyWhVEouHn4", + "sources_swarmHash": "e480e5733ce7ffe3ddd9be001a4ffd129ed57c434bd01f5a7f62211dff3e67ab", + "sources_youtubeId": "edyn9Hi2zsY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734849a9dbb7a90e1fb0634", + "sources_streamethId": "6734828d9dbb7a90e1dd808a", "eventId": "devcon-7", - "slot_start": 1731489600000, - "slot_end": 1731490200000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1DnvD2EnuiJkqkdlnAA1h6CZl0zqKU90ShcgX4KV0SrE", - "resources_slides": null, + "slot_start": 1731486600000, + "slot_end": 1731487500000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1tvKSVVMilC4YJnTAe-LSaWUsQBBm9OaP3zYQYmWuVJ4", + "resources_slides": "https://drive.google.com/file/d/1-3xV6cSq7k-Oh3oiJeP7Y6Lns7tpY2Ey/view", "speakers": [ - "hadrien-croubois" + "hopinheimer", + "chirag-mahaveer-parmar" ] }, "vector": [ @@ -760110,11 +757949,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -760750,6 +758589,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -760913,6 +758753,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -760946,12 +758787,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -760993,7 +758828,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -761160,7 +758994,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -761208,6 +759041,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -761417,8 +759251,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -761435,27 +759269,46 @@ }, { "session": { - "id": "the-open-source-orchestra", - "sourceId": "9PWLBV", - "title": "The Open Source Orchestra", - "description": "Member of the Open Source Orchestra", - "track": "Entertainment", - "type": "Music", - "expertise": "Expert", - "audience": "Engineering", + "id": "the-ripple-effect-of-devcon-vi", + "sourceId": "E3U3XU", + "title": "The Ripple Effect of Devcon VI", + "description": "Devcon VI in Bogotá accelerated community growth across the region. Local communities emerged in several cities in Colombia and Latin America. The gathering provided leaders with a new perspective on enhancing collective creation for social impact and blockchain adoption. 
At ETH Bogotá, we used this spark to transition from hosting general events to creating an educational system for developers and builders, aiming to push the adoption of blockchain and Ethereum in a new way.", + "track": "Real World Ethereum", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], - "language": "en", - "speakers": [ - "sophia-spirlock" + "tags": [ + "Vision", + "Ethereum for Good", + "Local Impact", + "education", + "Ethereum for Good", + "Local Impact", + "Vision" ], + "keywords": [ + "Education" + ], + "duration": 460, + "language": "en", + "sources_swarmHash": "939368d83ac93c262094341c954469dc8618657b52baa547b809c7a1dd20759b", + "sources_youtubeId": "YgLrDeYqPaE", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6736c0c59dbb7a90e1cc98e5", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731553200000, - "slot_end": 1731556800000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1MLErEiLaty6zwbafFEy3AROdYSwqpoEoEBnY5JL_9YY" + "slot_start": 1731559800000, + "slot_end": 1731560400000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1vrrnCLaeOKKIwa7Mc_RpUOzo-jB1B7QzDNcIzCEOrak", + "resources_slides": "https://drive.google.com/file/d/17jBFWGM6eWx_okIKLuGaSOrJaNrLv7SO/view", + "speakers": [ + "julio-cesar-arango" + ] }, "vector": [ 0, @@ -761464,9 +759317,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -762109,31 +759959,11 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -762320,6 +760150,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -762354,6 +760185,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -762460,6 +760292,21 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, 0, 0, 0, @@ -762775,7 +760622,6 @@ 0, 0, 0, - 2, 2, 0, 0, @@ -762783,6 +760629,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -762794,51 +760642,51 @@ }, { "session": { - "id": "the-political-economy-of-dacc", - "sourceId": "AXX3JD", - "title": "The political economy of d/acc", - "description": "The dynamics behind d/acc are not new. Economic history is full of examples of the private provision of public goods. If we want to reduce AI risks while preserving freedom from centralized control, it's worth thinking carefully about the different ways humans have solved isomorphic problems in the past, and how the same tools could apply today.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "id": "the-rise-of-ai-in-web3-development-ux", + "sourceId": "LTEX8X", + "title": "The Rise of AI in Web3 Development UX", + "description": "This talk explores the intersection of artificial intelligence and Web3 technologies, highlighting how AI can enhance the development of decentralized applications and blockchain ecosystems. The presentation will provide practical examples, code snippets, and insights into Web3 AI through the lens of the recent RemixAI integration into the Remix toolset. 
Attendees will gain valuable knowledge on leveraging AI to build more robust, intelligent, and user-friendly decentralized applications.", + "track": "Usability", "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Community", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Public", - "good" + "Tooling", + "User Experience", + "UI/UX", + "coding", + "generation", + "Tooling", + "UI/UX", + "User Experience" ], "keywords": [ - "d/acc" + "AI Web3", + "LLM", + "Code Generation" ], - "duration": 1079, + "duration": 512, "language": "en", - "sources_swarmHash": "44f39b5a61d0278c154c160191866ad38d028d9a4d0677cb0457ded12fe5dd30", - "sources_youtubeId": "Ukm0tcoedeg", + "sources_swarmHash": "45d5cff1ad3a51e4710550ab842c2737368782c4efded4ea265d96d0a3f09a19", + "sources_youtubeId": "cAU9DswPblk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67356f0f9dbb7a90e17febf4", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67356f0f9dbb7a90e17febf4.vtt", - "transcript_text": " All right. Everybody, I'm thrilled to be here. I am Eli Dorado. I'm Chief Economist at the Abundance Institute, and it's a real pleasure to be here to talk about DIAC. I'm approaching this through my training as a sort of political economist, and I'm going to give a high-level overview of how I see this space. level overview of how I see this space. I want to start by talking about our instincts for both control and freedom. We all have these instincts. And in the last few years with AI innovation, we've seen this kind of like control narrative or this kind of control instinct be on display. Like we need an enforced pause on AI development. We need to shut down data centers. We need a kill switch. We need regulation. We need to restrict model releases or we need to impose bans on open source AI models or export controls. And sort of the counter reaction to that is like, no, we need freedom of speech. Are we really gonna regulate matrix multiplication? And this is a slippery slope to totalitarianism. And then I think that there's an interesting conclusion at the end that, therefore, I'm gonna not believe the premise. I'm not going to believe that there is any risk here. And I think that this conclusion is very natural. It's really bad epistemic hygiene, but it's common and we need to acknowledge it. And you see this in completely different contexts with things like climate. People will say oh, I just don't believe in it, because they don't want to, like, accept the sort of control-oriented beliefs of the other side. So just because freedom is good doesn't mean that the problem raised by the anti-freedomites is fake, right? that the problem raised by the anti-freedomites is fake, right? So, DIAC approaches both AI safety and biosafety from the perspective that they are genuine public goods. But crucially, just because we acknowledge the existence of safety as a public good doesn't mean that we think the right approach is to generate it through top-down coercive means, right? acknowledge the existence of safety as a public good doesn't mean that we think the right approach is To generate it through top-down coercive means right? 
We're gonna take both freedom and public goods seriously, and I think that's the ethos behind The act and about a lot of it theory um in general So that brings us to the next part of the talk the private provision of public goods So So that brings us to the next part of the talk, the private provision of public goods. So what is a public good? In classical economic terms, a public good is one that is neither excludable nor rivalrous. So like the canonical example is the lighthouse. A lighthouse on a coastline provides guidance for all the ships, whether or not they pay, right? So a lighthouse is not an excludable good. And at the same time, one ship relying on the lighthouse for guidance does not diminish the ability of any other ship to rely on the lighthouse for guidance. So the lighthouse is non-rivalrous. So game theoretically here, we have a free rider problem, and we should not expect the lighthouse to be produced by the market, right? You must rely on governments to produce lighthouses. So one of the great economists of the 20th century, Ronald Coase, got interested in this example, and he decided to actually collect data on lighthouses in Britain. And astonishingly, he found that every single one of them was privately constructed. So we have a free rider problem. It's not supposed to happen. But every single lighthouse in Britain was privately constructed. So how can this be? What Coase found is that lighthouses were vertically integrated with ports. Coase found is that lighthouses were vertically integrated with ports. So the port operator would build a lighthouse so that people could come to their port. And then they would recoup the costs through port fees. And what this example shows is that creative structuring can turn a public good problem into a private good provision. public good problem into a private good provision. Another closely related idea is a common pool resource, right? With the classic example being irrigation systems. So irrigation systems are arguably one of the things that led to the creation of states in the first place. So Carl Witt Vogel, in his book Oriental Despotism, advances the concept of a hydraulic empire a powerful state as in Egypt or Mesopotamia or China that's built around irrigation and maintenance of this common pool resource is a public good and it's worth it to accept some coercion if it means you get the benefits of irrigation so just like with the lighthouse there's a twist so later scholars if it means you get the benefits of irrigation. So just like with the lighthouse, there's a twist. So later scholars, particularly Eleanor Ostrom, figured out that many complex irrigation systems, like in the Philippines or in this one in Valencia, Spain, were cooperatively maintained without an autocrat. This one goes back a thousand years. And so she won a Nobel Prize for figuring out the conditions under which such cooperation is possible. Another example of non-coercive public good provision is standard setting. So standards are a public good, but because the value of the standard goes up when other people use it, there's an incentive to cooperate in standard production, standard development. So one of the explicit powers reserved for the government in the US Constitution is to develop standards, but it turns out that private standard setting happens all the time. This is an example from IETF, but we're here at DevCon, and Ethereum itself involves a lot of cooperatively produced standards. 
I'm not going to do a long spiel on each of these modalities, but there's a lot of different ways in which public good production can be incentivized without relying on sort of centralized top-down government coercion. We've talked about the first three already. You can think of a lot of open-source software development as scratching an itch. Sometimes people do things to accumulate prestige. Sometimes people give money to causes they believe in. My organization, the Abundance Institute, is a 501 nonprofit. Our budget is entirely funded by donors who get very little public recognition. Usually they don't want it. And they give just because they believe in the mission. Norms like politeness or not engaging in petty theft are also a source of public goods. And of course, mechanism design is an important one for Ethereum. We can think of consensus as a public good and the consensus protocol as a mechanism that incentivizes coming to consensus. So public goods, like AI safety and biosafety, can be provided non-coercively without government involvement, and it happens all the time, and it's possible that by some accountings, most public goods are in fact privately produced. And so now I want to show the flip side. When public goods, and particularly safety, is provided coercively top-down by the state, the results are often pretty bad. So let's look first at nuclear energy. In the 1950s and 60s, we experienced cost reductions as we deployed more nuclear plants. So this is common in industry. It's called the learning rate. And then in the 1970s, the trend reversed. Costs started to explode. Initially it began in the early 1970s with environmental regulation and then it accelerated in response to the Three Mile Island incident in 1979, which by the way, nobody was harmed by that incident at all. Another domain, which Vitalik referenced, in which safety regulation has had enormous costs is pharmaceutical development. And by the way, this is adjusted for inflation, just like the last chart was also. So these higher costs of bringing drugs to market represent thousands of missing drugs. We're missing thousands of drugs. And people die because of the lack of drugs. But these people are not identifiable victims. And my friend and former professor Alex Tabarrok calls it the invisible graveyard of people people dying because of lack of drugs brought to market Aside from nuclear and pharmaceuticals the other major industry that is regulated via like pre-market approval is Aviation so everyone thought thought after World War II that there was going to be a growing personal aviation industry, that people coming back from World War II knew how to fly, they could buy surplus military airplanes, and we would all just have essentially flying cars, just personal aircraft that would take us wherever we wanted to go. And it could have happened, but we had increased regulation at a couple different points along the way here in the mid-1960s and early 80s, and this strangled the industry. Unfortunately, FAA has recognized that they've gone too far with personal aircraft regulation. They're beginning to actually loosen the rules. And something that's super interesting is that they actually think that loosening the rules for a category called light sport aircraft will on net increase safety through both innovation, so the more you innovate, the safer the aircraft get, but also through changing the composition of the fleet and sort of increasing the use of manufactured airplanes. 
This is a way that deregulation can lead to actually higher levels of safety. And it's interesting to see a major federal regulator recognize that. So there's a lot of reasons, a lot of different examples in which coercive safety regulation doesn't actually make us safer. So we talked about nuclear, but let's compare it to coal. it to coal. So we have, nuclear is highly regulated, highly safe. And meanwhile, we have coal plants still operating and contributing to thousands and thousands of deaths. I estimate in the US, we have about 16,000 deaths a year from coal emissions, which is like five 9-11s every year. And everyone's apparently okay with that, but not with lowering the bar for nuclear plants. You know, experimental drugs certainly can be unsafe, but a lot of times drugs are substitutes for other medical procedures. And if you have to have a surgery or a hospital stay, those are risky also. And we don't balance the risks in those kinds of... We don't make that comparison in our regulatory system. One thing that strikes me as really interesting is in Europe, you can buy a contact lens from a vending machine. But in the U.S., you have to go, it's not safe to do that. So you have to go to an optometrist and get a prescription first before you're allowed to do that. So it's thinly veiled protectionism. There's research that shows, by an economist named Parker Rogers, that shows that deregulated and down-regulated medical devices are actually safer than the more regulated device types. He looks at cases where device types are, where the regulation changes on the device types, and he finds out that the less regulated types increase entry into the market, they increase competition, and ultimately they turn out to be both safer and cheaper. And I already mentioned, the over-regulation of manufactured aircraft means that people select for experimental aircraft, a lot of times amateur, home-built aircraft. So the background here, among all this carnival of stagnant negative effects from safety regulation, is what's called the Great Stagnation. This is a period in the U.S. and much of the West known as the Great Stagnation. And if we hadn't experienced it beginning around 1973, the U.S. would be about twice as rich today as it is. So it's a major economic event. And a lot of my work focuses on ending the Great Stagnation by enabling innovation in sort of these four sectors, health, housing, energy, transportation, which together make up about half of the economy in the U.S. And I think of stagnation as the result of sort of three separate pathologies, but like one of them is the way that we do safety regulation in the West. I think the other is sort of more generalized vitocracy and then protectionism. But today we're talking about safety. And so, yeah, so safety, the way we, this sort of top-down model of safety is a major cause, I think. So it's worth thinking about an alternative to that, and that alternative, I believe, is to build. I think that the responsibility is on us to show that there is another way if we don't have a good answer for how to deal with real or perceived risks in a decentralized and non-coercive way people will continue to agitate for the coercive way right and? And, you know, I'm showing these modalities, again, for the different ways that we can sort of privately or non-coercively or non-top-down provide public goods, and we can think about how we can apply each of these to different aspects of the public goods problem, right? 
So, you know, maybe there are safety resources that can be created, that can be done just via firms that can internalize things. Maybe there's a risk that AI will be able to use sophisticated phishing attacks that need to be addressed. And we can do that through Better protocols new standards authentication authenticated communication standards Maybe air filtration systems as Vitalik mentioned can protect against Biological attacks, but also provide ancillary benefits in the form of reduced illness so that they don't require like mandates or, you know, top-down regulation. So I think another aspect of the solution, of the build solution, is to focus on concrete and near-term problems. I think a lot of attention is focused on the most fanciful scenarios, but a lot of the near-term solutions also have a bearing on those risks. And so working on things that are concrete and yield tangible benefits now can, you know, if those other more extreme scenarios ever materialize, we can be more prepared for them than we otherwise would be by focusing on stuff that's more near-term, concrete, tangible. And finally, I think a lot of times people think about, you know, what can go wrong with new technologies, AI and biotech in particular, right? And I would think that this needs to be balanced by what can go right. And I don't mean that in a trite way that shuts down thinking about risks, but as a counterweight to the panic that sometimes people feel and as a motivation to build the future and to build an exciting future that we all want.", + "sources_streamethId": "6735a0c79dbb7a90e1aa29ad", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735a0c79dbb7a90e1aa29ad.vtt", + "transcript_text": " Yeah, hello everyone, my name is Stéphane Tetsing and I work with the Remix team, and my talk today will be about the rise of AI in the Web3 development area. I will be much more on the application side. And yeah, regarding what AI can actually bring to the Web3 environment, these are actually the main application fields that AI can bring a lot to. First of all, the user experience, where we can incredibly enhance how the user actually perceives the platform. And also decentralized governance, which AI can actually help to much more incentivize. Also the creation of digital assets, also DAOs. And we cannot speak about AI in Web3 without talking about fraud detection and also the security aspect. So actually, enhancing the user experience is pretty much easy. Here we have an example of the Remix platform where we have code completion, code explaining. Those are pretty much low-hanging fruits for those who know how LLMs work. And yeah, this actually also already provides a lot of value for the user. And the next stage of providing value for a user is by having automated chatbots, like also AI support, which are integrated in the platform. Here again, a Remix example, where the AI chatbot is aware of what the user is doing and is always one step ahead in order to provide the user a much better experience and so forth. Yeah, also the AI can be used for creating personalized content and user feeds, creating also AI bots that will serve as oracles for contracts, and also providing investment recommendations and creating digital art, as we have already seen with NFTs, and automatically generating metadata for market visibility on different platforms without sharing the asset. 
Regarding the security aspect of how AI can actually improve how we develop a Web3 application, it can actually provide a much more secure development mechanism. Just think about it: when you're about to deploy a contract, there's AI around it that parses the contract for vulnerabilities and checks pretty much everything that goes around it. And also transactions: there's AI maybe sitting somewhere, watching the blockchain in real time and flagging fraudulent transactions and so on. So the AI is sitting somewhere, doing behavioral analysis and profiling everything and, yeah, flagging bad stuff. Okay, the last thing I will be talking about is agents. Agents are like a software layer on top of the AI model which actually performs autonomous actions and interacts with the AI in order to provide better value. And those agents are actually capable of doing vulnerability scans, helping improve decentralized governance, and also, regarding decentralized finance, we can automate that aspect of the Web3 environment by providing the best trading pairs, best prices, best algorithms for that. And also the smart contract automation that the agent can actually help with. And yeah, I think that was pretty much my talk. I am very much open to questions. Anyone with a question? I see the gentleman in the very back. I don't think I can throw that far. Thank you. Hello. So I had basically two questions. My first question would be regarding the Remix AI. So I actually utilize Remix a lot for contract development. So what I wanted to ask was, for the answers that Remix AI provides, how does Remix handle hallucination? The second is, Remix AI on the back end uses an LLM. So is that LLM open-sourced? And what are the things on which it is trained? Are there specific contracts on which it is trained? First of all, regarding the hallucination, I think it is a problem that you cannot actually avoid with the current state of development. But we aim to reduce that by utilizing state-of-the-art models. And to be precise regarding the second question, that will be the Llama model. We use the latest Llama model, which is available on Hugging Face. And yeah, they are deployed on our back ends, and very soon you will be able, as a private person, to run it locally at your own risk. That's all I'll say. Another gentleman? Over there. Yes. Sorry, I'm getting... Thank you very much. Very nice box. Thank you. I hear a lot of people talking about how we can do better contracts, how we can do better chains, and I feel that we're talking too much about the core layer, like how to make the things run, and we're not talking about how to build and use things on top of it, like web apps, general apps. Do you think that AI can improve here too? Or will it be bringing more to the table for the client part and the higher-level builder part? Or will it be bringing more to the table for the low level, like blockchain and contracts? I think it will be much more on the high-level perspective that it will bring that value. Okay, got it. Thanks. I think we have time for one last question, if there's any. One last question. Over there. There's a gentleman there. Yes. Amazing. Hi. Hi. My name is Moritz. I'm working on 42.money. We're trying to generate front-ends for smart contracts specifically. What is your vision on Web3 generation of UI? 
We see a lot of big players in Web2, but in Web, there's not really competition yet. I mean, my vision or my, the way I see it is that the future there is very much bright. We are right now at the beginning of the era of AI casing the Web 3 environment. And I think it is pretty much opened. You can do pretty much everything you would like to do. Yeah. Okay, thanks. Okay.", "eventId": "devcon-7", - "slot_start": 1731553800000, - "slot_end": 1731555000000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1Ark5gHHkzTiHgbw7rdgfM5t6pIra-jjXvX-Qq1FPlRk", - "resources_slides": null, + "slot_start": 1731565200000, + "slot_end": 1731565800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1zhCIin-EiFLgd3IrIQYnzWKZ4MmkJfeVVaweIJV7Mm0", + "resources_slides": "https://drive.google.com/file/d/17bBCr23MdDqMgPwMVeFKEnIf_3b07RVf/view", "speakers": [ - "eli-dourado" + "stephane-tetsing" ] }, "vector": [ - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -762847,6 +760695,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -763045,7 +760894,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -763492,6 +761340,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -763602,10 +761451,12 @@ 0, 0, 0, + 6, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -763643,6 +761494,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -764090,8 +761942,7 @@ 0, 0, 2, - 0, - 0, + 2, 0, 0, 0, @@ -764149,12 +762000,12 @@ 0, 2, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -764167,37 +762018,47 @@ }, { "session": { - "id": "the-rated-list", - "sourceId": "QNYDCR", - "title": "The Rated List", - "description": "The Rated List construction aims to minimise the number of requests required to complete sampling in Data Availability Sampling (DAS) for Ethereum. This optimisation becomes especially critical in the context of Full DAS, as data production per slot is anticipated to far exceed the current Deneb-Cancun (Dencun) specifications. The Rated List attempts to improve rate of successful sampling against unfavourable network conditions there by reducing the bandwidth consumption of the overall network.", - "track": "[CLS] EPF Day", - "type": "Lightning Talk", + "id": "the-rise-of-appchains-from-l2s-to-rollup-clusters", + "sourceId": "SEARYQ", + "title": "The rise of Appchains: from L2s to Rollup Clusters", + "description": "Ethereum's rollup-centric approach has led to the emergence of L2 Rollup Clusters reducing fees but creating fragmented liquidity and a less seamless user experience. Third-party bridges, though helpful, are cumbersome, vulnerable to hacks ($2B losses to date), and costly, leading to high fees. 
In this keynote, Alex will discuss how native interoperability, with ZK at its core, can resolve fragmentation, enabling Clusters to collaborate instead of competing for users and liquidity, ultimately dr", + "track": "Layer 2", + "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "DAS", - "Data Availability" + "Ethereum Roadmap", + "Appchains", + "Zero-Knowledge", + "interoperability", + "Appchains", + "Ethereum Roadmap", + "Zero-Knowledge" ], - "keywords": [], - "duration": 1052, + "keywords": [ + "Fragmentation", + "UX", + "interoperability", + "Rollup Clusters", + "L2" + ], + "duration": 1508, "language": "en", - "sources_swarmHash": "e480e5733ce7ffe3ddd9be001a4ffd129ed57c434bd01f5a7f62211dff3e67ab", - "sources_youtubeId": "edyn9Hi2zsY", + "sources_swarmHash": "67989f0d9198656929f445c0b928d7b40f9c288f16a42d67c8773572544bef03", + "sources_youtubeId": "HHl2iOgP4FA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734828d9dbb7a90e1dd808a", + "sources_streamethId": "673498739dbb7a90e1d3f53f", "eventId": "devcon-7", - "slot_start": 1731486600000, - "slot_end": 1731487500000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1tvKSVVMilC4YJnTAe-LSaWUsQBBm9OaP3zYQYmWuVJ4", - "resources_slides": null, + "slot_start": 1731493800000, + "slot_end": 1731495600000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1WOJXGXgVk5LDrCpMtULqypFYqyEzI5whhM4XbIRAcVA", + "resources_slides": "https://drive.google.com/file/d/1TcKl3w5dbErFKy6Uyaq_Yk9I_JTyZBPV/view", "speakers": [ - "hopinheimer", - "chirag-mahaveer-parmar" + "alex-gluchowski" ] }, "vector": [ @@ -764208,6 +762069,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -764216,12 +762078,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -764853,8 +762709,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -764862,6 +762716,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -764967,6 +762822,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -765019,8 +762875,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -765148,6 +763002,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -765285,6 +763140,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -765308,7 +763164,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -765343,6 +763198,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -765519,7 +763375,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -765532,51 +763387,55 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "the-ripple-effect-of-devcon-vi", - "sourceId": "E3U3XU", - "title": "The Ripple Effect of Devcon VI", - "description": "Devcon VI in Bogotá accelerated community growth across the region. Local communities emerged in several cities in Colombia and Latin America. The gathering provided leaders with a new perspective on enhancing collective creation for social impact and blockchain adoption. At ETH Bogotá, we used this spark to transition from hosting general events to creating an educational system for developers and builders, aiming to push the adoption of blockchain and Ethereum in a new way.", + "id": "the-role-of-culture-in-shaping-technology-the-case-for-cuteacc", + "sourceId": "LRJTXY", + "title": "The role of culture in shaping technology - the case for cute/acc", + "description": "Who builds technology and for whom? In decentralized technology, we must apply the cypherpunk ethos not only to the product we want to provide to the world but also to the manner we build that product. We must avoid imposing our worldview onto different cultures, or we risk reinventing tech neocolonialism. 
This talk will illustrate the risks of concentration of power and tech within our industry into the hands of a few cultures and present ways to build a truly cypherpunk future.", "track": "Real World Ethereum", "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Community", + "audience": "Developer", "featured": false, "doNotRecord": false, - "tags": [ - "Vision", - "Ethereum for Good", - "Local Impact", - "education", - "Ethereum for Good", - "Local Impact", - "Vision" - ], "keywords": [ - "Education" + "Philosophy", + "Diversity", + "Democracy" + ], + "tags": [ + "Network State", + "Digital Sovereignty", + "Decentralization", + "diversity", + "democracy", + "philosophy", + "Decentralization", + "Digital Sovereignty", + "Network State" ], - "duration": 460, "language": "en", - "sources_swarmHash": "939368d83ac93c262094341c954469dc8618657b52baa547b809c7a1dd20759b", - "sources_youtubeId": "YgLrDeYqPaE", + "sources_swarmHash": "4478df6dd6b17283a4f2ec08a8e0f583a39783faa563f20c364a7a264546fa1a", + "sources_youtubeId": "ENttqPaOPS8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736c0c59dbb7a90e1cc98e5", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", + "speakers": [ + "fatemeh-fannizadeh" + ], "eventId": "devcon-7", - "slot_start": 1731559800000, - "slot_end": 1731560400000, + "slot_start": 1731560400000, + "slot_end": 1731561000000, "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1vrrnCLaeOKKIwa7Mc_RpUOzo-jB1B7QzDNcIzCEOrak", - "resources_slides": null, - "speakers": [ - "julio-cesar-arango" - ] + "resources_presentation": "https://docs.google.com/presentation/d/1Wi0ob1KXq6nswjq25vU56mNvitsmnOnrWaRe-gSp-3k", + "resources_slides": "https://drive.google.com/file/d/1Jl-Fpz07dhs4JpkVKy2sOiGOvWS7sdk6/view" }, "vector": [ 0, @@ -766191,48 +764050,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -766416,14 +764233,10 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, + 2, 0, 0, 0, @@ -766456,10 +764269,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -766479,6 +764288,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -766506,6 +764316,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -766563,7 +764374,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -766576,7 +764386,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -766885,6 +764694,38 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -766895,13 +764736,23 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, 2, 0, 0, @@ -766910,53 +764761,45 @@ 0, 0, 0, + 0, + 0, + 0, 0 ] }, { "session": { - "id": "the-rise-of-ai-in-web3-development-ux", - "sourceId": "LTEX8X", - "title": "The Rise of AI in Web3 Development UX", - "description": "This talk explores the intersection of artificial intelligence and Web3 technologies, highlighting how AI can enhance the development of decentralized applications and blockchain ecosystems. The presentation will provide practical examples, code snippets, and insights into Web3 AI through the lens of the recent RemixAI integration into the Remix toolset. 
Attendees will gain valuable knowledge on leveraging AI to build more robust, intelligent, and user-friendly decentralized applications.", - "track": "Usability", - "type": "Lightning Talk", + "id": "the-shape-of-protocols-to-come", + "sourceId": "TYGBPN", + "title": "The Shape of Protocols to Come", + "description": "Ethereum defies easy categorization—it blends aspects of money, nations, and more, yet doesn't fit neatly into any single category. To build better mental models for understanding Ethereum, we've spent the past two years stepping back and exploring the broader class it belongs to: Protocols. This talk explores the fundamental properties of protocols, strategies for navigating them, and how Ethereum can uniquely contribute to this emerging research field.", + "track": "Coordination", + "type": "Talk", "expertise": "Beginner", "audience": "Engineering", - "featured": false, + "featured": true, "doNotRecord": false, "tags": [ - "Tooling", - "User Experience", - "UI/UX", - "coding", - "generation", - "Tooling", - "UI/UX", - "User Experience" - ], - "keywords": [ - "AI Web3", - "LLM", - "Code Generation" + "Ethereum Roadmap", + "Protocol Design", + "Use Cases" ], - "duration": 512, + "keywords": [], + "duration": 1402, "language": "en", - "sources_swarmHash": "45d5cff1ad3a51e4710550ab842c2737368782c4efded4ea265d96d0a3f09a19", - "sources_youtubeId": "cAU9DswPblk", + "sources_swarmHash": "43b3f1b06406e849ea5082a4989e38e5f86d942069ea888dc8d826cab53670a5", + "sources_youtubeId": "3-9Ep6qQS3A", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735a0c79dbb7a90e1aa29ad", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735a0c79dbb7a90e1aa29ad.vtt", - "transcript_text": " yeah hello everyone my name is Stefan Tessin and I work with the Remix team and my talk today will be about the rise of AI in web pre-development area I will be much more on the application side. And yeah, regarding what the AI can actually bring to the web environment, these are actually the main application fields that AI can bring a lot to. First of all, the user experience, where we can incredibly enhance how the user actually perceives the platform. And also the decentralized governance that AI can actually have to much more incentivize this term. Also the creation of digital assets, also DAOs. And we cannot speak about AI in Web3 without talking about fraud detection and also the security aspect. So actually, enhancing the user experience is pretty much easy. Here we have an example of the Remix platform where we have code completion, code explaining. Those are pretty much low-hanging fruits for those who knows how LLM works. And yeah, this actually also already provides a lot of input for the user, a lot of value. And next stage of providing value for a user is by having automated chatbots, like also AI support, which are integrated in the platform. Here again, a remix example, where the AI chatbot is aware of what the user is doing is always one step ahead in order to kind of provide the user a much more better experience and so forth. Yeah, also the AI can be used for creating personalized content, user feeds creating also ai bots that will be a safe as oracle for contracts and also provide investment recommendation and create digital arts i wish as we have already seen with nfts and automatically generate metadata for market visibilities on different platforms without sharing the asset. 
Regarding the security aspect of how AI can actually improve how we develop a Web3 application, it can actually provide a much more secure development mechanism. Just think about it when you're about to deploy a contract and there's AI around it that kind of pass the contract and for vulnerabilities and check pretty much everything what goes around it. And also transaction, there's AI maybe sitting somewhere watching the blockchain out in real time and kind of flagging fraudulent transactions and so on. So the AI is sitting somewhere and kind of doing behavioral analysis and profiling everything and yeah, flag bad stuff. Okay, the last thing I will be talking about is about agents. Agents is like a software layer on top of the AI model which actually kind of perform autonomous actions and have an interaction with AI in order to get out, in order to provide base values. And those agents are actually able of kind of doing vulnerability scans, help in improving the decentralized governance, and also regarding the decentralized finance, we can automate that aspect of the Web3 environment by providing best trading pairs, best price, best algorithms for that. And also the smart contract automation that the agent can actually help with. And yeah, I think that was pretty much my talk I am very much open to questions anyone with question I see the gentleman in the very back I don't think I can throw that far. Thank you. Hello. So I had basically two questions. My first question would be regarding the Remix AI. So I actually utilize Remix a lot for contract development. So what I wanted to ask was the answers that Remix AI provides, how does Remix handle hallucination over it? The second is, Remix AI on the back end uses LLM. So is that LLM open-sourced? And what are the things on which it is trained at? Are there specific contracts on which it is trained at? Like, are there specific contracts on which it is trained at? First of all, regarding the hallucination, I think it is a problem that you cannot actually avoid with the nowadays states of development. But we aim to kind of reduce that by utilizing state-of-the-art models and to be much precise regarding also the second question that will be the open the llama model we use the latest llama model which available on human face and yeah they are deployed and our back ends and very soon they will be able as a private person to run it locally at your own risk. That's all I'll say. Another gentleman? Over there. Yes. Sorry, I'm getting... Thank you very much. Very nice box. Thank you. I hear a lot of things that a lot of people are talking like how we can do better contracts how can we do better chains and i feel that we're talking too much about uh the core layer like how to make the things run and we're not talking of how to build and use things over it like like web apps, general apps, do you think that AI can improve here too? Or will it be bringing more to the table for client part and higher level builder part? Or it will be bringing more to the table for low level, like blockchain and contracts? I think it will be much more on a high-level perspective that I will bring that value. Okay, got it. Thanks. I think we have time for one last question, if there's any. One last question. Over there. There's a gentleman there. Yes. Amazing. Hi. Hi. My name is Moritz. I'm working on 42.money. We're trying to generate front-ends for smart contracts specifically. What is your vision on Web3 generation of UI? 
We see a lot of big players in Web2, but in Web, there's not really competition yet. I mean, my vision or my, the way I see it is that the future there is very much bright. We are right now at the beginning of the era of AI casing the Web 3 environment. And I think it is pretty much opened. You can do pretty much everything you would like to do. Yeah. Okay, thanks. Okay.", + "sources_streamethId": "673354fa3a168eb5356c37d7", "eventId": "devcon-7", - "slot_start": 1731565200000, - "slot_end": 1731565800000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1zhCIin-EiFLgd3IrIQYnzWKZ4MmkJfeVVaweIJV7Mm0", - "resources_slides": null, + "slot_start": 1731409200000, + "slot_end": 1731411000000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/15QhPTXl4SBVPn-h9srUsdXijj_OIaZYVL1C32DxEyiw", + "resources_slides": "https://drive.google.com/file/d/1SIw7P-wt7_TCL7h30vY0ybz16D7Jr8O9/view", "speakers": [ - "stephane-tetsing" + "tim-beiko" ] }, "vector": [ @@ -766968,13 +764811,10 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, + 6, 0, 0, 0, @@ -767149,6 +764989,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -767615,7 +765456,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -767727,12 +765567,10 @@ 0, 0, 0, - 6, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -767759,6 +765597,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -767770,7 +765609,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -767792,6 +765630,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -767904,6 +765743,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -768219,8 +766059,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -768296,59 +766134,54 @@ }, { "session": { - "id": "the-rise-of-appchains-from-l2s-to-rollup-clusters", - "sourceId": "SEARYQ", - "title": "The rise of Appchains: from L2s to Rollup Clusters", - "description": "Ethereum's rollup-centric approach has led to the emergence of L2 Rollup Clusters reducing fees but creating fragmented liquidity and a less seamless user experience. Third-party bridges, though helpful, are cumbersome, vulnerable to hacks ($2B losses to date), and costly, leading to high fees. In this keynote, Alex will discuss how native interoperability, with ZK at its core, can resolve fragmentation, enabling Clusters to collaborate instead of competing for users and liquidity, ultimately dr", - "track": "Layer 2", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "the-silicon-hospital-and-a-new-way-to-make-medical-devices", + "sourceId": "D8UTDS", + "title": "The Silicon Hospital and a New Way to Make Medical Devices", + "description": "Could silicon be more effective for medical treatment than drugs someday? We think that day could be soon. Openwater has spent nearly 9 years developing new tech to treat a range of diseases. It's not pulse ox, fNIRs, HIFU or EEG ... it's new hard tech and it's open-source. We will demo the tech on stage, and share with you our clinical results to date and explain how the technology works. 
We expect to be in over 100 clinical trials next year.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Ethereum Roadmap", - "Appchains", - "Zero-Knowledge", - "interoperability", - "Appchains", - "Ethereum Roadmap", - "Zero-Knowledge" + "DeSci", + "Open Source Software", + "Scalability" ], "keywords": [ - "Fragmentation", - "UX", - "interoperability", - "Rollup Clusters", - "L2" + "Healthcare", + "", + "Medical" ], - "duration": 1508, + "duration": 939, "language": "en", - "sources_swarmHash": "67989f0d9198656929f445c0b928d7b40f9c288f16a42d67c8773572544bef03", - "sources_youtubeId": "HHl2iOgP4FA", + "sources_swarmHash": "", + "sources_youtubeId": "hNFQtpNHufk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673498739dbb7a90e1d3f53f", + "sources_streamethId": "6735bf739dbb7a90e1c73a9e", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735bf739dbb7a90e1c73a9e.vtt", + "transcript_text": " Tanya Cushman Reviewer:\" Peter van de Ven Hi, great to be here. Just first a brief intro to myself. I'm a hard tech pioneer in moonshots. I've created multi-billion dollar products on the hairy edge of physics. Former MIT professor, Brown University PhD in physics, and about eight years ago I started my latest project, it's called Open Water. And in rooms like these we've been building, trying to look at this idea of using the fact that our bodies can be penetrated by infrared light, ultrasound, and electromagnetics, and a belief that maybe we could leapfrog current healthcare systems and even drugs by modulating the phase of light and sound. So we've been doing that for a while. And the reason that I did it is I've shipped a lot of innovative silicon chips. So using silicon manufacturing that's coming online in consumer electronics and phase wave steering, I knew that we could steer beams of light, decode phase with interference, and even selectively select or destroy certain cells or stimulate certain cells. So we built these systems up for about four years, got pretty interesting results, filed a bunch of patents, a lot of work, and decided in early 2020 to get them into hospitals, start testing this on people. And the results are somewhat spectacular. First in stroke, large vessel occlusion stroke is the number two killer in the world. 
If you could diagnose that stroke in an ambulance, you could get the patient to the one of less than 5% of hospitals that could treat that stroke so the person wouldn't die so we built a portable that gives higher specificity and sensitivity for large vessel occlusion stroke than we can find published anywhere in the world that's 6.5 million lives per year now we go over to number three glial we gave some mice glioblastoma and treated them and move these mice into remission and on autopsy found that we didn't harm any healthy cells Charles River couldn't find any while we were waiting to get into humans for that we take that same device and put it into humans for severe depression and a 20-person trial and we were able to turn off the over-firing neurons in the default mode network sending these patients into remission So with that we're pretty excited It was time to go get more money We've raised a hundred million dollars today and we're announcing today, for the first time, that Vitalik gave us $50 million from the Shiba Inu coin. Woo-hoo! Thank you. Nondilutive. And it's made all the difference. With that money, we've been able to reduce those carts from really a laser the size of a room to this that's going into production next month and those carts that were the size of the room as well to these little consoles and all that's going into production next month this is a platform with a life-saving pan disease potential for not just stroke but all cardiovascular disease not just glioblastoma but all cancers not just mental disease but potentially all mental disease and beyond so open source is cool we're all believers but where this impacts the health care is even if you make hardware and software open source we open sourced all of our patents you still need approval to use it on somebody it's FDA it's regulatory approval the reality is in a comprehensive paper published recently the average cost of a novel therapeutic medical device approval is now 658 million dollars in capitalized cost and 13 years. So even if you open-source it, huh, what do you do? Well the key is here. 85% of that cost isn't in clinical trials. It's in the device. So that takes a lot out. Further, another 7% of that is in safety. So share the safety. Do we want a safer device or a less safe device? Will the regulator be able to approve a device, pan-cancer disease, with a hundred times more safety data will that make a choice easier or not will a doctor and patient find it easier to be able to access the safety data we think it's yes on all of those running those numbers our hypothesis is we take 653 million dollars out of the cost of developing a novel medical device with a de novo approval. So down to 5 million sub-three years. So how does this work? Phase wave modulation. Well these are the waves in light and sound.
And what we do, for example, on these yellow emitters is we delay the phase from emitter to emitter so we can focus that sound near or far right or left up or down anywhere we wish to in the brain or the body so glioblastoma cells can hide out among healthy neurons and the neurosurgeon you know that you can't take them all but these cells have a mechanical property that no other cells do all aggressive cancer cells have really large nucleuses and small cytoplasms and we can exploit that like an opera singer can ping this wine glass hit that note and destroy that wine glass and harm nothing else in the room so that's what we're doing with glioblastoma. We did sonication parameter sweeps, found the frequencies. This is low-intensity ultrasound. Lower intensity than used for diagnostic reasons on pregnant women and their fetuses for the last 50 years in rich countries. But these cells can't take it. We're destroying them. And here, at a different frequency, I can tell you the frequency of our open source, this is 400 kilohertz, the other one is 150 kilohertz, we're able to turn on, stimulate, or destimulate neurons. And that's been helpful not just for non-invasive brain computer interface but also for pulling people out of mental disease addictions and so forth it's all together I think we're enabling read write brain computer interface while we're trying to solve mental diseases it's they're impossible to separate so another module this is our read module, uses that laser I just picked up that we've reduced down. This laser and a bunch of camera chips, four camera chips that are in your smartphone. And those camera chips are super small. We take the lens off and they've got, they're cheap, because they're small. They have pixel sizes, the size of the wavelength of light. So we let that light go into the head, and what we see on that camera chip is this. And it looks like the waves on the ocean, and we can read the waves on the ocean like a sailor can read the waves on the ocean and know where the fish are, know where the land is. Here we interrogate a blood vessel and the pattern changes because where the light hits the moving blood cells, it ricochets differently changing that pattern. And we can read blood flow very accurately with this way. In fact we can read it 20 times more accurately by some measures than multi-million dollar MRI or CT scanners with this laser and some camera chips. So that's not just good for stroke, but all cardiovascular disease and all micro motions of the body, we believe. So to get this far, we've been working with a whole bunch of great institutes, but everybody wants it, so that's why we shrunk the system down and decided to go into production. And also, you can build it, it's all on GitHub, you can build it yourself if you'd like to. Everyone that wants it, you can all have it, you don't need permission, go to our website, take it. I want to mention here there's also evidence we may be able to suppress cytokine storms as Amy just discussed the impact of them treat inflammation even deactivate COVID with the same way we're deactivating glioblastoma cells there's some evidence we're working on that in our labs and we're using our devices to do research on long COVID and microclots. We can see if we can ID them and if we can break them apart. So as we discussed, this is the problem. $658 million in 13 years. It's why we don't get any new devices. It's why healthcare... We've got 20 to 40-year innovation moats.
So here's what we're doing about it we're flipping it on the side to make an open-source general-purpose platform and the apps on top it is either treatments so those are the ones with regulatory approval three years five million dollars then we get into the so-called 510(k) substantial equivalence, sub one million, sub one year. So the question, we're a for-profit company, I hate to be crass, but people sort of scratch their heads and ask me quietly afterwards, and so I thought I'd just come out like, look, it's free as in speech, not free as in beer, like you can publish the recipe of the beer. You can go make your own at home. Most of us just go to the store to buy it. But by making more of something, it's cheaper. We enjoy a portion of that savings as profit. We engender trust. That's new. We don't charge $1,400 for an EpiPen one day. If we misstep on something, if we screw up, you can go get it made yourself. But we ensure quality standard that's IRB ready and FDA ready, we're ISO 13485 compatible. By safety sharing, we halve the cost of clinical trials and double the speed for our customers. Plus, everybody just benefits. That's a cycle of innovation that's not a 20-year moat. So here are these new units. I've got them out here, LIFU and OpenMotion. All the electronics are going into the mechanicals and being tested, so they'll be ready next month. Here's more. And this brings me to a moment. We also did something just to save another $150,000 using our smartphone. And Vitalik, are you going to come up to get scanned in? So this is just in, so we prepped a little in this so this is the whole unit that's replacing that room and so if we put this on you I can actually turn it on see it so and then I will oops the strapping is being perfected it's Belka I don't think it's sticky enough. Can you? All right. So what we do, whoops, thank you for doing this. Oops. I just opened the wrong app. Oop. So, I'm just putting in a number here, and then I start, and, Just putting in a number here and then I start and it's just saying unable to focus, okay. Try again. Okay. Okay. So I'm just taking some pictures of him with the camera. That's all you have to do is walk around and then we have to get the top of his head. Okay. That's it so it's all so we can take this off now although we should leave it on if we were actually going to sonicate him so now we scan him in and Vitalik has an amazing brain and we get to look at it right here although this is the interesting part a bit this is an MRI he did last week and what we're gonna do is pick a spot for target the anterior cingulate is gonna make him happy we know that and so okay with standard so these are what the pictures look like. And then this is making him 3D. And this is so we can know where the... I'll show you what this does. So here we overlay the 3D MRI with a scan so we match his bone structure. And then we look at where the transducers are and we know exactly where they are. And now we set the target and we compute what phase delays we need on the transducers and then we nail that target. I would love to sonicate him on stage but we don't have permission from the Thai government yet. However, I met somebody that got permission to do 200 people at Esalen recently. So next year, we'll do something. But yeah, this is how it works for any part in your body.
And the interesting part is, as we develop our read part, the part seeing blood flow, we can get away from the MRI machine and use different scanning technology that's lower cost. So with that, this is somewhat like a smartphone moment from 20 years ago. Smartphones changed our lives. Look, a silicon hospital can happen. If you think of your smartphone, there's a dozen radios in it, four cameras and so forth. As we populate this, it can treat us for the diseases we have. Again, you might be asking, can I have one? My country, whatever. Yes, everybody can have one. Everyone has permission. You can build your own. I'll be showing these prototypes out in the hall shortly, and thank you.", "eventId": "devcon-7", - "slot_start": 1731493800000, - "slot_end": 1731495600000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1WOJXGXgVk5LDrCpMtULqypFYqyEzI5whhM4XbIRAcVA", - "resources_slides": null, + "slot_start": 1731574200000, + "slot_end": 1731575100000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1cscUxEQdkm5QVkLDeEDz09MWGMPqPDhGH5xZlEf1yRQ", + "resources_slides": "https://drive.google.com/file/d/1gFOq5_x-vCxTLqykwut0WLMaWJp8GRuF/view", "speakers": [ - "alex-gluchowski" + "mary-lou-jepsen" ] }, "vector": [ 0, + 6, 0, 0, 0, 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -769103,8 +766936,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -769189,6 +767020,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -769224,11 +767056,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -769283,7 +767117,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -769422,7 +767255,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -769480,7 +767312,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -769658,12 +767489,10 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, + 2, 0, 0, 0, @@ -769676,41 +767505,42 @@ }, { "session": { - "id": "the-role-of-culture-in-shaping-technology-the-case-for-cuteacc", - "sourceId": "LRJTXY", - "title": "The role of culture in shaping technology - the case for cute/acc", - "description": "Who builds technology and for whom? In decentralized technology, we must apply the cypherpunk ethos not only to the product we want to provide to the world but also to the manner we build that product. We must avoid imposing our worldview onto different cultures, or we risk reinventing tech neocolonialism. This talk will illustrate the risks of concentration of power and tech within our industry into the hands of a few cultures and present ways to build a truly cypherpunk future.", - "track": "Real World Ethereum", + "id": "the-state-of-web3-support-today-what-just-happened", + "sourceId": "BZRKUD", + "title": "The State of Web3 Support Today: What Just Happened?", + "description": "One of the most common and painful experiences someone can have today is also one of the most fundamental concepts we tend to take for granted - transactions. Users who seek support for their issues lack the appropriate level of information to even understand what they were doing when it all went wrong. 
This talk will examine why core web3 experiences are still problematic and propose things to consider when building experiences for everyone that ranges from in app UX to community support tools.", + "track": "Usability", "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Developer", + "audience": "Product", "featured": false, "doNotRecord": false, - "keywords": [ - "Philosophy", - "Diversity", - "Democracy" - ], "tags": [ - "Network State", - "Digital Sovereignty", - "Decentralization", - "diversity", - "democracy", - "philosophy", - "Decentralization", - "Digital Sovereignty", - "Network State" + "community", + "Accessibility", + "Tooling", + "User Experience" ], - "language": "en", - "speakers": [ - "fatemeh-fannizadeh" + "keywords": [ + "User Support", + "Community" ], + "duration": 304, + "language": "en", + "sources_swarmHash": "fb6714e3f29aebfbf0c0287d93a797c37483f8f4909ffb6478031e93712229e4", + "sources_youtubeId": "sur3bRJQw-U", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731560400000, - "slot_end": 1731561000000, + "slot_start": 1731408600000, + "slot_end": 1731409200000, "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1Wi0ob1KXq6nswjq25vU56mNvitsmnOnrWaRe-gSp-3k" + "resources_presentation": "https://docs.google.com/presentation/d/1jmtrpYtos5-qZy0sfliSMlhtQfUi9JSCAcTEP4C554k", + "resources_slides": "https://drive.google.com/file/d/1uJ7lYFZ6AxX4mXijjN9idjzNk1LjnYNP/view", + "speakers": [ + "fungible-taco" + ] }, "vector": [ 0, @@ -769719,6 +767549,8 @@ 0, 0, 0, + 0, + 0, 6, 0, 0, @@ -770326,7 +768158,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -770368,6 +768199,9 @@ 0, 0, 0, + 6, + 0, + 0, 0, 0, 0, @@ -770473,10 +768307,14 @@ 0, 0, 0, + 6, 0, 0, 0, 0, + 2, + 0, + 0, 0, 0, 0, @@ -770511,7 +768349,6 @@ 0, 0, 0, - 2, 0, 0, 2, @@ -770566,7 +768403,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -770594,7 +768430,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -770632,6 +768467,18 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -770974,28 +768821,6 @@ 0, 0, 0, - 2, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -771033,9 +768858,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -771049,37 +768874,46 @@ }, { "session": { - "id": "the-shape-of-protocols-to-come", - "sourceId": "TYGBPN", - "title": "The Shape of Protocols to Come", - "description": "Ethereum defies easy categorization—it blends aspects of money, nations, and more, yet doesn't fit neatly into any single category. To build better mental models for understanding Ethereum, we've spent the past two years stepping back and exploring the broader class it belongs to: Protocols. This talk explores the fundamental properties of protocols, strategies for navigating them, and how Ethereum can uniquely contribute to this emerging research field.", - "track": "Coordination", + "id": "the-supreme-ruler-of-the-world", + "sourceId": "TLWWCW", + "title": "The Supreme Ruler of the World", + "description": "VK rules the world. ZK rules the world, too, like a straightedge wielded with eyes closed. Rulers rule in simple ways: by lining things up and by checking they're all in line. Bring your high school math to learn straightedges called SumCheck and SumCalc and begin to appreciate ZK in simple geometric terms. No moon math. 
We'll visit lines, cubes and polynomials, to see how they can be used to deduce and to generate, to check and to delegate.",
      "track": "Applied Cryptography",
      "type": "Talk",
      "expertise": "Beginner",
      "audience": "Engineering",
      "featured": false,
      "doNotRecord": false,
      "tags": [
        "Scalability",
        "Validiums",
        "Zero-Knowledge",
        "sumcheck",
        "Scalability",
        "Validiums",
        "Zero-Knowledge"
      ],
      "keywords": [
        "sumcalc",
        "sumcheck"
      ],
      "duration": 1477,
      "language": "en",
      "sources_swarmHash": "1022835086d9f6a0b9aaa7c7256f587089b8b04eaddf0f24409aae60c8908355",
      "sources_youtubeId": "xJNvd6hbCdE",
      "sources_ipfsHash": "",
      "sources_livepeerId": "",
      "sources_streamethId": "67346c7c9dbb7a90e1c29eff",
      "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67346c7c9dbb7a90e1c29eff.vtt",
      "transcript_text": " Thank you so much for that nice introduction. I did actually go all out to try to make this the best title ever, or if not, it's at least a tribute. What I want to talk about today, just to give a level set, is Zero Knowledge. Of course, this applied cryptography track is intended and directed at people who have a little bit of knowledge about cryptography a little bit of knowledge about zero knowledge but are basically mostly developers interested in finding out why bother doing this and what is actually going on behind the scenes so without further ado I'm going to talk a little bit not too much metaphysical or mystical, but more about how do you get calculations done by powerful entities, and how do you trust that those results are actually true when they hand something to you. This is really the realm of zero knowledge. It's actually a bit broader, zero knowledge and verifiable knowledge. There's a lot of different properties that are important. They're really important to scaling things up on the blockchain, any kind of blockchain in general, and I'm sure many of you will have seen a lot of buzz around these topics as well. So what's the buzz about? How does this actually work? What is going on behind the scenes at some reasonable level? In general, I'll do a quick background. So zero-knowledge proofs or interactive proofs are ways for a computationally limited verifier, a client, to actually ask for the truth or falsity, ask for a claim to be made, and for somebody with a lot of power to give them a proof that that claim is true. So, for example, some of these problems might be complicated, like, hey, I know a Sudoku solution. And, you know, a 9x9 is not a big deal, but an nxn is actually an NP-complete problem to solve. So these are actually intensely complex problems that people work on and try to demonstrate the truth of. More practically, you might be looking at consensus algorithms and saying, hey, look, I can tell you that I checked 400,000 signatures, and they all checked out. You don't need to bother. So trust me. Well, in this setting, we don't really want to just trust people. We want to actually verify what's going on. So that's the setting. The scalability argument here is that one person is going to do the work, and everybody else can just sort of check after that one person did the work.
Okay, so for example, what did the consensus say? I checked a bunch of signatures. You know, did they all pan out? What's the new state on maybe on my own chain or on somebody else's chain? That's a lot of work to do. If you try to do that work inside of a contract, it's going to be very expensive. If somebody can do that work for you outside of a contract and just prove it to you in a contract, that helps you scale. Because it's a much cheaper thing to check than it is to calculate. And so in general, these things help us scale with things like roll-ups with other chains, with exchanges, and so forth. Anytime you have a big calculation that you want to land just the result of on another chain, we'll use these kind of results. So, all right. There are two kind of very closely connected problems here. In order to motivate it, I mean, really, we're going to talk about proving a calculation here, which is the middle one. We'll look at a couple tools. One is called sum check. It's actually kind of a very old play on words. When you're sending a message from A to B, you do a check sum to make sure it wasn't actually munged in the middle. Sum check is a way of getting a calculation from A to B and making sure the calculation wasn't messed up, that the result that you get is really true. Related to this is something I'm calling sum calc, which is how do you delegate a problem to somebody else and get them to calculate it for you. You're a contract on chain. You ask somebody off chain to do the calculation. So very related math behind it. You know, and then we'll go into a little bit of the math and see what helps us scale these different algorithms. Okay. So what's really happening here, what is the supreme rule? I'll get to it more in a second. It is basically a trick that helps us look at very, very large calculations, essentially exponential-sized calculations, and use a moderate amount of checking to see whether they're true or false. You can scale this down a little bit. You can say maybe I have a human scale and step calculation, and I have a very, very tiny verifier that only uses logarithmically many steps. 
I won't get too far into the weeds about the complexity of the theory, but really if you wanna think about it concretely, you got a million step calculation", "eventId": "devcon-7", - "slot_start": 1731409200000, - "slot_end": 1731411000000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/15QhPTXl4SBVPn-h9srUsdXijj_OIaZYVL1C32DxEyiw", - "resources_slides": null, + "slot_start": 1731484800000, + "slot_end": 1731486600000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1IP5PshRsU2LlH33ndPmkTGZJki3mzS-uZ3M-Yc5vD6o", + "resources_slides": "https://drive.google.com/file/d/1jzTFK1TLrydMBGUwNpxJBQyWtxPkcsTv/view", "speakers": [ - "tim-beiko" + "don-beaver" ] }, "vector": [ @@ -771093,7 +768927,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -771269,12 +769102,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -771747,6 +769574,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -771849,6 +769677,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -771880,7 +769709,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -771913,7 +769741,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -771978,6 +769805,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -772026,7 +769854,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -772286,6 +770113,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -772347,6 +770175,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -772419,46 +770248,51 @@ }, { "session": { - "id": "the-silicon-hospital-and-a-new-way-to-make-medical-devices", - "sourceId": "D8UTDS", - "title": "The Silicon Hospital and a New Way to Make Medical Devices", - "description": "Could silicon be more effective for medical treatment than drugs someday? We think that day could be soon. Openwater has spent nearly 9 years developing new tech to treat a range of diseases. It's not pulse ox, fNIRs, HIFU or EEG ... it's new hard tech and it's open-source. We will demo the tech on stage, and share with you our clinical results to date and explain how the technology works. We expect to be in over 100 clinical trials next year.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Community", + "id": "the-tension-between-mev-and-censorship-resistance-gadgets", + "sourceId": "G3MBF7", + "title": "The tension between MEV and Censorship Resistance Gadgets", + "description": "Although fairly unrelated at first glance, MEV is currently *the* bottleneck for a censorship-resistant Ethereum. This talk will first explore why MEV and censorship resistance are fundamentally counterforces. Then, we will dive into how MEV constrains the design space of censorship-resistant gadgets like Inclusion Lists and Concurrent Block Producers. 
What does the future of censorship resistance look like for Ethereum?", + "track": "Cryptoeconomics", + "type": "Talk", + "expertise": "Expert", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "DeSci", - "Open Source Software", - "Scalability" + "Ethereum Roadmap", + "Censorship Resistance", + "Design", + "MEV", + "protocol", + "Censorship Resistance", + "Ethereum Roadmap", + "MEV" ], "keywords": [ - "Healthcare", - "", - "Medical" + "Inclusion Lists", + "Protocol Design" ], - "duration": 939, + "duration": 1463, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "d9d78ece0ddf69b8a9645577faf0b8079b898bf69e9ec1ff922fed3e2860bd2d", + "sources_youtubeId": "4OJ1eCtEAVs", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735bf739dbb7a90e1c73a9e", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735bf739dbb7a90e1c73a9e.vtt", - "transcript_text": " Tanya Cushman Reviewer:\" Peter van de Ven Hi, great to be here. Just first a brief intro to myself. I'm a hard tech pioneer in moonshots. I've created multi-billion dollar products on the hairy edge of physics. Former MIT professor, Brown University PhD in physics, and about eight years ago I started my latest project, it's called Open Water. And in rooms like these we've been building, trying to look at this idea of using the fact that our bodies can be penetrated by infrared light, ultrasound, and electromagnetics, and a belief that maybe we could leapfrog current healthcare systems and even drugs by modulating the phase of light and sound. So we've been doing that for a while. And the reason that I did it is I've shipped a lot of innovative silicon chips. So using silicon manufacturing that's coming online in consumer electronics and phase wave steering, I knew that we could steer beams of light, decode phase with interference, and even selectively select or destroy certain cells or stimulate certain cells. So we built these systems up for about four years, got pretty interesting results, filed a bunch of patents, a lot of work, and decided in early 2020 to get them into hospitals, start testing this on people. And the results are somewhat spectacular. First in stroke, large vessel occlusion stroke is the number two killer in the world. If you could diagnose that stroke in an ambulance, you could get the patient to the one of less than 5% of hospitals that could treat that stroke to the one of less than 5% of hospitals that could treat that stroke so the person wouldn't die so we built a portable that gives higher specificity and sensitivity for large vessel collision stroke than we can find published anywhere in the world that's 6.5 million lives per year now we go over to number three glial we gave some mice glioblastoma and treated them and move these mice into remission and on autopsy found that we didn't harm any healthy cells Charles River couldn't find any while we were waiting to get into humans for that we take that same device and put it into humans for severe depression and a 20-person trial and we were able to Turn off the over firing neurons in the default mode Network sending these patients into remission So with that we're pretty excited It was time to go get more money We've raised a hundred million dollars today and we're announcing today, for the first time, that Vitalik gave us $50 million from the Shiba Inu coin. Woo-hoo! Thank you. Nondilutive. And it's made all the difference. 
With that money, we've been able to reduce those carts from really a laser the size of a room to this that's going into production next month and those carts that were the size of the size the room as well to these to little consoles and all that's going into production next month this is a platform with a life-saving pan disease potential for not just stroke but all cardiovascular disease not just glioblastoma but all cancers not just mental disease but potentially all mental disease and beyond so open source is cool we're all believers but where this impacts the health care is even if you make hardware and software open source we open sourced all of our patents you still need approval to use it on somebody it's FDA it's regulatory approval the reality is in a comprehensive paper published recently the average cost of a novel therapeutic medical device approval is now 658 million dollars in capitalized cost and 13 years. So even if you open-source it, huh, what do you do? Well the key is here. 85% of that cost isn't in clinical trials. It's in the device. So that takes a lot out. Further, another 7% of that is in safety. So share the safety. Do we want a safer device or a less safe device? Will the regulator be able to approve a device, pan-cancer disease with lots with a hundred times more safety data will that make a choice easier or not will a doctor and patient find it easier to be able to access the safety data we think it's yes on all of those running those numbers our hypothesis is we take 653 million dollars out of the cost of developing a novel medical device with a de novo approval. So down to 5 million sub-three years. So how does this work? Phase wave modulation. Well these are the waves in light and sound. And what we do, for example, on these yellow emitters is we delay the phase from emitter to emitter so we can focus that sound near or far right or left up or down anywhere we wish to in the brain or the body so glioblastoma cells can hide out among healthy neurons and the neurosurgeon you know that you can't take them all but these cells have a mechanical property that no other cells do all aggressive cancer cells have really large nucleuses and small cytoplasms and we can exploit that like an opera singer can ping this wine glass hit that note and destroy that wine glass and harm nothing else in the room so that's what we're doing with glioblastoma. We did sonification parameter sweeps, found the frequencies. This is low-intensity ultrasound. Lower intensity than used for diagnostic reasons on pregnant women and their fetuses for the last 50 years in rich countries. But these cells can't take it. We're destroying them. And here, at a different frequency, I can tell you the frequency of our open source, this is 400 kilohertz, the other one is 150 kilohertz, we're able to turn on, stimulate, or destimulate neurons. And that's been helpful not just for non-invasive brain computer interface but also for pulling people out of mental disease addictions and so forth it's all together I think we're enabling read write brain computer interface while we're trying to solve mental diseases it's they're impossible to separate so another module this is our read module, uses that laser I just picked up that we've reduced down. This laser and a bunch of camera chips, four camera chips that are in your smartphone. And those camera chips are super small. We take the lens off and they've got, they're cheap, because they're small. They have pixel sizes, the size of the wavelength of light. 
So we let that light go into the head, and what we see on that camera chip is this. And it looks like the waves on the ocean, and we can read the waves on the ocean like a sailor can read the waves on the ocean and know where the fish are, know where the land is. Here we interrogate a blood vessel and the pattern changes because where the light hits the moving blood cells, it ricochets differently changing that pattern. And we can read blood flow very accurately with this way. In fact we can read it 20 times more accurately by some measures than multi-million dollar MRI or CT scanners with this laser and some camera tips. So that's not just good for stroke, but all cardiovascular disease and all micro motions of the body, we believe. So to get this far, we've been working with a whole bunch of great institutes, but everybody wants it, so that's why we shrunk the system down and decided to go into production. And also, you can build it, it's all on GitHub, you can build it yourself if you'd like to. Everyone that wants it, you can all have it, you don't need permission, go to our website, take it. I want to mention here there's also evidence we may be able to suppress cytokine storms as amy just discussed the impact of them treat inflammation even deactivate covid with the same way we're deactivating leoblastoma cells there's some evidence we're working on that in our labs and we're using our devices to do research on long COVID and microclots. We can see if we can ID them and if we can break them apart. So as we discussed, this is the problem. $658 million in 13 years. It's why we don't get any new devices. It's why healthcare... We've got 20 to 40-year innovation modes. So here's what we're doing about it we're flipping it on the side to make a open source generally general-purpose platform and the apps on top it is either treatments so those are the ones with regulatory approval three years five million dollars then we get into the call to 510 K substantial equivalents said one million, sub one year. So the question, we're a for-profit company, I hate to be crass, but people sort of scratch their heads and ask me quietly afterwards, and so I thought I'd just come out like, look, it's free as in beer, it's free as in speech, not free as in beer, like you can publish the recipe of the beer. You can go make your own at home. Most of us just go to the store to buy it. But by making more of something, it's cheaper. We enjoy a portion of that savings as profit. We engender trust. That's new. We don't charge $1,400 for an EpiPen one day. If we misstep on something, if we screw up, you can go get it made yourself. But we ensure quality standard that's IRB ready and FDA ready, we're ISO 13485 compatible, by safety sharing. We have the cost of clinical trials and double the speed for our customers. Plus, everybody just benefits. That's a cycle of innovation that's not a 20-year moat. So here are these new units. I've got them out here, LIFU and OpenMotion. All the electronics are going into the mechanicals and being tested, so they'll be ready next month. Here's more. And this brings me to a moment. We also did something just to save another $150,000 using our smartphone. And Vitalik, are you going to come up to get scanned in? So this is just in, so we prepped a little in this so this is the whole unit that's replacing that room and so if we put this on you I can actually turn it on see it so and then I will oops the strapping is being perfected it's Belka I don't think it's sticky enough. 
Can you? All right. So what we do, whoops, thank you for doing this. Oops. I just opened the wrong nap. Oop. So, I'm just putting in a number here, and then I start, and, Just putting in a number here and then I start and it's just saying unable to focus, okay. Try again. Okay. Okay. So I'm just taking some pictures of him with the camera. That's all you have to do is walk around and then we have to get the top of his head. Okay. That's it so it's all so we can take this off now although we should leave it on if we were actually going to sonicate him so now we scan him in and Vitalik has an amazing brain and we get to look at it right here although this is the interesting part a bit this is an MRI he did last week and what we're gonna do is pick a spot for target the interior singlet is gonna make him happy we know that and so okay with standard so these are what the pictures look like. And then this is making him 3D. And this is so we can know where the... I'll show you what this does. So here we overlay the 3D MRI with a scan so we match his bone structure. And then we look at where the transducers are and we know exactly where they are. And now we set the target and we compute what phase delays we need on the transducers and then we nail that target. I would love to sonicate him on stage but we don't have permission from the Thai government yet. However, I met somebody that got permission to do 200 people at Esalen recently. So next year, we'll do something. But yeah, this is how it works for any part in your body. And the interesting part is, as we develop our read part, the part seeing blood flow, we can get away from the MRI machine and use different scanning technology that's lower cost. So with that, this is somewhat like a smartphone moment from 20 years ago. Smartphones changed our lives. Look, a silicon hospital can happen. If you think of your smartphone, there's a dozen radios in it, four cameras and so forth. As we populate this, it can treat us for the diseases we have. Again, you might be asking, can I have one? My country, whatever. Yes, everybody can have one. Everyone has permission. You can build your own. 
I'll be showing these prototypes out in the hall shortly, and thank you.", + "sources_streamethId": "6736f4bb1b0f83434d475cef", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731574200000, - "slot_end": 1731575100000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1cscUxEQdkm5QVkLDeEDz09MWGMPqPDhGH5xZlEf1yRQ", - "resources_slides": null, + "slot_start": 1731641400000, + "slot_end": 1731643200000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1q6BQXCGubElt47T2cCMmisWZixsWRezzeO8I3FiONPU", + "resources_slides": "https://drive.google.com/file/d/1i5SzPIVMPh8S46n_z8uvZ66Vo5JCmXBs/view", "speakers": [ - "mary-lou-jepsen" + "julian-ma" ] }, "vector": [ + 0, 0, 6, 0, @@ -773066,6 +770900,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -773116,7 +770951,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -773208,6 +771042,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -773308,7 +771143,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -773344,7 +771178,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -773355,6 +771188,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -773398,6 +771232,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -773592,13 +771427,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -773777,13 +771606,12 @@ 0, 2, 0, + 2, 0, 0, 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -773795,42 +771623,45 @@ }, { "session": { - "id": "the-state-of-web3-support-today-what-just-happened", - "sourceId": "BZRKUD", - "title": "The State of Web3 Support Today: What Just Happened?", - "description": "One of the most common and painful experiences someone can have today is also one of the most fundamental concepts we tend to take for granted - transactions. Users who seek support for their issues lack the appropriate level of information to even understand what they were doing when it all went wrong. This talk will examine why core web3 experiences are still problematic and propose things to consider when building experiences for everyone that ranges from in app UX to community support tools.", - "track": "Usability", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "the-three-transitions-cross-chain-smart-wallets-with-privacy", + "sourceId": "JESAHN", + "title": "The Three Transitions: Cross-Chain Smart Wallets with Privacy", + "description": "Last year, Vitalik outlined [\"The Three Transitions\"](https://vitalik.eth.limo/general/2023/06/09/three_transitions.html) ahead for the Ethereum stack: moving to L2s, smart wallets, and private transactions. The Base team has built [Keyspace](https://docs.key.space/), a cross-chain keystore that helps all wallets makes these transitions. 
Come learn about how Keyspace works and how Keyspace helps smart wallets sync signers and send private transactions in a multichain world.", + "track": "Layer 2", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "community", - "Accessibility", - "Tooling", - "User Experience" - ], "keywords": [ - "User Support", - "Community" + "Wallets" + ], + "tags": [ + "Zk Rollups", + "Cross-L2", + "Account Abstraction", + "wallet", + "Account Abstraction", + "Cross-L2", + "Zk Rollups" ], - "duration": 304, "language": "en", - "sources_swarmHash": "fb6714e3f29aebfbf0c0287d93a797c37483f8f4909ffb6478031e93712229e4", - "sources_youtubeId": "sur3bRJQw-U", + "sources_swarmHash": "2dbb90931765bca295153542c6c3dc5980889b7e4a1c0368fba93fac384be971", + "sources_youtubeId": "DibVD2gCyp8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731408600000, - "slot_end": 1731409200000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1jmtrpYtos5-qZy0sfliSMlhtQfUi9JSCAcTEP4C554k", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "fungible-taco" - ] + "niran-babalola" + ], + "eventId": "devcon-7", + "slot_start": 1731472200000, + "slot_end": 1731474000000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/12qgh9Oa6U7CvGBkNUiXG-L-E0qYKLqahhOhkZATUF_Q", + "resources_slides": "https://drive.google.com/file/d/1gxh22rMqmwQSB9UgDPjQN2pJKV2WBr-B/view" }, "vector": [ 0, @@ -773840,7 +771671,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -774491,17 +772321,8 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, + 6, 0, 0, 0, @@ -774600,12 +772421,10 @@ 0, 0, 0, - 6, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -774662,6 +772481,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -774760,47 +772580,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -774833,6 +772612,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -775143,18 +772923,60 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, 2, 0, 0, @@ -775164,51 +772986,54 @@ 0, 0, 0, + 0, + 0, + 0, + 0, 0 ] }, { "session": { - "id": "the-supreme-ruler-of-the-world", - "sourceId": "TLWWCW", - "title": "The Supreme Ruler of the World", - "description": "VK rules the world. ZK rules the world, too, like a straightedge wielded with eyes closed. Rulers rule in simple ways: by lining things up and by checking they're all in line. Bring your high school math to learn straightedges called SumCheck and SumCalc and begin to appreciate ZK in simple geometric terms. No moon math. We'll visit lines, cubes and polynomials, to see how they can be used to deduce and to generate, to check and to delegate.", - "track": "Applied Cryptography", - "type": "Talk", - "expertise": "Beginner", - "audience": "Engineering", + "id": "the-trustless-trade-supply-chain", + "sourceId": "RQZADG", + "title": "The Trustless Trade Supply Chain", + "description": "Trades are fundamental to defi. 
Without credibly neutral trade execution – we risk the same centralisation and rent extraction through privileged actors that we have in tradfi.\r\n\r\nToday, the trade supply chain in defi is mostly centralised: Intent auctions, builders, solvers and market makers are handful of off-chain actors with privileged access.\r\n\r\nHowever, a trustless, and decentralised trade supply chain is possible. This talk highlights the current and future technologies that make it possible.", + "track": "Real World Ethereum", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Scalability", - "Validiums", - "Zero-Knowledge", - "sumcheck", - "Scalability", - "Validiums", - "Zero-Knowledge" + "PBS", + "MEV", + "Trading", + "Intents", + "TEE", + "Intents", + "MEV", + "PBS", + "Trading" ], "keywords": [ - "sumcalc", - "sumcheck" + "TEE" ], - "duration": 1477, + "duration": 460, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "8eddb90eeded5ff214a45d5bdf580280a4d8a2356f2f3614fcd3ea3f15d1049a", + "sources_youtubeId": "9EPCog8GiiQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67346c7c9dbb7a90e1c29eff", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67346c7c9dbb7a90e1c29eff.vtt", - "transcript_text": " Thank you so much for that nice introduction. I did actually go all out to try to make this the best title ever, or if not, it's at least a tribute. What I want to talk about today, just to give a level set, is Zero Knowledge, of course course this applied cryptography track it's intended and directed at people who have a little bit of knowledge about cryptography a little bit of knowledge about zero knowledge but are basically mostly developers interested in finding out why bother doing this and what is actually going on behind the scenes so without further ado I'm going to talk a little bit not too much metaphysical or mystical, but more about how do you get calculations done by powerful entities, and how do you trust that those results are actually true when they hand something to you. This is really the realm of zero knowledge. It's actually a bit broader, zero knowledge and verifiable knowledge. There's a lot of different properties that are important. They're really important to scaling things up on the blockchain, any kind of blockchain in general, and I'm sure many of you will have seen a lot of buzz around these topics as well. So what's the buzz about? How does this actually work? What is going on behind the scenes at some reasonable level? In general, I'll do a quick background. So zero-knowledge proofs or interactive proofs are ways for a computationally limited verifier, a client, to actually ask for the truth or falsity, ask for a claim to be made, and for somebody with a lot of power to give them a proof that that claim is true. So, for example, some of these problems might be complicated, like, hey, I know a Sudoku solution. And, you know, a 9x9 is not a big deal, but an nxn is actually an NP-complete problem to solve. So these are actually intensely complex problems that people work on and try to demonstrate the truth of. More practically, you might be looking at consensus algorithms and saying, hey, look, I can tell you that I checked 400,000 signatures, and they all checked out. You don't need to bother. So trust me. Well, in this setting, we don't really want to just trust people. 
We want to actually verify what's going on. So that's the setting. The scalability argument here is that one person is going to do the work, and everybody else can just sort of check after that one person did the work. Okay, so for example, what did the consensus say? I checked a bunch of signatures. You know, did they all pan out? What's the new state on maybe on my own chain or on somebody else's chain? That's a lot of work to do. If you try to do that work inside of a contract, it's going to be very expensive. If somebody can do that work for you outside of a contract and just prove it to you in a contract, that helps you scale. Because it's a much cheaper thing to check than it is to calculate. And so in general, these things help us scale with things like roll-ups with other chains, with exchanges, and so forth. Anytime you have a big calculation that you want to land just the result of on another chain, we'll use these kind of results. So, all right. There are two kind of very closely connected problems here. In order to motivate it, I mean, really, we're going to talk about proving a calculation here, which is the middle one. We'll look at a couple tools. One is called sum check. It's actually kind of a very old play on words. When you're sending a message from A to B, you do a check sum to make sure it wasn't actually munged in the middle. Sum check is a way of getting a calculation from A to B and making sure the calculation wasn't messed up, that the result that you get is really true. Related to this is something I'm calling sum calc, which is how do you delegate a problem to somebody else and get them to calculate it for you. You're a contract on chain. You ask somebody off chain to do the calculation. So very related math behind it. You know, and then we'll go into a little bit of the math and see what helps us scale these different algorithms. Okay. So what's really happening here, what is the supreme rule? I'll get to it more in a second. It is basically a trick that helps us look at very, very large calculations, essentially exponential-sized calculations, and use a moderate amount of checking to see whether they're true or false. You can scale this down a little bit. You can say maybe I have a human scale and step calculation, and I have a very, very tiny verifier that only uses logarithmically many steps. 
I won't get too far into the weeds about the complexity of the theory, but really if you wanna think about it concretely, you got a million step calculation", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731484800000, - "slot_end": 1731486600000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1IP5PshRsU2LlH33ndPmkTGZJki3mzS-uZ3M-Yc5vD6o", - "resources_slides": null, + "slot_start": 1731410400000, + "slot_end": 1731411000000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1ZpnW0qJAIFrezIxxeweffstYIWJbW-4Aa1uhy79go6A", + "resources_slides": "https://drive.google.com/file/d/1Fx5_1U9978oSlaXO94gXfVLqxzdK9eT0/view", "speakers": [ - "don-beaver" + "markus" ] }, "vector": [ @@ -775218,10 +773043,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -775871,12 +773692,11 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -775967,6 +773787,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -775975,7 +773796,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -776004,6 +773824,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -776019,6 +773840,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -776045,6 +773867,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -776103,7 +773926,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -776296,6 +774118,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -776412,7 +774235,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -776475,7 +774297,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -776525,10 +774346,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 2, 0, @@ -776542,52 +774363,57 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "the-tension-between-mev-and-censorship-resistance-gadgets", - "sourceId": "G3MBF7", - "title": "The tension between MEV and Censorship Resistance Gadgets", - "description": "Although fairly unrelated at first glance, MEV is currently *the* bottleneck for a censorship-resistant Ethereum. This talk will first explore why MEV and censorship resistance are fundamentally counterforces. Then, we will dive into how MEV constrains the design space of censorship-resistant gadgets like Inclusion Lists and Concurrent Block Producers. What does the future of censorship resistance look like for Ethereum?", - "track": "Cryptoeconomics", + "id": "the-verge-is-not-going-to-break-your-contracts", + "sourceId": "NJXNE3", + "title": "The verge is (not) going to break your contracts!", + "description": "The verge is comming, and with it a new pricing model for storage. This breaks many assumption that compilers have been doing for years. 
We'll see how part and future contracts are going to be affected, and what design should be favored in anticipation of the verge.", + "track": "Developer Experience", "type": "Talk", "expertise": "Expert", - "audience": "Research", + "audience": "Developper", "featured": false, "doNotRecord": false, - "keywords": [ - "Inclusion Lists", - "Protocol Design" - ], "tags": [ - "Ethereum Roadmap", - "Censorship Resistance", - "Design", - "MEV", - "protocol", - "Censorship Resistance", - "Ethereum Roadmap", - "MEV" + "Verkle trees", + "Libraries", + "Best Practices", + "compilers", + "Best Practices", + "Libraries", + "Verkle trees" ], - "language": "en", - "speakers": [ - "julian-ma" + "keywords": [ + "compiler" ], + "duration": 1140, + "language": "en", + "sources_swarmHash": "", + "sources_youtubeId": "SAhp3LgbMYo ", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "", + "transcript_text": "", "eventId": "devcon-7", - "slot_start": 1731641400000, - "slot_end": 1731643200000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1q6BQXCGubElt47T2cCMmisWZixsWRezzeO8I3FiONPU" + "slot_start": 1731492000000, + "slot_end": 1731493800000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1qXCj-zxWc3N3cgUT-kq17kAdjRXdLfCUoe5VGTpy0TE", + "resources_slides": "https://drive.google.com/file/d/1p_t4oRwZR0YvwZMVfL4JO7tTbgI06sAf/view", + "speakers": [ + "hadrien-croubois" + ] }, "vector": [ 0, 0, - 6, - 0, 0, + 6, 0, 0, 0, @@ -777192,7 +775018,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -777233,6 +775058,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -777336,7 +775162,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -777344,6 +775169,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -777363,6 +775189,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -777477,12 +775304,10 @@ 0, 0, 0, - 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -777561,6 +775386,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -777722,10 +775548,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -777902,7 +775724,6 @@ 0, 2, 0, - 2, 0, 0, 0, @@ -777910,6 +775731,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -777919,37 +775741,49 @@ }, { "session": { - "id": "the-three-transitions-cross-chain-smart-wallets-with-privacy", - "sourceId": "JESAHN", - "title": "The Three Transitions: Cross-Chain Smart Wallets with Privacy", - "description": "Last year, Vitalik outlined [\"The Three Transitions\"](https://vitalik.eth.limo/general/2023/06/09/three_transitions.html) ahead for the Ethereum stack: moving to L2s, smart wallets, and private transactions. The Base team has built [Keyspace](https://docs.key.space/), a cross-chain keystore that helps all wallets makes these transitions. Come learn about how Keyspace works and how Keyspace helps smart wallets sync signers and send private transactions in a multichain world.", - "track": "Layer 2", + "id": "the-verifiability-vision", + "sourceId": "KXRMGY", + "title": "The verifiability vision", + "description": "Imagine all data was guaranteed to be correct. We could build a trustworthy digital world based only on correct data. In this presentation, we will sketch layers and techniques that can realize this dream, in particular proof carrying data and succinct proofs. 
We will also discuss the connection to the proof singularity vision for Ethereum as well as highlight caveats that apply; humanity is still in the early stages of the journey and there are obstacles and constraints to tackle", + "track": "Applied Cryptography", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, - "keywords": [ - "Wallets" - ], "tags": [ - "Zk Rollups", - "Cross-L2", - "Account Abstraction", - "wallet", - "Account Abstraction", - "Cross-L2", - "Zk Rollups" + "Scalability", + "Vision", + "ZKP", + "proof", + "succinct", + "Scalability", + "Vision", + "ZKP" ], - "language": "en", - "speakers": [ - "niran-babalola" + "keywords": [ + "Verifiability", + "proof carrying data", + "succinct proofs" ], + "duration": 1670, + "language": "en", + "sources_swarmHash": "", + "sources_youtubeId": "5l6XY2lX244", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735d5949dbb7a90e184c02b", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735dbd09dbb7a90e1646d41.vtt", + "transcript_text": " Okay, and we're live. Welcome, everybody. My name's Andrew. I'm with Xerox PARC. And I'm here to talk to you a little bit about pods. How many here have been using pods this week? I think the answer should be everyone because you got into the building somehow and that actually required one. So what are pods? So your devs con ticket is a pod. The proof of attendance to this talk that you can claim from the Q&A app that's up on the screen there right now. If you're in the room, I don't think this works remotely. That's also a pod. Some of you have been playing Frog Crypto this week. I see some frog hats out there. All those frogs are pods. A pod really can be anything. It can be a secret message. It can be your identity credentials. It could be your driver's license if we get any governments involved to actually do this. If we get any governments involved to actually do this it can it's cryptographic data of any sort So what is the pod framework? So pod is is a technology a framework that makes it easy for apps to issue cryptographic data and to make zk proofs about that Data, it's a data format that's optimized for fish improving It's a standard of how that data format can be sent around and things can be proven about it And it's a framework of how that data format can be sent around and things can be proven about it. And it's a framework with some developer SDKs. Check out our documentation site. I'll have a link at the end if you want to try it out. It's mostly in TypeScript, but can be used on other platforms as well. We have ports in a few other languages. So I'm hoping some of you will get some use out of it. So one last WTF is zero-knowledge proofs. How many people here have used ZK proofs before? I feel like you understand them. Okay, a few. It's kind of obscure technology. That's kind of the point of pods is to make it easier to use so you don't have to understand how the underlying math works. But in brief, a ZK proof lets you prove the validity of any private data or any computation on your private data without revealing that private data itself. And that proof is trustworthy because there's some math that basically you can only calculate if you did it validly. At Xerox PARC, we think of ZKProofs as a universal cryptographic adapter. Basically, I've got lots of different kinds of private data. 
By doing computations on that data in a verifiable way, I can present to somebody whatever I want that is validly proven from that data. The example in this diagram, which you'll find in our blog post, is like, what if I could calculate my own credit score from signed data I got from my bank or from the IRS? I don't need to ask a credit reporting company to gather all this stuff together. I can gather it myself, and I can make a provable statement about what my credit score is and apply for a loan. This is part of the vision of something we call Parcnet, the programmable cryptography internet, which we think is going to be much better once programmable cryptography catches on in all of these ways. ZK proofs are a big part of this, but it's only the beginning. See the other talks being given by my colleagues this week. Also, we have a whole-day CLS tomorrow about programmable cryptography. But today we're going to be focused on ZK proofs and what pods let you do with them. So this is the pod ecosystem that we envision. You need issuers who are issuing this cryptographic data. They're mostly using a private key to sign it. Those take the form of attestations, which users themselves can hold on to. They hold on to their own private data. They don't need an intermediary. At some point, some consumer asks the user, please prove something about yourself, your identity, your age, the fact that you went to DevCon, things like that. And then the user can send the ZK proof to the consumer, who can verify that proof. They do need that third arrow in the diagram, which is a little bit of knowledge about who the attester is. You need at the very least to know that that attester has a public key that you should trust. There might also be things like what is the event ID that represents DevCon on a ticket, things like that. But that kind of completes the diagram. Okay. So why are we doing this? So I work with the team that builds Zupass. You've all been using that to check into DevCon. And we believe that the best learning on this kind of technology comes from contact with reality, meaning we want real users to try this. We want to do it at scale. There are 12,000 people at DevCon this week who are stress-testing Zupass for us. Thank you. I'm sorry if the performance has not always been as great, but it seems to be standing up. And we want to use these opportunities to onboard new users by bridging data that is not ZK-friendly into our ZK-friendly world and take advantage of people who are willing to be early adopters, like the crypto community. So by bridging, what I mean is we're bringing data in the red boxes on this diagram into the green world. Red in my diagrams, in this talk and the next one, means non-ZK-friendly systems, whereas green means ZK-friendly systems. We can bridge data in, and we can then issue it, like your DevCon ticket, which is loaded from a database that isn't cryptographically signed. And then the verifiers can get you into another system like Telegram, in order to join the DevCon chat group. All that is working today. In order to bring this in front of the most users, we do have to accept some constraints. So we're not using the most cutting edge of ZK technologies. We want everyone to be able to use it, which means we've built a mobile-friendly web app, which means everything we do has to run in a browser on a phone. Even an older phone, even on a bad network when the Wi-Fi is overloaded at the conference. 
So that became a bit of a mantra when I was building some of these technologies. There's a lot of cool ZK technology out there that is great, but it needs to run on a big back-end server, and I don't have one of those when I'm in a browser on a phone. So we've got to use tried and true technologies. For people who are in the know, we use Circom and Groth16. You may or may not have heard of those, but that's kind of the underlying technology. They've been around for quite a few years, so they're pretty well battle-tested at this point. So I want to talk a little bit about the systems we built along the way. So this is what Zupass ticketing looked like a year ago at Devconnect when we were in Istanbul. So it's the same triangle that you've seen here. We were pulling data out of Pretix and issuing the tickets. We used a format called an EdDSA ticket. That's a signed piece of data, but it's not a pod, which I'll explain a little bit later. And then we had a proving circuit where you could prove your ticket, you could reveal some fields, you could prove that you owned it, etc. So what did it take us to build this? Don't pay attention to all the details here, but look at the line counts on these PRs when we wrote these things. It's pretty large. That's quite a few lines of code that it took. And in just the ZK proof, there's about 15,000 lines of code that are still there, not including tests and documentation. So it's kind of complicated. So that was the first thing we built. The second thing we built was Frog Crypto, the first version last year, which used a very similar data format. So frogs were issued by the server as what was called an EdDSA frog, a very similar format to tickets, and then you could make a proof about it, you could present it to our Zucat Telegram bot, who would let you into the secret frog holders chat. This all happened last year in Istanbul. So what did it take to build that? It turns out it was very similar. There was a lot of duplication of effort. There were a lot of similar patterns, but you couldn't actually reuse the underlying data. So there clearly is a pattern here, right? We want to issue some signed data. We want someone to request a proof and then to be given a proof of that signed data. But it turned out that each time we had to build it, we had to rewrite a whole bunch of code in order to customize it. So I'm an engineer, I don't like this kind of complexity, I'd rather do things once because I'm lazy. So why is this so hard? So on the signed data part, the EdDSA PCD that we were using as our underlying data format uses a fixed-size hash: it hashes 16 numbers in an array. And therefore, for every new data type that we wanted to put in there, we had to do some custom coding to decide how those numbers in that array go together to make this data type. I would analogize this to imagine you were processing all your data in a hex editor directly as bytes. It's kind of inconvenient. We have better tools than that now. And on the proof side, ZK circuits are a little bit awkward to program. Like, they don't use a normal programming model. You don't write it in a language you're used to. Every variable is what's called a field element. This is a mathematical concept. It's a very big number, modulo some big prime number, and you've got to, like, write equations on those field elements. So it's kind of complicated. And also, once you build a ZK circuit, it's very fixed. 
In order for the prover and verifier to agree on what's valid, the circuit can't change very much. You have to publish a whole new circuit. So that makes this a bit hard. I would analogize this, again, to the hardware world: this is like an ASIC. It's a chip that does one thing. It might do it very well, but it still only does one thing, and every time you want to do another thing, you've got to build a whole new chip. It's kind of inconvenient. So what do we need here? Well, what we'd really like to have is what's called a ZKVM. Basically, if you have an ASIC and you want something more general, why don't you use a CPU? There's technology out there that lets you basically write code, run it inside of a ZK circuit, and validate that this is the correct output. It's great. Some other people are giving talks about it this week. But unfortunately for our situation, it's a little bit too much. Like I said, my mantra: has to work in a browser on a phone. ZKVMs are pretty big right now. You're not going to be able to do that on an older phone in a few seconds. So we have to do something a little bit more limited than that. But again, I'm an engineer, I like working within constraints and coming up with clever solutions. So here's what we came up with. So on the data side, I'm finally gonna explain to you what a pod is at some level. So a pod is just a collection of names and values. Think of it like a JSON object, except that it's flat. There's no hierarchy of nested objects, just names and values. It can have multiple data types in it for those values. The data is then cryptographically signed in a way that makes it easy to make proofs about it. And I'm going to get into more of that a little bit later. Also, I forgot to mention this at the beginning. We are having a deep dive session after this intro session. So stick around for that if you want lots more detail. But I'll give you what I can in the next 15 minutes. On the proof side, we also can generalize. So we have what we call a general purpose circuit, which means rather than having a fully CPU-like circuit in a ZKVM or having the ASIC fixed circuit, we can do something in between. I would analogize it more to an FPGA. We've got some logic blocks. We call them modules. You can feed in some inputs to your circuit in order to decide how those logic blocks are connected to each other and make a different proof every time using the same circuit. We call this framework GPC, for general purpose circuit. And in addition to the circuits individually being configurable, we precompile a set of circuits in what we call a family, at different sizes with different sets of modules. So when you want to make a proof, you can pick the circuit in the family that has enough modules for what you want and not any more, because having a bigger circuit means more time to prove, more memory, etc. So you can make the right trade-offs there. So with that, we get the generalized version of the ZK ecosystem where every issuer is issuing pods. They might contain very different kinds of data. It might be a frog, it might be a driver's license, but it's still a pod. And then when you make proofs about it, you can freely decide what you want to prove and write a configuration to represent that proof. So with that in mind, at this point, what is a pod? So a pod is a data format that makes ZK proofs easy. It's a key-value store. 
It's going to be hashed and signed in a very specific way involving a Merkle tree, which I can explain more of later. And it's optimized for efficient ZK proving. Here's an example of a pod. So we've got some names and values. Most of these are very straightforward, so I'm not going to go through them all in detail. The one that's maybe a little bit interesting is the cardholder. So this is meant to look like a driver's license in some fictional country. The cardholder is my Semaphore ID. This is what Zupass uses to identify you. It's really a public-private key pair. So the public key is what's going to go in the pod to say that this is my pod, or in this case, this is my driver's license. What you see on the right is the JSON format for this. It's optimized to be a little bit terse and also human readable. So things that don't need a type annotation, you'll notice, don't have them, because the JSON type itself is enough data for that. Once you get down to actually building the Merkle tree, like, everything does have a type, but in this table I call them type hints, because the type is not part of the cryptographic data. Instead, it is guidance for how I hash this data into a piece of cryptographically verifiable data. More on that later. So the first thing I do to make this into a pod is I build a Merkle tree. I'm not going to go into detail on that, but basically you arrange the elements into a tree, you hash them all together until you get to a root, and that root is what we call a content ID. The content ID is derived from the data, so if you have the same data, you can derive the same content ID regardless of how it was formatted in JSON. One detail that you might notice on the right is that the names have been alphabetized. That's how we make sure that it is deterministic and you always get the same content ID. But everything else is just hashing. And then, now once I've got the content ID, that's the thing that I sign. So if I'm an issuer and I want to issue a pod, first I get the data, I Merkle-ize it, I get a content ID, and then I just write a signature on that content ID, and that's enough to validate that the entire pod is valid. So we have a ZK-friendly data format. We'd probably like to do some ZK proving on it. So let's talk about the GPC side of this, which is what lets you do that. As I mentioned earlier, GPCs are circuits made of reusable modules, as well as a family of multiple circuits so you can pick the size that you want. Let's look at what that looks like. So this is an example of a GPC configuration. This is how you say, what do I want to prove? And you're gonna present this as this JSON object that says what you wanna prove, and the system is gonna do the rest, compiling this down to what to do with the circuit. So here's a very minimal proof. I'm going to try and prove that I have a driver's license that says I'm allowed to drive, right? So my configuration says I have a pod. I'm going to call it ID card; this is actually an arbitrary name that's just part of the configuration, to refer to it later. It has some entries, and one of those entries is driver. That is not an arbitrary name. That's a name that was in the pod and is going to be hashed and checked. And what do I want to do with it? Well, I want to reveal it. So isRevealed being true means this is a proof. 
It's going to prove that I have a pod, that it contains this entry, and it's going to reveal that its value is hopefully true, because I'm going to try and drive a car. So that's simple enough. There's one detail that wasn't on the previous slide. That's because it's done by default, so I didn't need to include it in the config, but it's important to talk about. What I proved, if I don't think about the signing key, is just that I have a pod containing the word driver with the value true. That doesn't mean it's actually a driver's license. In order to do that, you've got to do something cryptographic. So the easiest way to do that is you check that the pod was signed by a public key that is well known. That might be the government of California, which is where I live. Hopefully we'll get them to issue pods eventually. But that is implicit. The signing key is also always revealed by default, but you can choose to not reveal it if you want to, in which case you can constrain it in other ways. You might constrain it to be equal to some other element without actually revealing it, or constrain it to be a member of a list. Like, maybe I have a list of all the signing keys of the 50 U.S. states, and I just want to prove I have a driver's license from one of them; I don't want to tell you which one. Okay, let's go ahead and get a little bit more complicated. Um, so I've proven that I have a driver's license that says driver equals true. I haven't actually proven that it's my driver's license yet. I could have stolen somebody else's. The thing is that pods, because they're just data, they are transferable. I can copy them. The way we make a pod bound to a single user is by putting that user's public key in it, which I showed earlier when we were looking at the entries. And the way you prove that you are that user is you make a proof that you hold the private key that corresponds to that public key. And the way you say that in the GPC config is this isOwnerID field. You say isOwnerID, and I give the type of public key I'm using, which is Semaphore version 4 from our friends at PSE. And that basically means that this proof is going to be configured to check that I have the right private key in my private inputs. And in this case, it's not even going to reveal what my public key is, just that I own this pod and this pod says I can drive. Okay, let's get to a little bit more ZK and hiding some more data. Instead of proving that I'm a driver, what if I just want to prove I'm over 21? Maybe I want to go buy some alcohol. I don't know what the age is in Thailand, but back home it's 21. So I can just say I have a pod containing an entry called date of birth. That entry is not going to be revealed, but it's going to be in this range, and that's the numeric range for the date that is 21 years ago. We should make this more friendly and let you just pass in a date object, but for now it's a number. So this is a proof that I am over 21 and that I own this pod. I didn't take out that field, but everything else is not revealed and I'm being very anonymous. One last example: we can make proofs of multiple pods at once if we have a circuit with enough modules. So here's one where I'm proving I'm over 21 and also proving that I have a ticket to an event, because maybe I'm going to go to an after-party after DevCon. And in this case, for the ticket, I'm proving that its attendee name is the same as the name in my driver's license. 
I'm proving that I own it, and I'm also proving that the event ID of that ticket is in a valid list. I'm not revealing what I have a ticket to, but it's maybe a list of, like, DevCon-related events that are happening in Thailand this week. So this is kind of a minimal anonymous way of checking into a party. Of course, if I'm there in person, I'm revealing some more about myself by being there, but you get the idea. Okay. So, last piece of this: I've now configured my proof. I've decided what I want to prove. How do I actually make a proof? And all of this is an example of what you can do with the GPC libraries. So there are three things I need in order to make a proof. One of them is the proof config, which I've already given you some examples of. The second thing is the inputs. That's the actual pods, which I need to have in order to make proofs about them. There are also other inputs, like my private key, or like that list of valid event IDs that I want to prove that my event ID is one of. Those are all inputs. The third thing I have to feed in is something called an artifact path. That is, where do we find the binaries that know how to generate this circuit? So when a ZK circuit is compiled, it generates a proving key, a verification key, and also a witness generator. Don't worry about what those are, but there are some, like, big binary things that the prover and verifier have to agree with. We distribute these via NPM. We also put them on various CDNs. You can download them. So you have to just decide for your app: are you going to download them, put them on disk, give a path to them? Are you going to download them from a URL? There are options. Once you've got these things together, the GPC prove function will generate the proof. It puts together that configuration, it picks a circuit that fits that configuration with enough modules, it downloads the corresponding artifacts for that circuit, and it generates the proof. And then the last thing it does, oh, I should have gone to the next slide, here we go. So it needs to compile down all those inputs into circuit signals that can feed into the actual ZK circuit, which are mathematical field elements, as I mentioned. And then after it's done and it gets a valid proof, it will decompile some of the outputs and turn them into what's called the revealed claims object. So what comes out of a proof? You've got the actual mathematical proof. That's just opaque numbers that are needed by the verifier. That's the actual ZK part. You've got a bound config, which is exactly like the configuration that you fed in, except that now it contains the identifier of the circuit that was selected, so that the verifier knows how to verify it correctly. And then you've got the revealed claims. If I revealed that I am a licensed driver, driver equals true, that would be in this object. If I revealed my name, et cetera, that would be here. And that's what the decompiling is for. It's taking the circuit outputs and turning them back into a string or whatever the representative thing is. Okay, so those three things are exactly what I should send to a verifier, whoever I'm gonna prove this to. They need those three things. They also need an artifact path to download the corresponding verification key. And then they can verify the proof. They just do very much the same thing. 
They're going to compile some of those inputs back down into ZK land, where there are circuit signals. They're going to verify the proof, and they're going to say yes or no, whether it's valid. And, you know, great, we're at the end, and hopefully everything went right and I've proven what I wanted to prove to you. So, final takeaways, a summary of what this was a bit of a speed run through. So pods are data that's designed to be proven about. Any pod is a signed attestation of something, whether it's I have a ticket, whether it's I have a driver's license, etc. GPCs allow you to flexibly make proofs about those pods by using modular circuits, which can be configured using a JSON-like configuration language. And the system will auto-select the circuit that you need depending on your configuration. So all your app needs to do is say, please make me a proof of this with these inputs, and everything else is handled for you. Then the last step is the verifier verifies the proof, and then the apps do have to decide what things they trust. How do you trust that this is the correct proof? Like I alluded to before, you should check that this ID card was actually signed by the government. You should know the public key, or you should know the event ID for DevCon. You should also check, and I'll say a little more about this in the deep dive, that the configuration that was sent to you was actually the configuration you asked for. So you don't want the prover to say, oh, I have a proof of something, but not necessarily the thing you asked for. That's something that you should check as well. But once you do all of that, this end-to-end should be very solid, and you should be getting the information you need. Okay. That's it for the speedrun intro. Please check out our documentation. It's on pod.org, which just went live yesterday. And also there's a link that just went by, t.me slash zupass, to join the Telegram group. And yeah, let's go do some Q&A. All right. Where do you store the Semaphore identity secret for users in Zupass? So that's all client side. Zupass stores all of your private data client side. The Zupass server is aware of your public key, because that's how it can make sure that you get issued the right DevCon tickets and things like that. But yeah, Zupass is a client-side cryptographic data manager. To what extent is pod an open data standard? So, I consider it open. We haven't, like, published a spec for it; I should work on that. But all of our code is open source, so people can do interoperability with it. The pod format itself is very generic and interoperable. It's the GPC compiler that turns a pod into the specifics of what you need to prove with a specific GPC. So the GPCs are kind of less standard and generic, though they also could be used on multiple platforms. We do have an example of GPC verification on chain that just started working a couple days ago, so all that is possible outside of a browser, but we don't have as many examples there as we do on the pod data. Can we scroll down? Is there anything more? Can you compare pod to verifiable credential? Yes. This is something I looked into. Pod is simpler. It doesn't really have a fixed schema or anything that ties it into a specific standard. You could put JSON-LD-based verifiable credential data in a pod if you wanted to. But a pod is much more flexible. 
At the cryptographic level, there is a difference in the kind of Merkle tree we use. The pod uses the LeanIMT, which is something that Semaphore created, which is much shallower because pods tend to be relatively small, as opposed to the sparse Merkle tree that is used, at least for the implementation of verifiable credentials that I'm aware of, which is the one from iden3. That is a much deeper Merkle tree, but it can do things like prove absence of an entry, which pods can't do. Okay. What else do we have? How frequent is pod refresh? Very frequent so far, but we're hoping to keep it much more stable after DevCon. I don't have a strong answer to that. What else? How do you convert JSON to a Merkle tree? Please stick around for the deep dive session that's coming up. I'll tell you all about that. What else? Yeah. So, in the example of prover and verifier, the user's device can generate the proof, and that's why everything has to work in a browser on a phone. Client-side proving is definitely the default in Zupass. Not every app has to do it. These are libraries. You can call them wherever you want. There's much more difference between verifiers, whether they're doing server-side verification or client-side verification. That depends what your use case is and what you're protecting against. Are the issued credentials signed, and the proof... that question scrolled away. We do not use BBS signatures to verify partial properties; that's what we use the Merkle tree for. Again, more details on that coming up. Is it possible to make information in Zupass portable? I think that pods do make that possible, yes, as long as it's a pod, and there are APIs for getting data out of Zupass if you want to. That's called the ZAPI, at which point you can take this to whatever platform you want. We have implementations of pods in Python, C, and Rust for various projects, so it's not too hard to do. How do apps know whether a proof from a verifier is legit? Well, the framework tells you that it is a valid proof. And it will confirm for you that this configuration and these revealed claims and this proof match up and are valid. So the prover couldn't have cheated about that. What they could cheat about is app-level semantics. So if you ask for a proof of a driver's license and I sent you a proof of a frog instead, that's something that the framework can't tell you, because it just says that's a valid proof. So you do have to check: is that the config I asked for? Is the signer of this driver's license the government, etc.? But yeah, that's the kind of level of verification we got. Okay. I think that's it. Can we go back to the slides briefly? Okay. Those of you who are collecting frogs, I've got something for you if we can switch back to my slides. Oh, yeah. We'll leave that up for a minute or two. I think we've got like three minutes before the next session starts anyway. So feel free to frog away. Okay. And as I said, we're going to go straight into a deep dive session, which is going to be 90 minutes. We probably won't use the whole thing, but that's what we're scheduled for. 
So stick around if you want more details to answer any of those questions.", "eventId": "devcon-7", - "slot_start": 1731472200000, - "slot_end": 1731474000000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/12qgh9Oa6U7CvGBkNUiXG-L-E0qYKLqahhOhkZATUF_Q" + "slot_start": 1731578400000, + "slot_end": 1731580200000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1D13mwNG569Eo7vRzSRs1BRHF7sCXAys5mnZEJpklwtg", + "resources_slides": "https://drive.google.com/file/d/19LsUDgMe98h3PEuB2Yx7Qe2oaaE7ikY0/view", + "speakers": [ + "jens-groth" + ] }, "vector": [ 0, @@ -777959,10 +775793,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -778754,9 +776588,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -778772,13 +776603,13 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -778843,6 +776674,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -778903,7 +776735,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -778927,6 +776758,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -778935,6 +776767,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -779213,11 +777046,7 @@ 0, 0, 0, - 0, - 0, - 0, - 2, - 0, + 2, 0, 0, 0, @@ -779270,8 +777099,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -779288,45 +777117,43 @@ }, { "session": { - "id": "the-trustless-trade-supply-chain", - "sourceId": "RQZADG", - "title": "The Trustless Trade Supply Chain", - "description": "Trades are fundamental to defi. Without credibly neutral trade execution – we risk the same centralisation and rent extraction through privileged actors that we have in tradfi.\r\n\r\nToday, the trade supply chain in defi is mostly centralised: Intent auctions, builders, solvers and market makers are handful of off-chain actors with privileged access.\r\n\r\nHowever, a trustless, and decentralised trade supply chain is possible. This talk highlights the current and future technologies that make it possible.", - "track": "Real World Ethereum", - "type": "Lightning Talk", + "id": "the-verkle-advantage", + "sourceId": "YLBEZN", + "title": "The verkle advantage", + "description": "This talk provides a comprehensive overview of the achievements by the stateless development effort, over the past year. 
It will explore some of the discoveries we made while implementing verkle trees, that improve the user and developer experience of Ethereum.", + "track": "Core Protocol", + "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "PBS", - "MEV", - "Trading", - "Intents", - "TEE", - "Intents", - "MEV", - "PBS", - "Trading" + "Core Protocol", + "Protocol Design", + "Verkle trees", + "stateless", + "Core Protocol", + "Protocol Design", + "Verkle trees" ], "keywords": [ - "TEE" + "stateless" ], - "duration": 460, + "duration": 1543, "language": "en", - "sources_swarmHash": "8eddb90eeded5ff214a45d5bdf580280a4d8a2356f2f3614fcd3ea3f15d1049a", - "sources_youtubeId": "9EPCog8GiiQ", + "sources_swarmHash": "5a0b9f1615e20eb0e9597edb51957d0bf0f2f906610c445999fac2dd23a18440", + "sources_youtubeId": "f0e3ulrO9Ik", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673471199dbb7a90e107f5eb", "eventId": "devcon-7", - "slot_start": 1731410400000, - "slot_end": 1731411000000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1ZpnW0qJAIFrezIxxeweffstYIWJbW-4Aa1uhy79go6A", - "resources_slides": null, + "slot_start": 1731488400000, + "slot_end": 1731490200000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1zs9ePGkdyS7IfCoOeK_dArKiELQYjDXk5L-A70d7Gf4", + "resources_slides": "https://drive.google.com/file/d/1NEivVAmjpzZFmsI8R1DFTzcQZOwB4pgC/view", "speakers": [ - "markus" + "guillaume-ballet" ] }, "vector": [ @@ -779334,8 +777161,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -779991,11 +777816,9 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -780083,7 +777906,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -780102,6 +777924,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -780120,7 +777943,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -780129,6 +777951,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -780136,7 +777959,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -780163,7 +777985,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -780277,6 +778098,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -780415,7 +778237,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -780597,6 +778418,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -780648,7 +778470,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -780661,63 +778482,64 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "the-verge-is-not-going-to-break-your-contracts", - "sourceId": "NJXNE3", - "title": "The verge is (not) going to break your contracts!", - "description": "The verge is comming, and with it a new pricing model for storage. This breaks many assumption that compilers have been doing for years. We'll see how part and future contracts are going to be affected, and what design should be favored in anticipation of the verge.", - "track": "Developer Experience", + "id": "the-wallet-and-ux-stack-to-build-web3-applications-for-the-masses", + "sourceId": "LCNEGW", + "title": "The Wallet and UX Stack to Build Web3 Applications for the Masses", + "description": "In this talk I will give an overview of how wallet infrastructure and the relationship between wallets and dapps have evolved over the past 5 years. And give a layer-by-layer breakdown of the modern wallet stack from signers to smart account modules, how each component contributes to a UX unlock on Ethereum/L2s, and how application developers can use them today. 
We will also touch on pertinent ongoing EIPs such as 7702 (deploy code for EOAs), and 7715 (permissions).", + "track": "Usability", "type": "Talk", - "expertise": "Expert", - "audience": "Developper", + "expertise": "Intermediate", + "audience": "Product", "featured": false, "doNotRecord": false, - "tags": [ - "Verkle trees", - "Libraries", - "Best Practices", - "compilers", - "Best Practices", - "Libraries", - "Verkle trees" - ], "keywords": [ - "compiler" + "Wallets", + "Signers", + "Permissions" + ], + "tags": [ + "Developer Infrastructure", + "User Experience", + "Account Abstraction", + "permissions", + "Account Abstraction", + "Developer Infrastructure", + "User Experience" ], - "duration": 1140, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "55d5707ff2452cbecbf3c9de175bda79205405d45aa4a8242a116659c8ea8838", + "sources_youtubeId": "5vkjN9LKsiw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734801d9dbb7a90e1be4da3", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731492000000, - "slot_end": 1731493800000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1qXCj-zxWc3N3cgUT-kq17kAdjRXdLfCUoe5VGTpy0TE", - "resources_slides": null, "speakers": [ - "hadrien-croubois" - ] + "nichanan-kesonpat" + ], + "eventId": "devcon-7", + "slot_start": 1731470400000, + "slot_end": 1731472200000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1EwxJbkAW9PZZpjRozkPVAnLaQpoQZm7uf1kolnUFM_0", + "resources_slides": "https://drive.google.com/file/d/13VNvonmWR8H1GLFUkHCdztpesOJNWrVP/view" }, "vector": [ 0, 0, 0, - 6, - 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -781358,7 +779180,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -781372,6 +779193,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -781470,10 +779292,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -781490,7 +779312,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -781509,8 +779330,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -781652,7 +779475,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -781687,7 +779509,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -781972,6 +779793,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -782018,6 +779840,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -782034,59 +779857,67 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "the-verifiability-vision", - "sourceId": "KXRMGY", - "title": "The verifiability vision", - "description": "Imagine all data was guaranteed to be correct. We could build a trustworthy digital world based only on correct data. In this presentation, we will sketch layers and techniques that can realize this dream, in particular proof carrying data and succinct proofs. 
We will also discuss the connection to the proof singularity vision for Ethereum as well as highlight caveats that apply; humanity is still in the early stages of the journey and there are obstacles and constraints to tackle", - "track": "Applied Cryptography", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Research", + "id": "the-wellbeing-protocol-scaling-localism", + "sourceId": "HC3QGN", + "title": "The Wellbeing Protocol - Scaling Localism", + "description": "Imagine a world where:\r\n - hyper-local marginalised communities could create impact DAOs as easily as creating FB groups\r\n - we could create a UI that abstracted the complexity of quadratic / conviction / delegated voting to create a continuous resource allocation alternative to governance\r\n - funders could stream money into millions of these treasuries\r\n\r\nFind out how this New Zealand government funded project, now running trials in three countries, is creating a network of grassroots changemakers.", + "track": "Real World Ethereum", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, - "tags": [ - "Scalability", - "Vision", - "ZKP", - "proof", - "succinct", - "Scalability", - "Vision", - "ZKP" - ], "keywords": [ - "Verifiability", - "proof carrying data", - "succinct proofs" + "conviction", + "zealand" + ], + "tags": [ + "DAO", + "Governance", + "Quadratic Voting", + "Collective Intelligence", + "Conviction", + "Ethereum for Good", + "Public good", + "Climate", + "ReFi", + "Regenerative Applications", + "User Experience", + "zealand", + "Climate", + "Collective Intelligence", + "Conviction", + "DAO", + "Ethereum for Good", + "Governance", + "Public good", + "Quadratic Voting", + "ReFi", + "Regenerative Applications", + "User Experience" ], - "duration": 1670, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "262eb3ab3f23b0ae9b773e2fc4e01025258988511f1fd7cd530249bccbf39dba", + "sources_youtubeId": "doOQJRqTzSs", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735d5949dbb7a90e184c02b", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735dbd09dbb7a90e1646d41.vtt", - "transcript_text": " Okay, and we're live. Welcome, everybody. My name's Andrew. I'm with Xerox PARC. And I'm here to talk to you a little bit about pods. How many here have been using pods this week? I think the answer should be everyone because you got into the building somehow and that actually required one. So what are pods? So your devs con ticket is a pod. The proof of attendance to this talk that you can claim from the Q&A app that's up on the screen there right now. If you're in the room, I don't think this works remotely. That's also a pod. Some of you have been playing Frog Crypto this week. I see some frog hats out there. All those frogs are pods. A pod really can be anything. It can be a secret message. It can be your identity credentials. It could be your driver's license if we get any governments involved to actually do this. If we get any governments involved to actually do this it can it's cryptographic data of any sort So what is the pod framework? 
So pod is is a technology a framework that makes it easy for apps to issue cryptographic data and to make zk proofs about that Data, it's a data format that's optimized for fish improving It's a standard of how that data format can be sent around and things can be proven about it And it's a framework of how that data format can be sent around and things can be proven about it. And it's a framework with some developer SDKs. Check out our documentation site. I'll have a link at the end if you want to try it out. It's mostly in TypeScript, but can be used on other platforms as well. We have ports in a few other languages. So I'm hoping some of you will get some use out of it. So one last WTF is zero-knowledge proofs. How many people here have used ZK proofs before? I feel like you understand them. Okay, a few. It's kind of obscure technology. That's kind of the point of pods is to make it easier to use so you don't have to understand how the underlying math works. But in brief, a ZK proof lets you prove the validity of any private data or any computation on your private data without revealing that private data itself. And that proof is trustworthy because there's some math that basically you can only calculate if you did it validly. At Xerox PARC, we think of ZKProofs as a universal cryptographic adapter. Basically, I've got lots of different kinds of private data. By doing computations on that data in a verifiable way, I can present to somebody whatever I want that is validly proven from that data. The example in this diagram, which you'll find in our blog post, is like, what if I could calculate my own credit score from signed data I got from my bank or from the IRS? I don't need to ask a credit reporting company to gather all this stuff together. I can gather it myself, and I can make a provable statement about what my credit course score is and apply for a loan. This is part of the vision of something we call the Parknet, programmable cryptography internet, which we think is going to be much better once programmable cryptography catches on in all of these ways. ZK proofs are a big part of this, but it's only the beginning. See the other talks being given by my colleagues this week. Also, we have a whole day CLS tomorrow about programmable cryptography. But today we're going to be focused on ZK proofs and what pods let you do with them. So this is the pod ecosystem that we envision. You need issuers who are issuing this cryptographic data. They're mostly using a private key to sign it. Those takes the form of attestations, which users themselves can hold on to. They hold on to their own private data. They don't need an intermediary. At some point, some consumer asks the user, please prove something about yourself, your identity, your age, the fact that you went to DevCon, things like that. And then the consumer can generate a ZK proof and send, sorry, the user can send the ZK proof to the consumer who can verify that proof. They do need that third arrow in the diagram, which is a little bit of knowledge about who the attester is. You need at the very least to know that that attester has a public key that you should trust. There might also be things like what is the event ID that represents DevCon on a ticket, things like that. But that kind of completes the diagram. Okay. So why are we doing this? So I work with the team that builds ZooPass. You've all been using that to check into DevCon. 
And we believe that the best learning on this kind of technology comes from contact with reality, meaning we want real users to try this. We want to do it at scale. There are 12,000 people at DevCon this week who are stress-testing ZooPass for us. Thank you. I'm sorry if the performance has not always been as great, but it seems to be standing up. And we want to use these opportunities to onboard new users by bridging data that is not ZK-friendly into our ZK-friendly world and take advantage of people who are willing to be early adopters like the crypto community. So by bridging, what I mean is we're bringing data in the red boxes on this diagram Into the green world red in my diagrams of this talk and the next one means like non zk friendly systems Whereas green means ek friendly systems we can bridge it in we can then issue it like your devcon ticket Which is loaded from a database that isn't cryptographically signed And then you can the verifiers can get you into another system like telegram in order to join the the DEVCON chat group. All that is working today. In order to bring this in front of the most users, we do have to accept some constraints. So we're not using the most cutting edge of ZK technologies. We want everyone to be able to use it, which means we've built a mobile friendly web app. Which means everything we do has to run in a browser on a phone. Even an older phone, even on a bad network when the Wi-Fi is overloaded at the conference. So that became a bit of a mantra when I was building some of these technologies. There's a lot of cool ZK technology out there that is great, but it needs to run on a big back-end server and I don't have one of those when I'm in a browser on a phone. So we've got to use tried and true technologies. For people who are in the know, we use SIRCOM and GROSS16. You may or may not have heard of those, but that's kind of the underlying technology. They've been around for quite a few years, so they're pretty well battle tested at this point. So I want to talk a little bit about the systems we built along the way. So this is what ZooPass ticketing looked like a year ago at DevConnect when we were in Istanbul. So it's the same triangle that you've seen here. We were pulling data out of Precix and issuing the tickets. We used a format called an EDDSA ticket. That's a signed piece of data, but it's not a pod, which I'll explain a little bit later. And then we had a proving circuit where you could prove your ticket, you could reveal some fields, you can prove that you owned it, etc. So what did it take us to build this? Don't pay attention to all the details here, but look at the line counts on these PRs when we wrote these things. It's pretty large. That's quite a few lines of code that it took. And in just the ZK proof, there's about 15,000 lines of code that are still there, not including tests and documentation. So it's kind of complicated. So that was the first thing we built. The second thing we built was Frog Crypto, the first version last year, which used a very similar data format. So frogs were issued by the server as what was called an EDDSA frog, very similar format to tickets, and then you could make a proof about it, you could present it to our Zucat telegram bot who would let you into the secret frog holders chat. This all happened last year in Istanbul. So what did it take to build that? It turns out it was very similar. There was a lot of duplication of effort. 
There was a lot of similar patterns, but you couldn't actually reuse the underlying data. So there clearly is a pattern here, right? We want to issue some signed data. We want someone to request a proof and then to be given a proof of that signed data, we want someone to request a proof and then to be given a proof of that signed data, but it turned out that each time we had to build it, we had to rewrite a whole bunch of code in order to customize it. So I'm an engineer, I don't like this kind of complexity, I'd rather do things once because I'm lazy. So why is this so hard? So the signed data part, the EDSA PCD that we were using as our underlying data format. Used as a fixed size hash, it hashes 16 numbers in an array. And therefore, every new data type that we wanted to put in there, we had to do some custom coding to decide how those numbers in that array go together to make this data type. I would analogize this to imagine you were processing all your data in a hex editor directly as bytes. It's kind of inconvenient. We have better tools than that now. And on the proof side, ZK circuits are a little bit awkward to program. Like they don't use a normal programming model. You don't write it in a language you're used to. Every variable is what's called a field element. This is a mathematical concept. It's a very big number, modulo some big prime number, and you've got to like write equations on those field elements. So it's kind of complicated. And also once you build a ZK circuit, it's very fixed. In order for the prover and verifier to agree on what's valid, the circuit can't change very much. You have to publish a whole new circuit. So that makes this a bit hard. I would analogize this, again, to in the hardware world, this is like an ASIC. It's a chip that does one thing. It In the hardware world, this is like an ASIC. It's a chip that does one thing. It might do it very well, but it still only does one thing, and every time you want to do another thing, you've got to build a whole new chip. It's kind of inconvenient. So what do we need here? Well, what we'd really like to have is what's called a ZKVM. Basically, if you have an ASIC and you want something more general, why don't you use a out there that lets you basically write code, run it inside of a ZK circuit and validate that this is the correct output. It's great. Some other people are giving talks about it this week. But unfortunately for our situation, it's a little bit too much. Like I said, my mantra has to work in a browser on a phone. ZK VMs are pretty big right now. You're not going to be able to do that on an older phone in a few seconds. So we have to do something a little bit more limited than that. But again, I'm an engineer, I like working within constraints and coming up with clever solutions. So here's what we came up with. So on the data side, I'm finally gonna explain to you what a pod is at some level. So a pod is just a collection of names and values. Think of it like a JSON object, except that it's flat. There's no hierarchy of nested objects, just names and values. It can have multiple data types in it for those values. The data is then cryptographically signed in a way that makes it easy to make proofs about it. And I'm going to get into more of that a little bit later. Also, I forgot to mention this at the beginning. We are having a deep dive session after this intro session. So stick around for that if you want lots more detail. But I'll give you what I can in the next 15 minutes. 
On the proof side, we also can generalize. So we have what we call a general purpose circuit, which means rather than having a fully CPU-like circuit in a ZKVM or having the ASIC fixed circuit, we can do something in between. I would analogize it more to an FPGA. We've got some logic blocks. We call them modules. You can feed in some inputs to your circuit in order to decide how those logic blocks are connected to each other and make a different proof every time using the same circuit. We call this framework GPC for general purpose circuit. And in addition to the circuits individually being configurable, we precompile a set of circuits in what we call a family at different sizes with different sets of modules. So when you want to make a proof, you can pick the circuit in the family that has enough modules for what you want and not any more because having a bigger circuit means more time to prove, more memory, etc. So you can make the right trade-offs there. So with that, we get the generalized version of the ZK ecosystem where every issuer is issuing pods. They might contain very different kinds of data. It might be a frog, it might be a driver's license, but it's still a pod. And then when you make proofs about it, you can freely decide what you want to prove and write a configuration to represent that proof. So with that in mind, at this point, what is a pod? So a pod is a data format that makes zkproofs easy. It's a key value store. It's going to be hashed and signed in a very specific way involving a Merkle tree, which I can explain more of later. And it's optimized for efficiency zkproving. Here's an example of a pod. So we've got some names and values. Most of these are very straightforward, so I'm not going to go through them all in detail. The one that's maybe a little bit interesting is the cardholder. So this is meant to look like a driver's license in some fictional country. The cardholder is my semaphore ID. This is what Zupass uses to identify you. It's really a public-private key pair. So the public key is what's going to go in the pod to say that this is my pod, or in this case, this is my driver's license. What you see on the right is the JSON format for this. It's optimized to be a little bit terse and also human readable. So things that don't need a type annotation, you'll notice don't have them because the JSON type itself is enough data for that. Once because the JSON type itself is enough data for that. Once you get down to actually building the Merkle tree, like everything does have a type, but in this table I call them type hints because the type is not part of the cryptographic data. Instead, it is guidance to how do I hash this data into a piece of cryptographically verifiable data. More on that later. So the first thing I do to make this into a pod is I build a Merkle tree tree i'm not going to go into detail on that but basically you arrange the elements into a tree you hash them all together until you get to a root and that root is what we call a content id the content id is derived from the data so if you have the same data you can derive the same content id regardless of how it was formatted in json one detail that you might notice on the right is that the names have been alphabetized. That's how we make sure that it is deterministic and you always get the same content ID. But everything else is just hashing. And then now once I've got the content ID, that's the thing that I sign. 
So if I'm an issuer and I want to issue a pod, first I get the data, I Merkle-ize it, I get a content ID, and then I just write a signature on that content ID, and that's enough to validate that the entire pod is valid. So we have a ZK-friendly data format. We'd probably like to do some ZK proving on it. So let's talk about the GPC side of this; that is what lets you do that. As I mentioned earlier, GPCs are circuits made of reusable modules, as well as a family of multiple circuits so you can pick the size that you want. Let's look at what that looks like. So this is an example of a GPC configuration. This is how you say: what do I want to prove? And you're going to present this as a JSON object that says what you want to prove, and the system is going to do the rest, compiling this down to what to do with the circuit. So here's a very minimal proof. I'm going to try and prove that I have a driver's license that says I'm allowed to drive. So my configuration says I have a pod; I'm going to call it ID card. This is actually an arbitrary name that's just part of the configuration, to refer to it later. It has some entries, and one of those entries is driver. That is not an arbitrary name. That's a name that was in the pod and is going to be hashed and checked. And what do I want to do with it? Well, I want to reveal it. So isRevealed being true means this is a proof that I have a pod, that it contains this entry, and it's going to reveal that its value is hopefully true, because I'm going to try and drive a car. So that's simple enough. There's one detail that wasn't on the previous slide. That's because it's done by default, so I didn't need to include it in the config, but it's important to talk about. What I proved, if I don't think about the signing key, is just that I have a pod containing the word driver with the value true. That doesn't mean it's actually a driver's license. In order to do that, you've got to do something cryptographic. So the easiest way to do that is you check that the pod was signed by a public key that is well known. That might be the government of California, which is where I live. Hopefully we'll get them to issue pods eventually. But that is implicit. The signing key is also always revealed by default, but you can choose to not reveal it if you want to, in which case you can constrain it in other ways. You might constrain it to be equal to some other element without actually revealing it, or constrain it to be a member of a list. Maybe I have a list of all the signing keys of the 50 U.S. states, and I just want to prove I have a driver's license from one of them; I don't want to tell you which one. Okay, let's go ahead and get a little bit more complicated. So I've proven that I have a driver's license that says driver equals true. I haven't actually proven that it's my driver's license yet. I could have stolen somebody else's. The thing is that pods, because they're just data, are transferable. I can copy them. The way we make a pod bound to a single user is by putting that user's public key in it, which I showed earlier when we were looking at the entries. And the way you prove that you are that user is you make a proof that you hold the private key that corresponds to that public key. And the way you say that in the GPC config is this isOwnerID field. You say isOwnerID, and I give the type of public key I'm using, which is Semaphore version 4, from our friends at PSE. 
And that basically means that this proof is going to be configured to check that I have the right private key in my private inputs. And in this case, it's not even going to reveal what my public key is, just that I own this pod and this pod says I can drive. Okay, let's get to a little bit more ZK and hiding some more data. Instead of proving that I'm a driver, what if I just want to prove I'm over 21? Maybe I want to go buy some alcohol. I don't know what the age is in Thailand, but back home it's 21. So I can just say I have a pod containing an entry called date of birth. That entry is not going to be revealed, but it's going to be in this range, and that's the numeric range for the date that is 21 years ago. We should make this more friendly and let you just pass in a date object, but for now it's a number. So this is a proof that I am over 21 and that I own this pod. I didn't take out that field, but everything else is not revealed, and I'm being very anonymous. One last example: we can make proofs of multiple pods at once if we have a circuit with enough modules. So here's one where I'm proving I'm over 21 and also proving that I have a ticket to an event, because maybe I'm going to go to an after party after Devcon. And in this case, for the ticket, I'm proving that its attendee name is the same as the name in my driver's license. I'm proving that I own it, and I'm also proving that the event ID of that ticket is in a valid list. I'm not revealing what I have a ticket to, but it's maybe a list of, like, Devcon-related events that are happening in Thailand this week. So this is kind of a minimal anonymous way of checking into a party. Of course, if I'm there in person, I'm revealing some more about myself by being there, but you get the idea. Okay. So, last piece of this: I've now configured my proof. I've decided what I want to prove. How do I actually make a proof? All of this is an example of what you can do with the GPC libraries. There are three things I need in order to make a proof. One of them is the proof config, which I've already given you some examples of. The second thing is the inputs. That's the actual pods, which I need to have in order to make proofs about them. There are also other inputs, like my private key, or like that list of valid event IDs that I want to prove that my event ID is one of. Those are all inputs. The third thing I have to feed in is something called an artifact path. That is: where do we find the binaries that know how to generate this circuit? So when a ZK circuit is compiled, it generates a proving key, a verification key, and also a witness generator. Don't worry about what those are, but there are some big binary things that the prover and verifier have to agree on. We distribute these via NPM. We also put them on various CDNs. You can download them. So you just have to decide for your app: are you going to download them, put them on disk, give a path to them? Are you going to download them from a URL? There are options. Once you've got these things together, the gpcProve function will generate the proof. It puts together that configuration, it picks a circuit that fits that configuration with enough modules, it downloads the corresponding artifacts for that circuit, and it generates the proof. And then the last thing it does... oh, I should have gone to the next slide, here we go. 
So it needs to compile down all those inputs into circuit signals that can feed into the actual ZK circuit, which are mathematical field elements, as I mentioned. And then after it's done and it gets a valid proof, it will decompile some of the outputs and turn them into what's called the revealed claims object. So three things come out of a proof. You've got the actual mathematical proof. That's just opaque numbers that are needed by the verifier. That's the actual ZK part. You've got a bound config, which is exactly like the configuration that you fed in, except that now it contains the identifier of the circuit that was selected, so that the verifier knows how to verify it correctly. And then you've got the revealed claims. If I revealed that I am a licensed driver, driver equals true, that would be in this object. If I revealed my name, et cetera, that would be here. And that's what the decompiling is for. It's taking the circuit outputs and turning them back into a string or whatever the representative thing is. Okay, so those three things are exactly what I should send to a verifier, whoever I'm going to prove this to. They need those three things. They also need an artifact path to download the corresponding verification key. And then they can verify the proof. They do very much the same thing. They're going to compile some of those inputs back down into ZK land, where there are circuit signals. They're going to verify the proof, and they're going to say yes or no, whether it's valid. And, you know, gravy, we're at the end, and hopefully everything went right and I've proven what I wanted to prove to you. So, final takeaways, a summary of what this was a bit of a speed run through. Pods are data that's designed to be proven about. Any pod is a signed attestation of something, whether it's I have a ticket, whether it's I have a driver's license, etc. GPCs allow you to flexibly make proofs about those pods by using modular circuits, which can be configured using a JSON-like configuration language. And the system will auto-select the circuit that you need depending on your configuration. So all your app needs to do is say: please make me a proof of this with these inputs, and everything else is handled for you. Then the last step is the verifier verifies the proof, and then the apps do have to decide what things they trust. How do you trust that this is the correct proof? Like I alluded to before, you should check that this ID card was actually signed by the government. You should know the public key, or you should know the event ID for Devcon. You should also check, and I'll say a little more about this in the deep dive, that the configuration that was sent to you was actually the configuration you asked for. So you don't want the prover to say, oh, I have a proof of something, but not necessarily the thing you asked for. That's something that you should check as well. But once you do all of that, this end-to-end should be very solid and you should be getting the information you need. Okay. That's it for the speedrun intro. Please check out our documentation; it's on pod.org, which just went live yesterday. And also there's a link that just went by, t.me slash zupass, to join the Telegram group. And yeah, let's go do some Q&A. All right. Where do you store the Semaphore identity secret for users in Zupass? 
So that's all client side. Zupass stores all of your private data client side. The Zupass server is aware of your public key, because that's how it can make sure that you get issued the right Devcon tickets and things like that. But yeah, Zupass is a client-side cryptographic data manager. To what extent is pod an open data standard? So, I consider it open. We haven't published a spec for it; I should work on that. But all of our code is open source, so people can do interoperability with it. The pod format itself is very generic and interoperable. It's the GPC compiler that turns a pod into the specifics of what you need to prove with a specific GPC. So the GPCs are kind of less standard and generic, though they also could be used on multiple platforms. We do have an example of GPC verification on chain that just started working a couple days ago, so all of that is possible outside of a browser, but we don't have as many examples there as we do on the pod data. Can we scroll down? Is there anything more? Can you compare pod to verifiable credentials? Yes. This is something I looked into. Pod is simpler. It doesn't really have a fixed schema or anything that ties it into a specific standard. You could put JSON-LD-based verifiable credential data in a pod if you wanted to. But a pod is much more flexible. At the cryptographic level, there is a difference in the kind of Merkle tree we use. The pod uses the LeanIMT, which is something that Semaphore created, which is much shallower, because pods tend to be relatively small, as opposed to the sparse Merkle tree that is used, at least in the implementation of verifiable credentials that I'm aware of, which is the one from iden3. That is a much deeper Merkle tree, but it can do things like prove absence of an entry, which pods can't do. Okay. What else do we have? How frequent is pod refresh? Very frequent so far, but we're hoping to keep it much more stable after Devcon. I don't have a strong answer to that. What else? How do you convert JSON to a Merkle tree? Please stick around for the deep dive session that's coming up. I'll tell you all about that. What else? Yeah. So, in the example of prover and verifier, the user's device can generate the proof, and that's why everything has to work in a browser on a phone. Client-side proving is definitely the default in Zupass. Not every app has to do it. These are libraries; you can call them wherever you want. There's much more difference between verifiers, whether they're doing server-side verification or client-side verification. That depends on what your use case is and what you're protecting against. Are the issued credentials signed, and the proof that... the rest of that question scrolled away. We do not use BBS signatures to verify partial properties; that's what we use the Merkle tree for. Again, more details on that coming up. Is it possible to make information in Zupass portable? I think that pods do make that possible, yes, as long as it's a pod, and there are APIs for getting data out of Zupass if you want to. That's called the ZAPI, at which point you can take this to whatever platform you want. We have implementations of pods in Python, C, and Rust for various projects, so it's not too hard to do. How do apps know whether a proof from a prover is legit? Well, the framework tells you that it is a valid proof. And it will confirm for you that this configuration and these revealed claims and this proof match up and are valid. So the prover couldn't have cheated about that. 
What they could cheat about is app level semantics. So if you ask for a proof of a driver's license and I sent you a proof of a frog instead, that's something that the framework can't tell you because it just says that's a valid proof. So you do have to check, is that the configure I asked for? Is the signer of this driver's license the government, etc.? But yeah, that's the kind of level of verification we got. Okay. I think that's it. Can we go back to the slides briefly? Okay. Those of you who are collecting frogs, I've got something for you if we can switch back to my slides. Oh, yeah. We'll leave that up for a minute or two. I think we've got like three minutes before the next session starts anyway. So feel free to frog away. Okay. And as I said, we're going to go straight into a deep dive session, which is going to be 90 minutes. We probably won't use the whole thing, but that's what we're scheduled for. So stick around if you want more details to answer any of those questions.", - "eventId": "devcon-7", - "slot_start": 1731578400000, - "slot_end": 1731580200000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1D13mwNG569Eo7vRzSRs1BRHF7sCXAys5mnZEJpklwtg", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "jens-groth" - ] + "mark-pascall" + ], + "eventId": "devcon-7", + "slot_start": 1731481200000, + "slot_end": 1731481800000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1RsF9WALoUv0Wv3Pc036sfCbuKskiOHZzZRM1r385Iew", + "resources_slides": "https://drive.google.com/file/d/1hx9ym73B6CqpDzuifBxUrRLBa90J68EW/view" }, "vector": [ 0, @@ -782095,11 +779926,12 @@ 0, 0, 0, + 6, + 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -782852,6 +780684,27 @@ 0, 0, 0, + 6, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, 0, 0, 0, @@ -782911,6 +780764,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -782923,6 +780777,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -782939,13 +780794,16 @@ 0, 0, 0, + 2, 0, 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -782980,7 +780838,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -783020,6 +780877,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -783064,7 +780922,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -783073,7 +780930,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -783233,6 +781089,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -783326,37 +781183,8 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, + 2, + 2, 0, 0, 0, @@ -783408,11 +781236,9 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, + 2, 0, 0, 0, @@ -783425,43 +781251,37 @@ }, { "session": { - "id": "the-verkle-advantage", - "sourceId": "YLBEZN", - "title": "The verkle advantage", - "description": "This talk provides a comprehensive overview of the achievements by the stateless development effort, over the past year. It will explore some of the discoveries we made while implementing verkle trees, that improve the user and developer experience of Ethereum.", + "id": "things-you-didnt-know-about-contract-deployment", + "sourceId": "GJM9UC", + "title": "Things you didn't know about contract deployment", + "description": "In this session we will explore some of the lesser-known facts around contract deployment. To make the presentation accessible to all technical levels, the talk will start by recapping the three ways to start contract deployment (deployment tx, CREATE, CREATE2). 
Following this, we will delve deeper into the topic and highlight some interesting facts around contract deployment, including what happens when an address already has code, ETH, or state entries at deployment.", "track": "Core Protocol", - "type": "Talk", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "Core Protocol", - "Protocol Design", - "Verkle trees", - "stateless", - "Core Protocol", - "Protocol Design", - "Verkle trees" + "deployment" ], "keywords": [ - "stateless" + "Deployment" ], - "duration": 1543, + "duration": 455, "language": "en", - "sources_swarmHash": "5a0b9f1615e20eb0e9597edb51957d0bf0f2f906610c445999fac2dd23a18440", - "sources_youtubeId": "f0e3ulrO9Ik", + "sources_swarmHash": "e5fd22d186e8fffb80536b2b8384bfe34be27dc9ada6f8ec30c26118b31bbf63", + "sources_youtubeId": "BGT-VwLIbs0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673471199dbb7a90e107f5eb", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731488400000, - "slot_end": 1731490200000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1zs9ePGkdyS7IfCoOeK_dArKiELQYjDXk5L-A70d7Gf4", - "resources_slides": null, + "slot_start": 1731470400000, + "slot_end": 1731471000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1j7qMdITP1J2AjDNnsbYHtP1ZqxF408IJ_kLSInVI0qU", + "resources_slides": "https://drive.google.com/file/d/19sJrvm-9JeKa0t_QTvSd7HUsfXsG0jBL/view", "speakers": [ - "guillaume-ballet" + "theresa-wakonig" ] }, "vector": [ @@ -784128,9 +781948,8 @@ 0, 0, 0, - 6, - 0, 0, + 6, 0, 0, 0, @@ -784235,7 +782054,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -784262,7 +782080,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -784409,7 +782226,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -784568,6 +782384,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -784731,8 +782548,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -784783,9 +782598,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -784801,39 +782616,46 @@ }, { "session": { - "id": "the-wallet-and-ux-stack-to-build-web3-applications-for-the-masses", - "sourceId": "LCNEGW", - "title": "The Wallet and UX Stack to Build Web3 Applications for the Masses", - "description": "In this talk I will give an overview of how wallet infrastructure and the relationship between wallets and dapps have evolved over the past 5 years. And give a layer-by-layer breakdown of the modern wallet stack from signers to smart account modules, how each component contributes to a UX unlock on Ethereum/L2s, and how application developers can use them today. 
We will also touch on pertinent ongoing EIPs such as 7702 (deploy code for EOAs), and 7715 (permissions).", - "track": "Usability", + "id": "this-cursed-machine-onchain-game-post-mortem", + "sourceId": "UBFQ9V", + "title": "THIS CURSED MACHINE: Onchain Game Post-Mortem", + "description": "“Live in the pod, fulfil orders, get bugs.”\r\n\r\nTHIS CURSED MACHINE is a fully onchain sci-fi body horror fulfilment center simulator by Moving Castles, a game studio for the tactical research and development of autonomous worlds.\r\n\r\nWe will speak about learnings of launching an autonomous world onchain (Redstone) and how we embraced the emergent chaos by making the bot attacks, exploits and player corporations part of the narrative of the world itself.", + "track": "Real World Ethereum", "type": "Talk", - "expertise": "Intermediate", + "expertise": "Beginner", "audience": "Product", "featured": false, "doNotRecord": false, - "keywords": [ - "Wallets", - "Signers", - "Permissions" - ], "tags": [ - "Developer Infrastructure", - "User Experience", - "Account Abstraction", - "permissions", - "Account Abstraction", - "Developer Infrastructure", - "User Experience" + "Best Practices", + "Gaming", + "Autonomous World", + "worldbuilding", + "Autonomous World", + "Best Practices", + "Gaming" ], - "language": "en", - "speakers": [ - "nichanan-kesonpat" + "keywords": [ + "Worldbuilding" ], + "duration": 1202, + "language": "en", + "sources_swarmHash": "b4c4551ccb33d3795de7e41d4cb7cd97e5953ba31de85f1be90de8c82c4ba2cf", + "sources_youtubeId": "vNIQeYXxmVw", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673470199dbb7a90e1ed9779", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673470199dbb7a90e1ed9779.vtt", + "transcript_text": " Hello, hello everyone. Nice to be here. I'm Arb, also known as Arthur Rowing Bear. I will be talking about this Christmas machine. I work at a company called Moving Castles. We're a game studio for the research and development of autonomous worlds. I have a quick definition of autonomous worlds. If single player worlds emerge out of personal computing and multiplayer worlds emerge out of client server computing, then the definition of our organization is that autonomous worlds are the new games and worlds that are made possible by P2P computing. I also want to say that I'm fighting a fever, and if I'm not making sense, you can blame the bugs fighting against my immune system right now. Okay, so we started building this autonomous world called this cursed machine a while ago. It looked like this in the beginning. And I will show you the first minutes of gameplay now. Can you make it darker? Yeah, I skipped over this part because it's boring. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Okay, I'm not going to show all of this. You're getting a kind of feel of what's going on. You're a stump trapped in a pod, and you need to produce different materials. It's a fully on-chain sci-fi body horror fulfillment center simulator, which is less important than you live in the pod, you fulfill your orders, and you get bugs. At some point you unlock a multiplayer market, you compete with other stumps to solve orders, and you can build new things on top extend the world. So, why? Why did we do all of this? But first I think there's a message from our sponsors. In the heart of industry, our machine's so grand. 
Transforms complex matter across the land Caffeine, MSG and plasma powder in hand Our cursed machine makes everything planned From waste to worth in our expert command Okay. Dear TCM, how did you do this and why? So the first thing we did was... The game is quite simple. It's a token in, push it through a circuit, token out game. We created a vast material map. And so if you take bugs, for example, you input bugs, you input them into the rat cage, you put the rats from the rat cage to the burner, you get dead rats. And there's a lot of them. Okay, composability, tokens. Second thing is narrative composability, which is even more important, I would say. So what's great about building within a cursed corporate environment is that an Excel sheet is lore consistent. This has advantages over other worlds. It makes it really easy for people to contribute to it and it kind of blurs the barrier between user-generated content and what we do. It also makes our job really easy. The next is LARPing. So everything we did was in character, which means that... Oh, yeah, diaper is in order, just so you're not confused. We did everything in character, so the Telegram chats, the Twitter, everything was done in character, as we are also employees at the same company. We're just the middle managers, and we are blaming, you know, the corporate overlords, et cetera. The third thing that was important was... fourth thing: to embrace failure. Every issue is a narrative opportunity. A very simple one is we're building an on-chain game, which means we're dealing with this vast broken machine. So the game is about being trapped in a vast dysfunctional broken machine. But also this applies to more things, as you will see. So what happened? A lot of things happened. We launched and players started building different things. This is a kind of overview of the different organizations, the different stump initiatives that emerged. Someone found out that the orders for stumps to create different materials, the smart contracts, were open, so they created a site called Stumpfmakler where you can buy materials from the stumps. Different companies started emerging that sold and cornered parts of the market. We had a first issue. Evil hackers started stealing the bugs we had wanted to give to the good stumps who were joining the game. Very bad evil hackers. So we gave a grant to two new insurance companies, which were player-led, which gave predatory loans to new stumps so they could now play the game. Of course, this would bite them in the ass later. There's a second company. Also, at some point, the medical records of the stumps got leaked. Oh yeah, Frolic built an answering machine. I don't know if the sound works. Thank you for calling the Stump unemployment hotline. Your call may be recorded for quality and training purposes. Please hold while we connect you with books. Order fulfillment center. So this was built by a player. And it reads the smart contracts and reads you the current orders. You can actually call it now. There are six jobs available. Job order number eight, 6461. We need 79. Okay, we're not going to do all of this. Okay, then we had another hack. Someone stole all the bugs. So we went out with an official statement. 
We started telling the stumps that they need to get ready for the war against the hackers. And we didn't. We beat the hackers. Actually, it was me DMing the hacker and asking him to return all the funds, which he did. Also, we had a corporate partnership with TSMC, and this is a... Can you turn up the music? I got a scat on that back on them corns She cut it for me, she want me in that corns She ice and mail it with no stop at corns Every time I suck it, it was all learned I ain't take sides, I ain't rock with no terms You can get hit with that fire, it's a burn Slow out the pulse, but I feel like a worm Bitch, I be deep, you can cut me in thirds I got a ease in me and I opened that part Yeah, bitch, I ran over the curb Bitch, I'ma pull out your toes and I turn them You knew what I said, bitch, you know that you heard me 24-7, we get just seven and eleven, double, till my face ain't been blurred My money's hard, my money fast My money thick, my money sturdy My tooth is gon' pull if I'm long-range, hit up thirty times, yeah, you callin' them crud Okay. So that worked, kind of. We are really happy with the outcome. We saw a lot of things emerge, and we think that happened because of the different methods we used, and I think that there is a lot of unexplored potential for leaning into issues, especially in the game space. As people come from the protocol space, they're kind of clinging to this idea of security, and I think it doesn't matter. If you're actually creating a thing which is supposed to be fun, the most fun about crypto is all the hacks, all the drama, all the on-chain shenanigans, and why not embrace that and make it into the narrative of the game itself? And I think that that is really how you get a decentralized narrative, where the actions of the players and the different participants shape what the world is. We're working on a next game. This is an artist rendering. I'm not going to tell you more than that. If you want to follow what we're up to, you can scan this QR code. I'm going to give you a little bit of time. Since not many people are doing it, it must be because you don't have a lot of time. So I'm going to give you more time. Okay. Are you all done? Great. And then the last thing is we, slash Rasmus, one person from Moving Castles... Oh, yeah, I want to credit GVN and Rasmus, my two co-founders, and then also Agnes Cameron and Manus Niehoff, who worked on TCM. And this one was made by Bunker and Rasmus. And you can... It's shulgin.engineering, and you can upload your frogs and synthesize new materials. It's available now. And it integrates with Zupass. Okay. The URL is shulgin.engineering. Wait. I don't know how to go back. Wait, wait. Okay, here. You have to read it quickly at the top. Here, Shulgin. Okay, wait. Okay, one more time. You see it? Okay, I'll do... Okay, I have a lot of time left, so I'm kind of... But I think that maybe it's time for questions. Did we get questions? There are questions? Good. Because we have a lot of time left. How do I do this? Alright. A big applause for Arb. Who, despite being sick, came here and presented all of this. Don't be shy. Please bring more questions. Because the first one is not really a question. It's just like, thank you. And then the second one would be: what's your favorite material from the map? 
I think the first question is really important. And I like it a lot, I must say. Thank you, whoever asked that. I agree. I am persevering. Persevering. My favorite material from the map? I don't know. There's a lot of good ones. I will think about it a little bit. Does anyone in the audience have any favorite materials? Adrenochrome is pretty good. Neuralink is good, yeah. Yeah, the mold stuff is also pretty good. I like the whole oil, what's it called, material map unlock. That was great. Petrochemicals. Electronics was good. Why Redstone? Yes, of course. I didn't mention that. We worked together with the beautiful Lattice company, and this was built on MUD. And we used their chain called Redstone as well, which worked really well. I mean, we like working in MUD, and Redstone worked well; you know, it came with the thing and it was good. I'm not super opinionated about chains. I think it worked really well and we like working with the Lattice team. Who's the best player? I think that this is an interesting question. We had a leaderboard, but I think the best players were the ones that started creating their own stuff. I think Frolic built on the API, which was cool. That was a great player achievement. And I think that the players who extended the game and extended the logic were the best players. Are there other projects? Okay, there's a lot of stuff here. How did you build it? MUD. So the ones on the top are the ones with the most upvotes. So what percentage in character are you still in right now? That's a very good question. I don't know. I think the fever is pushing me more into character. Here: obviously, blockchain constrains options for game loops, as well as developer resources; independent of these two, is there a central mechanic or game loop you find interesting from a narrative perspective? I think that it's really hard to build game loops or games on chain. I think you can do a lot of classical games when you start doing obfuscation or ZK stuff. Otherwise, it really limits the game space. I think that TCM worked for the time we ran it, but it also was basically a captcha with a faucet at the end. And then we did a bunch of wheeling and dealing around that to extend the game life of that. Currently, I find games that have optimal strategies but that change based on player behavior really interesting. And I also like social oracle games, like the beautiful Dress to Impress game on Roblox, which is a game where you dress up and you perform a fashion show and then people rate you for it. That's a great game. The next would be: are there any other projects that are along the same lines of an emergent blockchain narrative? I don't know. Are there? Does anyone know? Do you know any, Jibian? Are there any more? All of them trying? Yeah, we're all trying. I think that maybe games are too afraid to lean into the failures. I think that maybe is a unique thing about our game. And I think if more people were able to do that, we could have more emergent narratives. I think that's basically the somewhat chaotic kind of core message of my talk: that, you know, people feel like they have agency over worlds when they can break them. If you don't allow that, you're just creating a little sandbox that people can play in, but you don't allow them to change or break the sandbox. Really, the most interesting narratives are the ones that subvert the expectations of the game makers and then become part of the narrative. All right. 
Then the one that actually interests me a lot is like, how did you build it, the whole thing? Yeah, it was built using MUD by a great team, us at Moving Castles, and together with the support of the MUD team. And I think it looks good because we leaned into the kind of limitations of the medium. Yeah. Thank you so much. So what was the funniest hack then that happened? Yeah, I think the faucet hack, the, I mean, yeah, there was all kinds kinds it was like a lot of people like trying to break the game and then me kind of begging in dms for them to return the funds that happened a bunch of times the good thing is we we just were the central bank for the token anyway so when someone like hacked or tried to speculate on something we just increased inflation so it was worthless so like we did a bunch of stuff like this and uh that was fun uh we basically the the the trick is just to just demotivate so much that they give back them the tokens", "eventId": "devcon-7", - "slot_start": 1731470400000, - "slot_end": 1731472200000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1EwxJbkAW9PZZpjRozkPVAnLaQpoQZm7uf1kolnUFM_0" + "slot_start": 1731486600000, + "slot_end": 1731488400000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1cXPZD6cWdMNr2QSeVuUQ8-WSQ_YhrCRA6-l3ClLl2n0", + "resources_slides": "https://drive.google.com/file/d/1JqJ4gKILsbR7oAM4Z6wVL0u5iDi-uDgZ/view", + "speakers": [ + "arb" + ] }, "vector": [ 0, @@ -784842,8 +782664,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -785500,10 +783320,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -785603,7 +783423,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -785618,6 +783437,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -785638,10 +783458,8 @@ 0, 0, 0, - 2, 0, 0, - 2, 0, 0, 0, @@ -785698,6 +783516,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -786149,15 +783969,12 @@ 0, 0, 0, - 0, 2, 0, 0, 0, 0, 0, - 0, - 0, 2, 0, 0, @@ -786172,54 +783989,30 @@ }, { "session": { - "id": "the-wellbeing-protocol-scaling-localism", - "sourceId": "HC3QGN", - "title": "The Wellbeing Protocol - Scaling Localism", - "description": "Imagine a world where:\r\n - hyper-local marginalised communities could create impact DAOs as easily as creating FB groups\r\n - we could create a UI that abstracted the complexity of quadratic / conviction / delegated voting to create a continuous resource allocation alternative to governance\r\n - funders could stream money into millions of these treasuries\r\n\r\nFind out how this New Zealand government funded project, now running trials in three countries, is creating a network of grassroots changemakers.", + "id": "this-year-in-ethereum", + "sourceId": "MFBX7X", + "title": "This year in Ethereum", + "description": "Don’t miss the Devcon Opening Ceremony, where we’ll set the stage for an incredible event ahead, with talks from Vitalik Buterin (Founder of Ethereum), Aya Miyaguchi (Executive Director of the Ethereum Foundation), Josh Stark (Ethereum Foundation Leadership), Skylar Weaver (Devcon Team Lead), and more surprise guests.", "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Community", + "type": "Talk", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "conviction", - "zealand" - ], - "tags": [ - "DAO", - "Governance", - "Quadratic Voting", - "Collective Intelligence", - "Conviction", - "Ethereum for Good", - "Public good", - "Climate", - "ReFi", - "Regenerative Applications", - "User 
Experience", - "zealand", - "Climate", - "Collective Intelligence", - "Conviction", - "DAO", - "Ethereum for Good", - "Governance", - "Public good", - "Quadratic Voting", - "ReFi", - "Regenerative Applications", - "User Experience" - ], + "keywords": [], + "tags": [], "language": "en", "speakers": [ - "mark-pascall" + "josh-stark" ], "eventId": "devcon-7", - "slot_start": 1731481200000, - "slot_end": 1731481800000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1RsF9WALoUv0Wv3Pc036sfCbuKskiOHZzZRM1r385Iew" + "slot_start": 1731381300000, + "slot_end": 1731382800000, + "slot_roomId": "main-stage", + "sources_youtubeId": "YyK8i2-0aPk", + "sources_swarmHash": "42b2f958a6ad4ec1fc91b8dd669da09457cace9ae38b40d9772bcc6a5851ab4a", + "resources_presentation": "https://docs.google.com/presentation/d/1jnpwsT-B0lnVYIbUt5XuDZoqqTEjj666EzfAz3-aSZY", + "resources_slides": "" }, "vector": [ 0, @@ -786887,6 +784680,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -786989,7 +784783,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -787007,7 +784800,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -787069,12 +784861,10 @@ 0, 0, 0, - 2, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -787082,7 +784872,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -787099,16 +784888,13 @@ 0, 0, 0, - 2, 0, 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, @@ -787182,7 +784968,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -787395,7 +785180,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -787490,45 +785274,6 @@ 0, 0, 0, - 2, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -787538,6 +785283,51 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, 2, 0, 0, @@ -787545,8 +785335,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -787558,42 +785346,53 @@ }, { "session": { - "id": "things-you-didnt-know-about-contract-deployment", - "sourceId": "GJM9UC", - "title": "Things you didn't know about contract deployment", - "description": "In this session we will explore some of the lesser-known facts around contract deployment. To make the presentation accessible to all technical levels, the talk will start by recapping the three ways to start contract deployment (deployment tx, CREATE, CREATE2). Following this, we will delve deeper into the topic and highlight some interesting facts around contract deployment, including what happens when an address already has code, ETH, or state entries at deployment.", - "track": "Core Protocol", + "id": "time-is-all-you-need-optimizing-dutch-auctions-on-arbitrum", + "sourceId": "QNSX9R", + "title": "Time is all you need: optimizing Dutch auctions on Arbitrum", + "description": "Dutch auctions are a common approach in MEV-mitigating mechanism designs. However, little work has been done in exploring optimal auction execution times. 
Using simulations, we demonstrate how optimizing for a key metric — wait time — can achieve optimal execution without the complexity of existing systems.", + "track": "Cryptoeconomics", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Research", "featured": false, "doNotRecord": false, - "tags": [ - "deployment" - ], "keywords": [ - "Deployment" + "Dutch", + "auctions" + ], + "tags": [ + "Decentralization Improvements", + "Layer 2s", + "Mechanism design", + "MEV", + "auction", + "dutch", + "Decentralization Improvements", + "Layer 2s", + "Mechanism design", + "MEV" ], - "duration": 455, "language": "en", - "sources_swarmHash": "e5fd22d186e8fffb80536b2b8384bfe34be27dc9ada6f8ec30c26118b31bbf63", - "sources_youtubeId": "BGT-VwLIbs0", + "sources_swarmHash": "7b4f3808fd7baa654f358d3a9d534587739f8c60e00696cfd4ee495f27ccbbf3", + "sources_youtubeId": "eq2AbGusaJY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "brad-bachu", + "cody-born", + "alan-wu" + ], "eventId": "devcon-7", - "slot_start": 1731470400000, - "slot_end": 1731471000000, + "slot_start": 1731489000000, + "slot_end": 1731489600000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1j7qMdITP1J2AjDNnsbYHtP1ZqxF408IJ_kLSInVI0qU", - "resources_slides": null, - "speakers": [ - "theresa-wakonig" - ] + "resources_presentation": "https://docs.google.com/presentation/d/1DhrF39oif7Piw0FK877aPOnLTq12Z7iwOXeKa33SnVU", + "resources_slides": "https://drive.google.com/file/d/1tANSlnRSSqUAGImjL6DqB5mPekqZvCvI/view" }, "vector": [ - 0, - 0, 0, 0, 6, @@ -788258,20 +786057,12 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, + 6, + 6, + 6, 0, 0, 0, @@ -788352,9 +786143,12 @@ 0, 0, 0, + 6, 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -788413,6 +786207,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -788675,6 +786470,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -788695,7 +786491,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -788864,6 +786659,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -788911,7 +786707,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -788923,51 +786718,53 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "this-cursed-machine-onchain-game-post-mortem", - "sourceId": "UBFQ9V", - "title": "THIS CURSED MACHINE: Onchain Game Post-Mortem", - "description": "“Live in the pod, fulfil orders, get bugs.”\r\n\r\nTHIS CURSED MACHINE is a fully onchain sci-fi body horror fulfilment center simulator by Moving Castles, a game studio for the tactical research and development of autonomous worlds.\r\n\r\nWe will speak about learnings of launching an autonomous world onchain (Redstone) and how we embraced the emergent chaos by making the bot attacks, exploits and player corporations part of the narrative of the world itself.", - "track": "Real World Ethereum", - "type": "Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "tlsnotary-applying-mpc-and-interactive-zk-to-prove-web2-data", + "sourceId": "RTVKJC", + "title": "TLSNotary: Applying MPC and interactive ZK to prove web2 data", + "description": "Diving into TLSNotary, a protocol which leverages multi-party computation and interactive ZK to prove the authenticity and provenance of any data on the web to another party.\r\n\r\nSummary:\r\n1. What it is and what it can do\r\n2. High-level overview of how it works\r\n3. Details on the underlying MPC and ZK protocols that we use\r\n4. 
How to use it", + "track": "Applied Cryptography", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Best Practices", - "Gaming", - "Autonomous World", - "worldbuilding", - "Autonomous World", - "Best Practices", - "Gaming" - ], "keywords": [ - "Worldbuilding" + "User Sovereignty", + "Infrastructure", + "Oracle" + ], + "tags": [ + "Identity", + "ZKP", + "MPC", + "oracle", + "Identity", + "MPC", + "ZKP" ], - "duration": 1202, "language": "en", "sources_swarmHash": "", "sources_youtubeId": "", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673470199dbb7a90e1ed9779", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673470199dbb7a90e1ed9779.vtt", - "transcript_text": " Hello, hello everyone. Nice to be here. I'm Arb, also known as Arthur Rowing Bear. I will be talking about this Christmas machine. I work at a company called Moving Castles. We're a game studio for the research and development of autonomous worlds. I have a quick definition of autonomous worlds. If single player worlds emerge out of personal computing and multiplayer worlds emerge out of client server computing, then the definition of our organization is that autonomous worlds are the new games and worlds that are made possible by P2P computing. I also want to say that I'm fighting a fever, and if I'm not making sense, you can blame the bugs fighting against my immune system right now. Okay, so we started building this autonomous world called this cursed machine a while ago. It looked like this in the beginning. And I will show you the first minutes of gameplay now. Can you make it darker? Yeah, I skipped over this part because it's boring. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Bugs. Okay, I'm not going to show all of this. You're getting a kind of feel of what's going on. You're a stump trapped in a pod, and you need to produce different materials. It's a fully on-chain sci-fi body horror fulfillment center simulator, which is less important than you live in the pod, you fulfill your orders, and you get bugs. At some point you unlock a multiplayer market, you compete with other stumps to solve orders, and you can build new things on top extend the world. So, why? Why did we do all of this? But first I think there's a message from our sponsors. In the heart of industry, our machine's so grand. Transforms complex matter across the land Caffeine, MSG and plasma powder in hand Our cursed machine makes everything planned From waste to worth in our expert command Okay. Dear TCM, how did you do this and why? So the first thing we did was... The game is quite simple. It's a token in, push it through a circuit, token out game. We created a vast material map. And so if you take bugs, for example, token out game. We created a vast material map. And so if you take bugs, for example, you input bugs, you input them into the rat cage, you put the rats from the rat cage to the burner, you get dead rats. And there's a lot of them. Okay, composability, tokens. Second thing is narrative composability, which is even more important I would say. So what's great about building within a cursed corporate environment is that a Excel sheet is lore consistent. This has advantages over other worlds. 
It makes it really easy for people to contribute to it and it kind of blurs the barrier between user-generated content and what we do. It also makes our job really easy. The next is LARPing. So everything we did was in character, which means that... Oh, yeah, diaper is in order, just so you're not confused. We did everything in character, so the Telegram chats, the Twitter, everything was done in character, as we are also employees at the same company. We're just the middle managers, and we are blaming, you know, the corporate overlords, et cetera. The third thing that was important was... fourth thing: to embrace failure. Every issue is a narrative opportunity. A very simple one is we're building an on-chain game, which means we're dealing with this vast broken machine. So the game is about being trapped in a vast dysfunctional broken machine. But also this applies to more things, as you will see. So what happened? A lot of things happened. We launched and players started building different things. This is a kind of overview of the different organizations, the different stump initiatives that emerged. Someone found out that the orders for stumps to create different materials, the smart contracts, were open, so they created a site called Stumpfmakler where you can buy materials from the stumps. Different companies started emerging that sold and cornered parts of the market. We had a first issue. Evil hackers started stealing the bugs we had wanted to give to the good stumps who were joining the game. Very bad evil hackers. So we gave a grant to two new insurance companies, which were player-led, which gave predatory loans to new stumps so they could now play the game. Of course, this would bite them in the ass later. There's a second company. Also, at some point, the medical records of the stumps got leaked. Oh yeah, Frolic built an answering machine. I don't know if the sound works. Thank you for calling the Stump unemployment hotline. Your call may be recorded for quality and training purposes. Please hold while we connect you with books. Order fulfillment center. So this was built by a player. And it reads the smart contracts and reads you the current orders. You can actually call it now. There are six jobs available. Job order number eight, 6461. We need 79. Okay, we're not going to do all of this. Okay, then we had another hack. Someone stole all the bugs. So we went out with an official statement. We started telling the stumps that they need to get ready for the war against the hackers. And we didn't. We beat the hackers. Actually, it was me DMing the hacker and asking him to return all the funds, which he did. Also, we had a corporate partnership with TSMC, and this is a... Can you turn up the music?
I got a scat on that back on them corns She cut it for me, she want me in that corns She ice and mail it with no stop at corns Every time I suck it, it was all learned I ain't take sides, I ain't rock with no terms You can get hit with that fire, it's a burn Slow out the pulse, but I feel like a worm Bitch, I be deep, you can cut me in thirds I got a ease in me and I opened that part Yeah, bitch, I ran over the curb Bitch, I'ma pull out your toes and I turn them You knew what I said, bitch, you know that you heard me 24-7, we get just seven and eleven, double, till my face ain't been blurred My money's hard, my money fast My money thick, my money sturdy My tooth is gon' pull if I'm long-range, hit up thirty times, yeah, you callin' them crud Okay. So that worked, kind of. We are really happy with the outcome. We saw a lot of things emerge, and we think that happened because of the different methods we used, and I think that there is a lot of unexplored potential for leaning into issues, especially in the game space. As people come from the protocol space, they're kind of clinging to this idea of security, and I think it doesn't matter. If you're actually creating a thing which is supposed to be fun, the most fun about crypto is all the hacks, all the drama, all the on-chain shenanigans, and why not embrace that and make it into the narrative of the game itself? And I think that that is really how you get a decentralized narrative, where the actions of the players and the different participants shape what the world is. We're working on a next game. This is an artist rendering. I'm not going to tell you more than that. If you want to follow what we're up to, you can scan this QR code. I'm going to give you a little bit of time. Since not many people are doing it, it must be because you don't have a lot of time. So I'm going to give you more time. Okay. Are you all done? Great. And then the last thing is we, slash Rasmus, one person from Moving Castles... Oh, yeah, I want to credit GVN and Rasmus, my two co-founders, and then also Agnes Cameron and Manus Niehoff, who worked on TCM. And this one was made by Bunker and Rasmus. And you can... It's shulgin.engineering, and you can upload your frogs and synthesize new materials. It's available now. And it integrates with Zupass. Okay. The URL is shulgin.engineering. Wait. I don't know how to go back. Wait, wait. Okay, here. You have to read it quickly at the top. Here, Shulgin. Okay, wait. Okay, one more time. You see it? Okay, I'll do... Okay, I have a lot of time left, so I'm kind of... But I think that maybe it's time for questions. Did we get questions? There are questions? Good. Because we have a lot of time left. How do I do this? Alright. A big applause for Arb. Who, despite being sick, came here and presented all of this. Don't be shy. Please bring more questions. Because the first one is not really a question. It's just like, thank you. And then the second one would be: what's your favorite material from the map? I think the first question is really important. And I like it a lot, I must say. Thank you, whoever asked that. I agree. I am persevering. Persevering. My favorite material from the map? I don't know. There's a lot of good ones. I will think about it a little bit.
Does anyone in the audience have any favorite materials? Adrenochrome is pretty good. Neuralink is good, yeah. Yeah, the mold stuff is also pretty good. I like the whole oil, what's it called, material map unlock. That was great. Petrochemicals. Electronics was good. Why Redstone? Yes, of course. I didn't mention that. We worked together with the beautiful Lattice company, and this was built on MUD. And we used their chain called Redstone as well, which worked really well. I mean, we like working in MUD, and Redstone worked well; you know, it came with the thing and it was good. I'm not super opinionated about chains. I think it worked really well and we like working with the Lattice team. Who's the best player? I think that this is an interesting question. We had a leaderboard, but I think the best players were the ones that started creating their own stuff. I think Frolic built on the API, which was cool. That was a great player achievement. And I think that the players who extended the game and extended the logic were the best players. Are there other projects? Okay, there's a lot of stuff here. How did you build it? MUD. So the ones on the top are the ones with the most upvotes. So what percentage in character are you still in right now? That's a very good question. I don't know. I think the fever is pushing me more into character. Here: obviously, blockchain constrains options for game loops, as well as developer resources; independent of these two, is there a central mechanic or game loop you find interesting from a narrative perspective? I think that it's really hard to build game loops or games on chain. I think you can do a lot of classical games when you start doing obfuscation or ZK stuff. Otherwise, it really limits the game space. I think that TCM worked for the time we ran it, but it also was basically a captcha with a faucet at the end. And then we did a bunch of wheeling and dealing around that to extend the game life of that. Currently, I find games that have optimal strategies but that change based on player behavior really interesting. And I also like social oracle games, like the beautiful Dress to Impress game on Roblox, which is a game where you dress up and you perform a fashion show and then people rate you for it. That's a great game. The next would be: are there any other projects that are along the same lines of an emergent blockchain narrative? I don't know. Are there? Does anyone know? Do you know any, Jibian? Are there any more? All of them trying? Yeah, we're all trying. I think that maybe games are too afraid to lean into the failures. I think that maybe is a unique thing about our game. And I think if more people were able to do that, we could have more emergent narratives. I think that's basically the somewhat chaotic kind of core message of my talk: that, you know, people feel like they have agency over worlds when they can break them. If you don't allow that, you're just creating a little sandbox that people can play in, but you don't allow them to change or break the sandbox. Really, the most interesting narratives are the ones that subvert the expectations of the game makers and then become part of the narrative. All right. Then the one that actually interests me a lot is like: how did you build it, the whole thing? Yeah, it was built using MUD by a great team, us at Moving Castles, together with the support of the MUD team.
And I think it looks good because we leaned into the kind of limitations of the medium. Yeah. Thank you so much. So what was the funniest hack that happened, then? Yeah, I think the faucet hack. I mean, yeah, there were all kinds. It was like a lot of people trying to break the game, and then me kind of begging in DMs for them to return the funds. That happened a bunch of times. The good thing is we just were the central bank for the token anyway, so when someone hacked or tried to speculate on something, we just increased inflation so it was worthless. So we did a bunch of stuff like this, and that was fun. Basically, the trick is just to demotivate them so much that they give back the tokens", - "eventId": "devcon-7", - "slot_start": 1731486600000, - "slot_end": 1731488400000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1cXPZD6cWdMNr2QSeVuUQ8-WSQ_YhrCRA6-l3ClLl2n0", - "resources_slides": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "arb" - ] + "sinu" ], + "eventId": "devcon-7", + "slot_start": 1731576600000, + "slot_end": 1731577200000, + "slot_roomId": "classroom-e", + "resources_presentation": "https://docs.google.com/presentation/d/1XH5xVNY-eLNdwvYduookcntMG3Z4qjU319sqNmXxUXo", + "resources_slides": "https://drive.google.com/file/d/1SOVbq_b6OBIWkwl06aLNMMKgOa6VY0NY/view" }, "vector": [ 0, @@ -788976,15 +786773,11 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -789637,7 +787430,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -789645,6 +787437,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -789752,7 +787545,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -789772,6 +787564,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -789797,6 +787590,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -789810,6 +787604,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -789831,8 +787626,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -790155,6 +787948,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -790240,7 +788034,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -790283,6 +788076,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -790292,8 +788086,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -790306,37 +788098,49 @@ }, { "session": { - "id": "this-year-in-ethereum", - "sourceId": "MFBX7X", - "title": "This year in Ethereum", - "description": "Don’t miss the Devcon Opening Ceremony, where we’ll set the stage for an incredible event ahead, with talks from Vitalik Buterin (Founder of Ethereum), Aya Miyaguchi (Executive Director of the Ethereum Foundation), Josh Stark (Ethereum Foundation Leadership), Skylar Weaver (Devcon Team Lead), and more surprise guests.", - "track": "Real World Ethereum", + "id": "today-verkle-tomorrow-zk-everything-stateless-everything-lightclient", + "sourceId": "Z8EEGW", + "title": "Today Verkle + Tomorrow ZK = Everything Stateless, Everything Lightclient", + "description": "Statelessness could be one of the biggest unlocks in the Ethereum ecosystem, allowing the protocol to scale massively without giving away control and access to big entities, all while providing some real 'teeth' to the light client ecosystem.\r\n\r\nIn this talk, we’ll see how stateless clients enable immediate scalability and decentralization benefits, and how combining statelessness with ZKing the state transitions unlocks Ethereum’s long-term vision.", "track": "Core Protocol", "type": "Talk", - "expertise": "", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], "language": "en", - 
"speakers": [ - "josh-stark" + "tags": [ + "Light Clients", + "Zero-Knowledge", + "statelessness", + "Light Clients", + "Zero-Knowledge" + ], + "keywords": [ + "statelessness" ], + "duration": 1464, + "language": "en", + "sources_swarmHash": "c3c8d6808c0b093ef71c7ebcba97b19a2528e60002141e344b7674df85b5c061", + "sources_youtubeId": "oRiShQ5LPqw", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673478069dbb7a90e13fb370", "eventId": "devcon-7", - "slot_start": 1731381300000, - "slot_end": 1731382800000, - "slot_roomId": "main-stage", - "sources_youtubeId": "YyK8i2-0aPk", - "sources_swarmHash": "42b2f958a6ad4ec1fc91b8dd669da09457cace9ae38b40d9772bcc6a5851ab4a", - "resources_presentation": "https://docs.google.com/presentation/d/1jnpwsT-B0lnVYIbUt5XuDZoqqTEjj666EzfAz3-aSZY" + "slot_start": 1731490200000, + "slot_end": 1731492000000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1vOoQZu3TYR_edc7RAy-eEqHYRvkAPSwPJBk3veKBxRM", + "resources_slides": "https://drive.google.com/file/d/1AepsIidX_uKP6DX2o-1d1xEoEpxr5zP2/view", + "speakers": [ + "jason-chaskin", + "gajinder-singh" + ] }, "vector": [ 0, 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -790999,24 +788803,13 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -791104,6 +788897,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -791111,6 +788905,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -791609,6 +789404,11 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -791649,6 +789449,7 @@ 2, 0, 0, + 0, 2, 0, 0, @@ -791667,48 +789468,30 @@ }, { "session": { - "id": "time-is-all-you-need-optimizing-dutch-auctions-on-arbitrum", - "sourceId": "QNSX9R", - "title": "Time is all you need: optimizing Dutch auctions on Arbitrum", - "description": "Dutch auctions are a common approach in MEV-mitigating mechanism designs. However, little work has been done in exploring optimal auction execution times. Using simulations, we demonstrate how optimizing for a key metric — wait time — can achieve optimal execution without the complexity of existing systems.", - "track": "Cryptoeconomics", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Research", + "id": "tomo-dj-set", + "sourceId": "3FTAT3", + "title": "Tomo DJ Set", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Dutch", - "auctions" - ], - "tags": [ - "Decentralization Improvements", - "Layer 2s", - "Mechanism design", - "MEV", - "auction", - "dutch", - "Decentralization Improvements", - "Layer 2s", - "Mechanism design", - "MEV" - ], + "keywords": [], + "tags": [], "language": "en", - "speakers": [ - "brad-bachu", - "cody-born", - "alan-wu" - ], + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731489000000, - "slot_end": 1731489600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1DhrF39oif7Piw0FK877aPOnLTq12Z7iwOXeKa33SnVU" + "slot_start": 1731583800000, + "slot_end": 1731588600000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1537a7C9-ILckCdyKNCQyYB-I6Kwu_xrA6i0Sk2-j9eU", + "resources_slides": "" }, "vector": [ 0, 0, - 6, 0, 0, 0, @@ -791716,6 +789499,8 @@ 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -792375,9 +790160,6 @@ 0, 0, 0, - 6, - 6, - 6, 0, 0, 0, @@ -792459,12 +790241,9 @@ 0, 0, 0, - 6, 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -792523,7 +790302,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -792787,7 +790565,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -792977,7 +790754,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -793020,7 +790796,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -793028,6 +790803,10 @@ 2, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -793042,41 +790821,52 @@ }, { "session": { - "id": "tlsnotary-applying-mpc-and-interactive-zk-to-prove-web2-data", - "sourceId": "RTVKJC", - "title": "TLSNotary: Applying MPC and interactive ZK to prove web2 data", - "description": "Diving into TLSNotary, a protocol which leverages multi-party computation and interactive ZK to prove the authenticity and provenance of any data on the web to another party.\r\n\r\nSummary:\r\n1. What it is and what it can do\r\n2. High-level overview of how it works\r\n3. Details on the underlying MPC and ZK protocols that we use\r\n4. How to use it", - "track": "Applied Cryptography", - "type": "Lightning Talk", + "id": "top-hacks-since-devcon-vi-what-did-we-learn", + "sourceId": "FCWCBG", + "title": "Top Hacks since Devcon VI: what did we learn?", + "description": "Discover the most daring blockchain hacks of '22-'24 and how to defend against them. Join Mudit Gupta, CISO of Polygon, and Matthias Egli from ChainSecurity for an analysis of tactics and vulnerabilities, and gain valuable insights to stay ahead of the game. 
And stay tuned for a prominent anon surprise guest!", + "track": "Security", + "type": "Workshop", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "User Sovereignty", - "Infrastructure", - "Oracle" - ], "tags": [ - "Identity", - "ZKP", - "MPC", - "oracle", - "Identity", - "MPC", - "ZKP" + "Security", + "Hacks", + "Use Cases", + "war", + "room", + "Hacks", + "Security", + "Use Cases" ], - "language": "en", - "speakers": [ - "sinu" + "keywords": [ + "Learnings", + "War Rooms" ], + "duration": 4878, + "language": "en", + "sources_swarmHash": "ff29aee71f1c81c6d57f6b49d5c3bcca90c840e14321a24a57645153e3b4b044", + "sources_youtubeId": "MQjw2ffttzw", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673473899dbb7a90e121fd13", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731576600000, - "slot_end": 1731577200000, - "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/1XH5xVNY-eLNdwvYduookcntMG3Z4qjU319sqNmXxUXo" + "slot_start": 1731483000000, + "slot_end": 1731488400000, + "slot_roomId": "classroom-b", + "resources_presentation": "https://docs.google.com/presentation/d/1Ic4xQqu3tPIGtBkRi-td-CDrhLlNwW9GBWn1_dYegTE", + "resources_slides": "https://drive.google.com/file/d/1kzSPIKpTuasqzSKDl8GLJyGzv8YrglUN/view", + "speakers": [ + "matthias-egli", + "mudit-gupta" + ] }, "vector": [ + 6, 0, 0, 0, @@ -793087,7 +790877,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -793333,6 +791122,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -793521,6 +791311,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -793749,7 +791540,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -793824,6 +791614,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -793877,7 +791668,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -793917,14 +791707,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -794086,6 +791868,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -794262,7 +792045,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -794352,6 +792134,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -794413,42 +792197,49 @@ }, { "session": { - "id": "today-verkle-tomorrow-zk-everything-stateless-everything-lightclient", - "sourceId": "Z8EEGW", - "title": "Today Verkle + Tomorrow ZK = Everything Stateless, Everything Lightclient", - "description": "Statelessness could be one of the biggest unlocks in the Ethereum ecosystem, allowing the protocol to scale massively without giving away control and access to big entities, all while providing some real 'teeth' to the light client ecosystem.\r\n\r\nIn this talk, we’ll see how stateless clients enable immediate scalability and decentralization benefits, and how combining statelessness with ZKing the state transitions unlocks Ethereum’s long-term vision.", + "id": "top-opcode-offenders-in-the-zkevm", + "sourceId": "DJL7RP", + "title": "Top opcode offenders in the zkEVM", + "description": "One of the challenges for any L2 is to reflect accurately the cost for each opcode in zk-resources.\r\nEthereum L1 reflects the resource cost in term of GAS but lately it has been proposed chnages in opcode GAS cost to fit the zk-world to make Ethreum L1 more aligned to L2 or even with enshrined zk-rollups.\r\nIn this talk, I will explain the worst performance opcodes when comparing its GAS cost Vs zk-resources cost in Polygon zkEVM in typical transactions (erc20 trannsfers, swaps, ...)", "track": "Core Protocol", "type": "Talk", - "expertise": "Intermediate", + "expertise": "Expert", "audience": "Engineering", "featured": 
false, "doNotRecord": false, "tags": [ - "Light Clients", - "Zero-Knowledge", - "statelessness", - "Light Clients", - "Zero-Knowledge" + "Core Protocol", + "Layer 2s", + "Zk Rollups", + "top", + "offenders", + "Core Protocol", + "Layer 2s", + "Zk Rollups" ], "keywords": [ - "statelessness" + "zk-resources", + "GAS costs", + "top offenders" ], - "duration": 1464, + "duration": 1362, "language": "en", - "sources_swarmHash": "c3c8d6808c0b093ef71c7ebcba97b19a2528e60002141e344b7674df85b5c061", - "sources_youtubeId": "oRiShQ5LPqw", + "sources_swarmHash": "1cf0c4cf5a7f3375b701fee34d27087aef6897bea41bd54d5ce1afdf40e7e878", + "sources_youtubeId": "doQJosAFOaM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673478069dbb7a90e13fb370", + "sources_streamethId": "673477c09dbb7a90e13d8506", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", "slot_start": 1731490200000, "slot_end": 1731492000000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1vOoQZu3TYR_edc7RAy-eEqHYRvkAPSwPJBk3veKBxRM", - "resources_slides": null, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1NcWox_AiyJE1F6zW2KLfOoCFpaY0DVyowm34wlSdbao", + "resources_slides": "https://drive.google.com/file/d/1yKgFK_UEKkiCR8OZviogOzogWiNHCu8d/view", "speakers": [ - "jason-chaskin", - "gajinder-singh" + "carlos-matallana", + "jesus" ] }, "vector": [ @@ -794956,7 +792747,7 @@ 0, 0, 0, - 0, + 6, 0, 0, 0, @@ -795126,8 +792917,6 @@ 0, 0, 6, - 6, - 0, 0, 0, 0, @@ -795215,8 +793004,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -795270,8 +793057,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -795725,7 +793514,7 @@ 0, 0, 2, - 0, + 2, 0, 0, 0, @@ -795767,9 +793556,6 @@ 0, 0, 2, - 0, - 0, - 0, 2, 0, 0, @@ -795788,48 +793574,39 @@ }, { "session": { - "id": "tomo-dj-set", - "sourceId": "3FTAT3", - "title": "Tomo DJ Set", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", + "id": "tracing-integration-in-lighthouse", + "sourceId": "RVZX3C", + "title": "Tracing Integration in Lighthouse", + "description": "During Ethereum Protocol Fellowship, I've worked on integrating `Tracing`(an async-friendly logging framework) into Lighthouse(CL client) .\r\nThis presentation will provide a brief overview of the work that I’ve done.", + "track": "[CLS] EPF Day", + "type": "Lightning Talk", + "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, + "tags": [ + "Core Protocol", + "Frameworks" + ], "keywords": [], - "tags": [], + "duration": 841, "language": "en", - "speakers": [], + "sources_swarmHash": "175d7bc039ec1952a66d831fd8c59ca61cd356e1f223f38794226fa65f26d38e", + "sources_youtubeId": "MuOh05pJID0", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67343e3a9dbb7a90e1dcb5b6", "eventId": "devcon-7", - "slot_start": 1731583800000, - "slot_end": 1731588600000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1537a7C9-ILckCdyKNCQyYB-I6Kwu_xrA6i0Sk2-j9eU" + "slot_start": 1731474900000, + "slot_end": 1731475800000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1RQXvuQDzdyRtC3YArjUnvZw9pKG8y3WwlKPipk1FNJE", + "resources_slides": "https://drive.google.com/file/d/1DQwrgn53uSbJQhmgt1Wohxnf2iWAkP3m/view", + "speakers": [ + "sayan" + ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -795845,6 +793622,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -796503,6 +794281,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -796595,6 +794374,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -796680,6 +794460,18 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -797124,11 +794916,12 @@ 0, 0, 0, - 2, 0, 0, 2, 0, + 2, + 0, 0, 0, 0, @@ -797145,48 +794938,43 @@ }, { "session": { - "id": "top-hacks-since-devcon-vi-what-did-we-learn", - "sourceId": "FCWCBG", - "title": "Top Hacks since Devcon VI: what did we learn?", - "description": "Discover the most daring blockchain hacks of '22-'24 and how to defend against them. Join Mudit Gupta, CISO of Polygon, and Matthias Egli from ChainSecurity for an analysis of tactics and vulnerabilities, and gain valuable insights to stay ahead of the game. And stay tuned for a prominent anon surprise guest!", + "id": "transaction-simulation-the-good-the-bad-and-the-ugly", + "sourceId": "TE9JUF", + "title": "Transaction simulation, the good, the bad & the ugly", + "description": "Transaction simulation allows users to preview the outcomes of signing a transaction, enabling them to make informed decisions rather than fully trusting the dApp. However, several caveats and risks are associated with relying on simulated transaction outcomes. State changes, differing contract behavior between simulation and on-chain execution, and randomness can all affect the outcome. 
In this talk, I'll share my experiences and learnings from simulating user transactions over the past 2 years", "track": "Security", - "type": "Workshop", + "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ "Security", - "Hacks", - "Use Cases", - "war", - "room", - "Hacks", + "User Experience", + "safety", "Security", - "Use Cases" + "User Experience" ], "keywords": [ - "Learnings", - "War Rooms" + "simulation", + "wallet", + "safety" ], - "duration": 4878, + "duration": 458, "language": "en", - "sources_swarmHash": "ff29aee71f1c81c6d57f6b49d5c3bcca90c840e14321a24a57645153e3b4b044", - "sources_youtubeId": "MQjw2ffttzw", + "sources_swarmHash": "1367b463e69cb498817ffc03a9949daeade7c14957d466768d66c65a2b542e0f", + "sources_youtubeId": "12uW2nhIxN4", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673473899dbb7a90e121fd13", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731483000000, - "slot_end": 1731488400000, - "slot_roomId": "classroom-b", - "resources_presentation": "https://docs.google.com/presentation/d/1Ic4xQqu3tPIGtBkRi-td-CDrhLlNwW9GBWn1_dYegTE", - "resources_slides": null, + "slot_start": 1731409800000, + "slot_end": 1731410400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1Bl4qs4Zj65LUtt4i8uht8GdKLHGxRkYht0gt_Qcd_n4", + "resources_slides": "https://drive.google.com/file/d/1Al6HgtbtUJg7ek83Ogsd_SmRkc0kZfyb/view", "speakers": [ - "matthias-egli", - "mudit-gupta" + "kim-persson" ] }, "vector": [ @@ -797447,9 +795235,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -797636,7 +795421,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -797869,6 +795653,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -797957,6 +795742,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -798020,7 +795806,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -798195,7 +795980,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -798245,6 +796029,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -798463,8 +796248,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -798526,49 +796309,43 @@ }, { "session": { - "id": "top-opcode-offenders-in-the-zkevm", - "sourceId": "DJL7RP", - "title": "Top opcode offenders in the zkEVM", - "description": "One of the challenges for any L2 is to reflect accurately the cost for each opcode in zk-resources.\r\nEthereum L1 reflects the resource cost in term of GAS but lately it has been proposed chnages in opcode GAS cost to fit the zk-world to make Ethreum L1 more aligned to L2 or even with enshrined zk-rollups.\r\nIn this talk, I will explain the worst performance opcodes when comparing its GAS cost Vs zk-resources cost in Polygon zkEVM in typical transactions (erc20 trannsfers, swaps, ...)", - "track": "Core Protocol", + "id": "transforming-systems-lessons-from-taiwans-movements", + "sourceId": "B9EDKY", + "title": "Transforming Systems: Lessons from Taiwan's Movements", + "description": "I will talk about the most recent struggles of open source communities in Taiwan, g0v specifically, how da0 has been trying to help in the past year or so, the conclusions we had and what is still missing. g0v has been running bi-monthly hackathons for 10 years now, which has been the key foundation for the community. 
April this year they stopped due to lack of funding support. We use this as a point of reference to look at how a web3-oriented subgroup like da0 could have done better, and at the future.", "track": "Coordination", "type": "Talk", "expertise": "Beginner", "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ "Civil Resistance", "Coordination", "Public good" ], "keywords": [ "Ecosystem", "Funding", "Mainstream" ], "duration": 924, "language": "en", "sources_swarmHash": "2b9dfee773cde09da38f0a590cb1a7dbf70bfd70506a4361cbd19409fbeea5ef", "sources_youtubeId": "2cuicY646Jo", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6736ef6274749a4b891853e7", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736ef6274749a4b891853e7.vtt", "transcript_text": " Can you guys hear me? OK. Hi guys, my name is Noah. So today I'm going to talk about the strategies of making changes and our reflections from Taiwan. So, 10 years ago, this community called GovZero started in Taiwan. It started from having a bunch of hackers trying to open up government data. And later in the years, it became a beacon of digital democracy globally, a leader of digital democracy in the world. And in recent years, the momentum has slowed down a little bit. So two years ago, me and my partner Vivian here basically started this initiative called DAO Zero. And the mission was to supercharge Gov Zero, to supercharge the digital democracy movement. And we did a bunch of things. These include the decentralized ID pilots in Taiwan, various retroactive funding experiments, and a lot of research: funding the Commerce Taipei, Dao Taipei, Plurality Taipei, a bunch of different things. But at a certain point, I started asking myself, what has really changed? Were we really able to help? Despite the relative success of all the experiments, this year in April, GovZero basically posted this picture saying, okay, we're running out of funding, please donate to GovZero so it can keep running its bimonthly hackathons, which, in 10 years, have never paused once. So this is a signal that maybe, despite all the experiments that we've done, we need to go deeper on how we can make systemic change in the future. 
So if we are to initiate something new, if we really want to change system to the better, positively, sustainably, what are the strategies we are to adopt, what are the potential tools", "eventId": "devcon-7", - "slot_start": 1731490200000, - "slot_end": 1731492000000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1NcWox_AiyJE1F6zW2KLfOoCFpaY0DVyowm34wlSdbao", - "resources_slides": null, + "slot_start": 1731638700000, + "slot_end": 1731639900000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1mKMsPFBtVYtAcJOczCaTR2Ssw6fiQ86zw-Jz3zyGmFk", + "resources_slides": "https://drive.google.com/file/d/1lxSdq1o6dZ8LQL69aRwqftKqmVQDNKNW/view", "speakers": [ - "carlos-matallana", - "jesus" + "noah-yeh" ] }, "vector": [ @@ -798576,10 +796353,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, 0, @@ -798587,6 +796360,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -799076,7 +796850,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -799247,12 +797020,12 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -799342,7 +797115,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -799389,10 +797161,8 @@ 0, 0, 0, - 2, 0, 0, - 2, 0, 0, 0, @@ -799436,6 +797206,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -799481,6 +797252,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -799554,6 +797326,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -799847,8 +797620,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -799890,13 +797661,14 @@ 0, 0, 2, - 2, 0, 0, 0, 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -799908,47 +797680,57 @@ }, { "session": { - "id": "tracing-integration-in-lighthouse", - "sourceId": "RVZX3C", - "title": "Tracing Integration in Lighthouse", - "description": "During Ethereum Protocol Fellowship, I've worked on integrating `Tracing`(an async-friendly logging framework) into Lighthouse(CL client) .\r\nThis presentation will provide a brief overview of the work that I’ve done.", - "track": "[CLS] EPF Day", + "id": "transitioning-from-an-l1-to-an-l2-a-case-study", + "sourceId": "KHVZ9M", + "title": "Transitioning from an L1 to an L2: A case study", + "description": "This talk will cover the learnings from cLabs' experience rebuilding Celo from the ground up as an L2. We hope that it can be a useful case study for other L1s to follow.", + "track": "Layer 2", "type": "Lightning Talk", - "expertise": "Beginner", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Core Protocol", - "Frameworks" + "Layer 1", + "Layer 2s", + "Rollups", + "Scalability", + "Optimistic rollups", + "Use Cases", + "learnings", + "technical", + "Layer 1", + "Layer 2s", + "Optimistic rollups", + "Rollups", + "Scalability", + "Use Cases" ], - "keywords": [], - "duration": 841, + "keywords": [ + "Layer2", + "case study", + "technical learnings" + ], + "duration": 516, "language": "en", - "sources_swarmHash": "175d7bc039ec1952a66d831fd8c59ca61cd356e1f223f38794226fa65f26d38e", - "sources_youtubeId": "MuOh05pJID0", + "sources_swarmHash": "a8c9c4eae5b8bb85116277e1923cea79ee156b70ba11481801a3d6a23aac001e", + "sources_youtubeId": "JerIZmTt-tE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67343e3a9dbb7a90e1dcb5b6", + "sources_streamethId": "6735d79e9dbb7a90e1be2c82", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735d79e9dbb7a90e1be2c82.vtt", + "transcript_text": " Hi everyone, my name is Marek Olszewski and once again we're going to be talking about cello's transition from an L1 to an L2. 
You've probably heard that we're making this big transition and we really hope that we can become a case study for other L1s to follow suit. And so today will be a talk really targeting those other L1s with our learnings. And I tried to make this fun for you all, so I made this in the style of Zelda. Any Zelda players in the audience? Cool, we got a few hands up. Great. Well, hopefully you'll enjoy the slides.", "eventId": "devcon-7", - "slot_start": 1731474900000, - "slot_end": 1731475800000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1RQXvuQDzdyRtC3YArjUnvZw9pKG8y3WwlKPipk1FNJE", - "resources_slides": null, + "slot_start": 1731580800000, + "slot_end": 1731581400000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/14jswR8SSkWsHdCj5ky0DG_01yQVUwV7nJtS5K18ynHg", + "resources_slides": "https://drive.google.com/file/d/1YETIuGF5J7tcIEbo7nSf5sN0oygnGXF-/view", "speakers": [ - "sayan" + "marek-olszewski" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -800492,6 +798274,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -800617,42 +798400,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -800711,7 +798458,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -800748,6 +798494,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -800774,6 +798521,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -800799,6 +798547,7 @@ 0, 2, 0, + 2, 0, 0, 0, @@ -800809,6 +798558,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -800869,6 +798619,15 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -801241,24 +799000,50 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 2, 2, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, 2, 0, 0, @@ -801277,59 +799062,58 @@ }, { "session": { - "id": "transaction-simulation-the-good-the-bad-and-the-ugly", - "sourceId": "TE9JUF", - "title": "Transaction simulation, the good, the bad & the ugly", - "description": "Transaction simulation allows users to preview the outcomes of signing a transaction, enabling them to make informed decisions rather than fully trusting the dApp. However, several caveats and risks are associated with relying on simulated transaction outcomes. State changes, differing contract behavior between simulation and on-chain execution, and randomness can all affect the outcome. In this talk, I'll share my experiences and learnings from simulating user transactions over the past 2 years", - "track": "Security", + "id": "trust-minimized-p2p-marketplaces-on-ethereum", + "sourceId": "YPNBE8", + "title": "Trust-minimized P2P marketplaces on Ethereum", + "description": "Blockchains have enabled trustless and fast transaction settlement (i.e. stablecoins, DeFi). However, these existing use cases exist in parallel and are siloed off from the real world. With the maturation of ZK, MPC and other programmable crypto techniques, we are now able to connect data on the internet to blockchains in a trust minimized way for use in smart contracts. This talk will explore the massive design space unlocked for apps (i.e. 
trust minimized P2P marketplaces)", "track": "Real World Ethereum", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ "ZKP", "Signatures", "P2P finance", "p2p", "marketplace", "P2P finance", "Signatures", "ZKP" ], "keywords": [ "TLSNotary", "ZKEmail", "P2P marketplaces" ], "duration": 422, "language": "en", "sources_swarmHash": "0e81f864032a7b77b620d0096f1bbc41df4651e639dd5006d81e82e88e1e33bc", "sources_youtubeId": "GvRhTfLx9w0", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "6735781c9dbb7a90e1b5649c", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735781c9dbb7a90e1b5649c.vtt", "transcript_text": " Awesome. Hey, guys. I'm Richard. Today I'll be talking about trust-minimized P2P marketplaces on Ethereum. I'll go over the why, the how, and some thoughts on looking ahead. So why are we here? Um, if we look at Web2 today, uh, data currently exists in these centralized data silos, whether it's fiat currency in your bank account, uh, your social graph, concert tickets, domains, et cetera. Um, these lead to high switching costs for users. For example, you can't bring your followers from Facebook to Twitter. And these also are very extractive to these users. For example, Ticketmaster charges, like, monopolistic prices because they own both the secondary and primary exchanges. And lastly, there's, like, closed APIs, which make it very difficult to interoperate between payment APIs, for example. Additionally, if you look at Web3 and our permissionless database, the problem is that it's really detached from the real world. If you look at your social graph, stablecoins, like these DeFi instruments, and even ETH, all of it runs in a parallel system. In fact, most of DeFi today is self-referential. And so the solution here is to bring the real world to Ethereum. And the question is how? We can break through these kinds of walled gardens using cryptography and also maybe TEEs. So we can use ZK and MPC, these new technologies, to allow us to permissionlessly export data from these centralized data silos. Techniques such as ZK Email and TLSNotary, which also can help us redact any kind of private, sensitive information prior to exporting. Check out some of the other talks on ZK Email or TLSNotary today if you want to learn more deeply. But yeah, now with these tools, we can kind of compose any kind of Web2 action with a Web3 action, such as proving a fiat transfer using ZK Email to swap for some stablecoins on chain, or proving a ticket transfer using TLSNotary to swap for stablecoins, or swap domains for ETH, et cetera, or even put a social graph inside a TEE to do some DeFi stuff with. An example of this flow that is in production today is ZKP2P, the project I work on. It fixes some of the issues I mentioned earlier, such as the high fees. The construction for this is pretty simple, actually. A seller comes in, escrows some funds in a smart contract and provides a payment ID. 
A buyer comes in, pays the seller, and uses that proof to unlock the escrow contract. It kind of replaces Binance or any kind of centralized exchange with a smart contract, and replaces the manual process of unlocking the escrow with instant unlock upon satisfying a predicate. This construction extends beyond fiat. Simply replace the off-chain payment with a digital asset transfer, and a proof of payment with a proof of asset transfer. Now you can kind of do P2P trading: trade tickets, trade domains, trade social media handles, CSGO skins, gift cards and much more. You can even further extend this construction if you trust TEEs as a credible third party. You can kind of encumber a Web2 account, put it inside a TEE, and have it act as an escrow for the Web2 asset. And both seller and buyer send requests through the TEE. Uh, once the TEE owns both the funds and the Web2 asset, say tickets, then it can proceed to unlock funds to both parties. Uh, yeah, now you can kind of see the Web2 account is fully programmable. You can do some weird stuff like make the TEE run an AMM pricing model, for example. Yeah, in conclusion, the design space is huge. To do trust-minimized marketplaces, the tech here is ready. Dev tooling will continue to only get better. And instead of only bringing Ethereum to the real world, I think it's time to bring some of the real world onto Ethereum. Uh, thank you, and that's it. Thank you Richard. Do we have any questions? Shy crowd today, come on guys. I don't even know if you have the ability to go back in slides. Sorry, is this better? Yeah, if you can't go back, that's okay. I was just gonna ask something about it, but it doesn't matter. I don't think we can. Maybe you can just talk to it. Okay. Any particular use cases you're most excited about, like in terms of the future of peer-to-peer marketplaces? I think there's just a lot of ideas out there. You can put almost any Internet data on-chain and use it. Trade any kind of Web2 asset or even data, anything that you think has value on the internet; you can kind of bring it on-chain to do cool stuff with. Yeah, love to see it. I mean, just like you said, Ticketmaster, I think a prime example of, like, marketplaces that are absolutely using monopolistic pricing to just gouge consumers in what is becoming, yeah, a very much frictionless process to do straight peer-to-peer. I guess a question, have you seen any regulatory issues? How are you thinking about navigating that? Yeah, I think there haven't been so far. I think P2P is a legally, I guess, gray area kind of thing. You are allowed to transact with your peer. For example, Facebook Marketplace is a huge P2P marketplace. And we're just basically enabling the coordination of that to be easier through on-chain means on Ethereum. Totally. All right. Thank you, Richard. Appreciate it. 
Give it up.", "eventId": "devcon-7", - "slot_start": 1731409800000, - "slot_end": 1731410400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Bl4qs4Zj65LUtt4i8uht8GdKLHGxRkYht0gt_Qcd_n4", - "resources_slides": null, + "slot_start": 1731556200000, + "slot_end": 1731556800000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1_yxVcYnivrcVQGtbD7FmPQLfgJn75M9f-qQDTJJuPH8", + "resources_slides": "https://drive.google.com/file/d/1jDL17wya4cBYIvGzW7VnOgyzS4FVjiaw/view", "speakers": [ - "kim-persson" + "richard" ] }, "vector": [ - 6, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -801457,6 +799241,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -801994,7 +799779,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -802068,7 +799852,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -802084,7 +799867,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -802148,6 +799930,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -802164,6 +799947,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -802597,7 +800381,8 @@ 0, 0, 0, - 0, + 2, + 2, 0, 0, 0, @@ -802635,11 +800420,11 @@ 0, 0, 0, - 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -802653,43 +800438,41 @@ }, { "session": { - "id": "transforming-systems-lessons-from-taiwans-movements", - "sourceId": "B9EDKY", - "title": "Transforming Systems: Lessons from Taiwan's Movements", - "description": "I will talk about the most recent struggles of open source communities in Taiwan, g0v specifically, how da0 has been trying to help in the past year or so, the conclusions we had and what is still missing. g0v has been running bi-monthly hackathons for 10 years now, which has been the key foundation for the community. April this year they stopped due to lack of funding support, we use this as a point of reference and how a web3 oriented subgroup like da0 could have done better, and the future.", + "id": "trust-zones-why-daos-will-be-the-best-organizations-ever-created", + "sourceId": "R9ENCP", + "title": "Trust Zones: Why DAOs will be the best organizations ever created", + "description": "This talk introduces the theory of Trust Zones. Every Trust Zone is a unique blend of constraints, reputation requirements, and accountability measures, within which an agent can operate on behalf of an organization to further its goals.\r\n\r\nI will contend that the operational management of all organizations can be described as creating new Trust Zones and adjusting their parameters. 
And further, that DAOs and other onchain organizations can do this better than any other organizational form.", "track": "Coordination", - "type": "Talk", - "expertise": "Beginner", - "audience": "Community", + "type": "Lightning Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Civil Resistance", - "Coordination", - "Public good" + "DAO", + "Governance", + "trusted", + "DAO", + "Governance" ], "keywords": [ - "Ecosystem", - "Funding", - "Mainstream" + "Trust" ], - "duration": 924, + "duration": 505, "language": "en", - "sources_swarmHash": "2b9dfee773cde09da38f0a590cb1a7dbf70bfd70506a4361cbd19409fbeea5ef", - "sources_youtubeId": "2cuicY646Jo", + "sources_swarmHash": "852ff1461e28d565f245830a4e9ecb44fed61bff2192ef6686cd69d515928f99", + "sources_youtubeId": "tu6t6GdLyCg", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736ef6274749a4b891853e7", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736ef6274749a4b891853e7.vtt", - "transcript_text": " Thuy Nguyen Reviewer Reviewer 1 Can you guys hear me? OK. Hi guys, my name is Noah. So today I'm going to talk about the strategies of making changes and our reflections from Taiwan. So, 10 years ago, this community called GovZero started in Taiwan. It started from having a bunch of hackers trying to open up government data. And later in the years, it become a beacon of digital democracy globally in the world, a leader of digital democracy in the world. And in recent years, the momentum has slowed down a little bit. So two years ago, me and my partner Vivian here basically started this initiative called DAO Zero. And the mission was to supercharge Gov Zero, to supercharge the digital democracy movement. And we did a bunch of things. This includes the decentralized ID pilots in Taiwan, these include various retroactive funding experiments, these include a lot of research funding the Commerce Taipei, Dao Taipei, Plurality Taipei, a bunch of different things. But at a certain point, I started asking myself, what has really changed? Were we really able to help? At a certain point, despite the relative success of all the experiments, this year in April, GovZero basically posted this picture saying that, okay, so we're running out of funding, please donate to GovZero so it can keep running its bimonthly hackathons continuously, which the bimonthly hackathons for 10 years, it has never paused once. So this is a signal that maybe despite all the experiments that we've done, maybe we need to go deeper on how we can make systemic change in the future. 
So if we are to initiate something new, if we really want to change system to the better, positively, sustainably, what are the strategies we are to adopt, what are the potential tools", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731638700000, - "slot_end": 1731639900000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1mKMsPFBtVYtAcJOczCaTR2Ssw6fiQ86zw-Jz3zyGmFk", - "resources_slides": null, + "slot_start": 1731488400000, + "slot_end": 1731489000000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/11gK41qto_r77F_waBaxEdW2JoYIgXHs4mVHzUzI_OaU", + "resources_slides": "https://drive.google.com/file/d/1rtywjF61qKmajtac50arBNQksOE8sbvx/view", "speakers": [ - "noah-yeh" + "spencer-graham" ] }, "vector": [ @@ -803370,7 +801153,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -803538,10 +801320,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -803553,10 +801337,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -803599,7 +801379,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -803617,6 +801396,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -803673,7 +801453,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -804006,6 +801785,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -804016,8 +801796,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -804029,61 +801807,50 @@ }, { "session": { - "id": "transitioning-from-an-l1-to-an-l2-a-case-study", - "sourceId": "KHVZ9M", - "title": "Transitioning from an L1 to an L2: A case study", - "description": "This talk will cover the learnings from cLabs' experience rebuilding Celo from the ground up as an L2. We hope that it can be a useful case study for other L1s to follow.", - "track": "Layer 2", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "try-it-out-in-remix", + "sourceId": "SUEJQR", + "title": "Try it out in Remix", + "description": "Remix is great for your blockchain experiments for both new Web3 devs and OGs. We’ll present the new Remix Desktop - great for offline work, plus RemixAI tools and RemixZK tools, our new collection of templates, our new video guide, our new tool to make a basic DApp - great for hackathons, and more! Learn to play in Remix!", + "track": "Developer Experience", + "type": "Talk", + "expertise": "Beginner", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "Layer 1", "Layer 2s", - "Rollups", - "Scalability", - "Optimistic rollups", - "Use Cases", - "learnings", - "technical", - "Layer 1", + "Tooling", + "DevRel", + "Desktop", + "ai", + "Desktop", + "DevRel", "Layer 2s", - "Optimistic rollups", - "Rollups", - "Scalability", - "Use Cases" + "Tooling" ], "keywords": [ - "Layer2", - "case study", - "technical learnings" + "AI" ], - "duration": 516, + "duration": 1453, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "ba46a6efe168366f05e9a4d908ed3909a765ef3712b9f52433c87f7275e605a0", + "sources_youtubeId": "XRbi7AbQwSg", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735d79e9dbb7a90e1be2c82", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735d79e9dbb7a90e1be2c82.vtt", - "transcript_text": " Hi everyone, my name is Marek Olszewski and once again we're going to be talking about cello's transition from an L1 to an L2. You've probably heard that we're making this big transition and we really hope that we can become a case study for other L1s to follow suit. 
And so today will be a talk really targeting those other L1s with our learnings. And I tried to make this fun for you all, so I made this in the style of Zelda. Any Zelda players in the audience? Cool, we got a few hands up. Great. Well, hopefully you'll enjoy the slides.", + "sources_streamethId": "6735e09f9dbb7a90e1fef26b", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731580800000, - "slot_end": 1731581400000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/14jswR8SSkWsHdCj5ky0DG_01yQVUwV7nJtS5K18ynHg", - "resources_slides": null, + "slot_start": 1731582000000, + "slot_end": 1731583800000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1frNEqhlzbsXj_EqKtcIYr8R8G-t4ymlj401WFG6BBYw", + "resources_slides": "https://drive.google.com/file/d/1C4fkAelfWEUPkV4hR0uOXv_JTzWAQBwJ/view", "speakers": [ - "marek-olszewski" + "rob-stupay" ] }, "vector": [ - 0, - 0, - 0, - 0, 0, 0, 0, @@ -804624,13 +802391,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -804769,6 +802529,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -804846,7 +802607,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -804860,6 +802620,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -804873,7 +802634,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -804897,9 +802657,7 @@ 0, 0, 0, - 2, 0, - 2, 0, 0, 0, @@ -804907,10 +802665,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -804921,6 +802679,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -804971,7 +802730,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -805015,6 +802773,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -805357,8 +803116,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -805370,6 +803127,14 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -805394,7 +803159,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -805402,6 +803166,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -805416,59 +803182,51 @@ }, { "session": { - "id": "trust-minimized-p2p-marketplaces-on-ethereum", - "sourceId": "YPNBE8", - "title": "Trust-minimized P2P marketplaces on Ethereum", - "description": "Blockchains have enabled trustless and fast transaction settlement (i.e. stablecoins, DeFi). However, these existing use cases exist in parallel and are siloed off from the real world. With the maturation of ZK, MPC and other programmable crypto techniques, we are now able to connect data on the internet to blockchains in a trust minimized way for use in smart contracts. This talk will explore the massive design space unlocked for apps (i.e. trust minimized P2P marketplaces)", - "track": "Real World Ethereum", + "id": "txain-discover-the-next-generation-of-blockchain-exploration", + "sourceId": "WRGHRM", + "title": "TXain: Discover the Next Generation of Blockchain Exploration", + "description": "Discover TXain, the next generation blockchain explorer designed to elevate your blockchain experience. Join us as we delve into our key features: an intuitive UI, real-time data, advanced search capabilities, and in-depth analytics. As a new startup, we’re committed to performance and information clarity, ensuring seamless navigation and comprehensive insights. 
Learn how TXain is set to redefine blockchain exploration, providing the tools you need to explore, analyze, and understand the blockch", + "track": "Developer Experience", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Product", + "expertise": "Beginner", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "ZKP", - "Signatures", - "P2P finance", - "p2p", - "marketplace", - "P2P finance", - "Signatures", - "ZKP" + "data", + "real-time" ], "keywords": [ - "TLSNotary", - "ZKEmail", - "P2P marketplaces" + "blockchain explorer", + "user experience", + "Real-Time Data" ], - "duration": 422, + "duration": 426, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "c7b6d89b21dc79bb6f3eaeb558d3882f666c486f9c880a25afae3b3dcac9a1df", + "sources_youtubeId": "4NJZijpEH6A", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735781c9dbb7a90e1b5649c", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735781c9dbb7a90e1b5649c.vtt", - "transcript_text": " Awesome. Hey, guys. I'm Richard. Today I'll be talking about trust-minimized P2P marketplaces on Ethereum. Go over the why, how, and some thoughts of looking ahead. So why are we here? Um, if we look at web 2 today, uh, data currently exists in these centralized data silos, whether it's fiat currency in your bank account, uh, your social graph, concert tickets, domains, et cetera. Um, these lead to high switching costs for users. For example, you can't bring your followers from Facebook to Twitter. And these also are very extractive to these users. For example, Ticketmaster charges, like, monopolistic prices because they own both the secondary and primary exchanges. And lastly, there's, like, closed APIs, which make it very difficult to interoperate between payment APIs, for example. Additionally, if you look at Web3 in our permissionless database, the problem is that it's really detached from the real world. If you look at your social graph, stable coins, like these DeFi instruments, and even ETH, all of it runs in a parallel system. In fact, most of DeFi today is self-referential. And so the solution here is to bring the real world to Ethereum. And the question is how? We can break through these kind of wall gardens using cryptography and also maybe TEs. So we can use ZK and MPC, these new technologies, to allow us to permissionlessly export data from these centralized data silos. Techniques such as ZK email and TLS notary, which also can help us redact any kind of private sensitive information prior to exporting. Check out some of the other talks on ZK email or TLS notary today if you want to learn more deeply. But yeah, now with these tools, we can kind of compose any kind of Web2 action with Web3 action, such as proving a fiat transfer using zk email to swap for some stable coins on chain or proving ticket transfer using TLS notary to swap for stable coins or swap domains for ETH, et cetera, or even put a social graph inside a TEE to do some DeFi stuff with. An example of this flow that is in production today is ZKP2P, the project I work on. The problem that it fixes some of the issues I mentioned earlier, such as the high fees. The construction for this is pretty simple, actually. A seller comes in, escrows some funds in a smart contract and provides a payment ID. A seller comes in, escrows some funds in a smart contract and provides a payment ID. 
A buyer comes in, pays the seller, and uses that proof to unlock the escrow contract. It kind of replaces Binance or any kind of centralized exchange with a smart contract, and replaces the manual process of unlocking the escrow with instant unlock upon satisfying a predicate. This construction extends beyond fiat. Simply replace the off-chain payment with a digital asset transfer and a proof of payment with a proof of asset transfer. Now you can kind of do P2P trading tickets, trading domains, trade social media handles, CSGO skins, gift cards and much more. You can even further extend this construction if you, if you trust TEs as a credit poll third party. You can kind of encumber a Web2 account, put it inside a TEE and act that, act as an escrow for the Web2 asset. And both seller and buyer send requests through the TEE. Uh, once the TEE owns both funds and the Web2 asset say tickets, uh, then they can proceed to unlock funds to both parties. Uh, yeah, now you kind of can see the Web2 account is fully programmable. You can do some weird stuff like make the TEE run an AMM pricing model for exam, for example. Yeah, in conclusion, design space is huge. To do trust-minimized marketplaces, the tech here is ready. Dev tooling will continue to only get better. And instead of only bringing Ethereum to the real world, I think it's time to bring some of the real world onto Ethereum. Uh, thank you, and that's it. Thank you Richard. Do we have any questions? Shy crowd today, come on guys. I don't even know if you have the ability to go back in slides. Sorry, is this better? Yeah, if you can't go back, that's okay. I was just gonna ask something about it, but it doesn't matter. I don't think we can. Maybe you can just going to ask something about it, but it doesn't matter. I don't think we can. Maybe you can just talk to it. Okay. Any particular use cases you're most excited about, like in terms of future of peer-to-peer marketplaces? I think there's just a lot of ideas out there. You can put almost any Internet data on-chain and use it. Trade any kind of Web2 asset or even data and anything that you think has value on an internet, you can kind of bring into on-chain to do cool stuff with. Yeah, love to see it. I mean, just like you said, Ticketmaster, I think a prime example of just like marketplaces that are absolutely using monopolistic pricing to just gouge consumers in what is becoming, yeah, a very much frictionless process to do straight Peter Peer. I guess a question, have you seen any regulatory issues? How are you thinking about navigating that? Yeah, I think there haven't been so far. I think P2P is a legally, I guess, gray area kind of thing. You are allowed to transact with your peer. For example, Facebook Marketplace is a huge P2P marketplace. And we're just basically enabling the coordination of that to be easier through on-chain means on Ethereum. Totally. All right. Thank you, Richard. Appreciate it. 
Give it up.", + "sources_streamethId": "67348d8a9dbb7a90e16d35b3", "eventId": "devcon-7", - "slot_start": 1731556200000, - "slot_end": 1731556800000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1_yxVcYnivrcVQGtbD7FmPQLfgJn75M9f-qQDTJJuPH8", - "resources_slides": null, + "slot_start": 1731493200000, + "slot_end": 1731493800000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1_ATKYtQF_Q_hjc85bqwcab990AdWWjiO8FiSDVR2BMg", + "resources_slides": "https://drive.google.com/file/d/1BHD2qn-t9w9iIIhYcwc2J6psxsyh0iTk/view", "speakers": [ - "richard" + "joan-baylina", + "daniel" ] }, "vector": [ 0, 0, 0, + 6, 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -805595,7 +803353,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -805629,6 +803386,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -806141,6 +803899,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -806287,7 +804046,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -806304,7 +804062,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -806514,7 +804271,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -806525,6 +804281,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -806741,7 +804498,6 @@ 0, 0, 2, - 2, 0, 0, 0, @@ -806779,13 +804535,11 @@ 0, 0, 0, + 2, 0, 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -806797,41 +804551,37 @@ }, { "session": { - "id": "trust-zones-why-daos-will-be-the-best-organizations-ever-created", - "sourceId": "R9ENCP", - "title": "Trust Zones: Why DAOs will be the best organizations ever created", - "description": "This talk introduces the theory of Trust Zones. Every Trust Zone is a unique blend of constraints, reputation requirements, and accountability measures, within which an agent can operate on behalf of an organization to further its goals.\r\n\r\nI will contend that the operational management of all organizations can be described as creating new Trust Zones and adjusting their parameters. And further, that DAOs and other onchain organizations can do this better than any other organizational form.", - "track": "Coordination", + "id": "txmonster-mud-day-demo", + "sourceId": "3GSMUH", + "title": "TxMonster - MUD Day Demo", + "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications\r\n\r\nUsing MUD Dev to build \"Eat Sleep & Survive\" TxMonster on RedStone Chain", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", "type": "Lightning Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "DAO", - "Governance", - "trusted", - "DAO", - "Governance" - ], + "tags": [], "keywords": [ - "Trust" + "N/A" ], - "duration": 505, + "duration": 288, "language": "en", - "sources_swarmHash": "852ff1461e28d565f245830a4e9ecb44fed61bff2192ef6686cd69d515928f99", - "sources_youtubeId": "tu6t6GdLyCg", + "sources_swarmHash": "d78526fb87c5a752ac399b532bc16e395f7ee7caf593f8ae3c03e5b78e07e201", + "sources_youtubeId": "P3x5UV39CCA", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673587ef9dbb7a90e1311702", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673587ef9dbb7a90e1311702.vtt", + "transcript_text": " What are you doing? You guys find here it's like Pokemon Go here? Okay, so this is a chance for us to play the game similar to Pokemon Go. Capture mantras on the text studio and then with the mat app. So, can I have a clicker? Where is it? No, here. Okay. Just a little bit. So I need a clicker here. 
So it's great to be here today and talk with you guys to share the game. And we're grateful to the MUD team, Lattice, and the rest of the supporters for helping us build the game. Thanks again. So, please, I need a clicker. Okay. So we can do it. Okay, so you can just skip to the next slide. This is the game loop. So the next one. The next one. Okay. TxMonster is a monster-capturing game. It's 3D in the web browser, and we're doing it with Unity. We are coming from Web2 studio games, so we want to bring that gaming experience into Web3 and fully on-chain games on the blockchain. With this one we bring a whole new diversity, like the monster system, so you can explore and discover within a sandbox, open world. Please, next slide. Okay, the game loop is very simple. You see it here: we build the characters, we explore the world, we tame the monsters, we take battles and gain assets, and then we can build the character again. That's a very simple game loop, so we're just following it. Okay, next one. Okay, we have a lot of features in the ecosystem, but the most important thing here I want to emphasize is the FOCG mechanism. The first thing is, when we talk about GameFi, it's the ownership of the assets. But talking about the fully on-chain game, we need all the logic on-chain. And lastly, and most important, is a decentralized world bringing contribution from the community. Okay, please, the next one. Okay, that is the gaming NFT contribution, because we need to make sure that people play the game for the game itself, not only for the finance thing. So we have it here: the NFTs, the monsters, activities, cosmetics, and all of the things the user can contribute to, by finance or by gaming experience, playing for the game itself. Okay. And of course, on blockchain people own value not only in the game but also in the finance thing, so we also have that financial value. So look out for it, and we need the next one. Okay, these are the things we emphasize that you'll see in the future. We have user-generated content. It means, for example, we have our own project with assets, and then the community can create their own assets and put them into the game. But extra: you can own your own sub-client. You just use the assets, and then you can bring the sub-client into the game. And then you also have all the other features in the game and go along with us. But the most important thing is, if you have a better team and you can build a client with our assets, you can do that. So UGC is the most important thing when you play the game. 
If you, anyone in here plays Steam community, you have", "eventId": "devcon-7", - "slot_start": 1731488400000, - "slot_end": 1731489000000, + "slot_start": 1731558000000, + "slot_end": 1731558300000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/11gK41qto_r77F_waBaxEdW2JoYIgXHs4mVHzUzI_OaU", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/10U4OcgkMv_HGXoZzHe-sIP9e08AcMp-G142YBiu1DUM", + "resources_slides": "https://drive.google.com/file/d/1sLQb1lSHs3xFGv9wziKI-TyiMJnZJpfd/view", "speakers": [ - "spencer-graham" + "buidltxgames" ] }, "vector": [ @@ -806846,6 +804596,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -807682,12 +805433,7 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, - 2, 0, 0, 0, @@ -807758,7 +805504,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -808171,56 +805916,46 @@ }, { "session": { - "id": "try-it-out-in-remix", - "sourceId": "SUEJQR", - "title": "Try it out in Remix", - "description": "Remix is great for your blockchain experiments for both new Web3 devs and OGs. We’ll present the new Remix Desktop - great for offline work, plus RemixAI tools and RemixZK tools, our new collection of templates, our new video guide, our new tool to make a basic DApp - great for hackathons, and more! Learn to play in Remix!", - "track": "Developer Experience", - "type": "Talk", + "id": "ultimate-dominion-mud-day-demo", + "sourceId": "GPQVMW", + "title": "Ultimate Dominion - MUD Day Demo", + "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications.\r\n\r\nUltimate Dominion is a fully onchain text-based RPG. Explore the world, defeat monsters, collect, buy, and sell items.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", + "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Developer", + "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", - "Tooling", - "DevRel", - "Desktop", - "ai", - "Desktop", - "DevRel", - "Layer 2s", - "Tooling" - ], - "keywords": [ - "AI" + "Gaming", + "Autonomous World", + "Autonomous World", + "Gaming" ], - "duration": 1453, + "keywords": [], + "duration": 329, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "342168451b143f531922ea9d08f6e99c010b5fe7b227fa112be1f90f564d43c0", + "sources_youtubeId": "yILE0MO7B2M", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735e09f9dbb7a90e1fef26b", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": "6735907b9dbb7a90e1cac2fd", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735907b9dbb7a90e1cac2fd.vtt", + "transcript_text": " Ritz Raspba Reviewer 1 Hello everybody, my name is Ritz Raspba. This is my first time at DevCon 19, and I'm really excited to show you what me and my team have worked up for you guys. We've been working on this game called Ultimate Dominion for the past six months. It's a fully on-chain text-based MMORPG. Now, I've been growing up a real fan of Skyrim, those types of games, and that really inspired my art going into this. I've been drawing knights and dragons since I was a kid, and I'm glad I'm able to have the same assets as the things I've been drawing and growing up with. Narrative is a really big part in this. 
I mean, I feel a lot of games in the last decade just haven't really been focusing on that world building, haven't had a world that felt like we should fight for it and a narrative that we would want to push forward. A lot of games focus on building a game that feels like it's going to outlive the players, but we want to work on a game that feels like it's lived in and has had a history long before they've arrived. Now heading into the game, this is what we have. My character name is going to be Mr. Slay's lot. This is beta and I'm going to be doing this live for you guys. That's my profile picture. Deal with it. Moving forward, I'm going to be rolling my stats next. The stats determine what you're good at and what you're bad at. If you have high HP, then you're harder to kill. Higher strength means you swing your sword heavier. Agility, you're faster and better at running away from enemies. And intelligence gives you access to the strongest magic in the game. Let me actually roll my stats a little bit. Rolling in here, this world is going to be a 10 by 10 grid. You're going to be moving about it; think of it like you're moving a piece on a chess board. Loading into it here, you're going to see to the right that we have several points of interest and a yellow border. This yellow border is going to be the player's safe area. Your character is depicted as a dragon icon. And as you see, as I move through here, the middle screen is going to be updating with the players and the monsters that are currently there. Now, getting into the combat here, I'm going to be going up against a kobold scout. Oh. Apologies. So the items in this game are the main way that you're going to attack things and deal damage. These individual items, as I equip them, and even these low-level items, they don't do a lot. They're mostly numbers and have small effects. However, when you couple them up, when you have multiplayer and multiple people using several attacks, think of a rogue with a smoke bomb and then your wizard casting Fireball that lights up the smoke bomb and does extra damage. We want to incentivize players and empower them to have cool interactions like that, ones we really can't anticipate. This sort of story building is really what we want to push. Here we have the chance to do some really lovely cooperative storytelling. We as developers want to empower players to have a narrative to push forward and to attack with. And that's the main dream. Right now with this beta, we're focusing on this main loop where you attack monsters, get their items, and sell them. And with that comes, well, that progression. But this is the meat and bones of the game. All this narrative stuff that we want to do, all this ambition, this crazy world that we want to build is great, but we want to have a good game that goes with it. I mean, I think it's really important. Getting into it, now I'm going to be getting into the combat here. Going up against a... There we go, I apologize for that. Getting into the combat shortly here, you're going to see that as I get into it, we're going to have these actionable items here; they are what I have to attack with. As I was saying, when you mix and match with multiplayer, you have a lot of combinations and a lot of things that you can do. But ultimately, this is the game. If you want to support it and you want to see it live through, you can join the demo with the code we're going to have back here. 
And I can't wait to see you there, guys. My name is Ritz Rospa, and this was Ultimate Dominion. Thank you. Thank you, Ritz.", "eventId": "devcon-7", - "slot_start": 1731582000000, - "slot_end": 1731583800000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1frNEqhlzbsXj_EqKtcIYr8R8G-t4ymlj401WFG6BBYw", - "resources_slides": null, + "slot_start": 1731558300000, + "slot_end": 1731558600000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/13Uil3sm_cj9Qi6g5Yd7Wn1eUVWbT6tRsAAUDqNmNTmU", + "resources_slides": "https://drive.google.com/file/d/1zYGJzeJokUlCNleWOVNAy1_3aG8x6C4-/view", "speakers": [ - "rob-stupay" + "ritz-raspa" ] }, "vector": [ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -808230,6 +805965,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -808895,10 +806631,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -808987,7 +806723,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -809032,7 +806767,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -809046,7 +806780,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -809078,6 +806811,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -809140,7 +806875,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -809496,7 +807230,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -809535,9 +807268,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -809551,48 +807284,49 @@ }, { "session": { - "id": "txain-discover-the-next-generation-of-blockchain-exploration", - "sourceId": "WRGHRM", - "title": "TXain: Discover the Next Generation of Blockchain Exploration", - "description": "Discover TXain, the next generation blockchain explorer designed to elevate your blockchain experience. Join us as we delve into our key features: an intuitive UI, real-time data, advanced search capabilities, and in-depth analytics. As a new startup, we’re committed to performance and information clarity, ensuring seamless navigation and comprehensive insights. Learn how TXain is set to redefine blockchain exploration, providing the tools you need to explore, analyze, and understand the blockch", - "track": "Developer Experience", + "id": "unchained-index-a-purposefully-designed-schelling-point-a-native-web3-api", + "sourceId": "VBUJML", + "title": "Unchained Index: A Purposefully Designed Schelling Point: A native Web3 API", + "description": "The Unchained Index smart contract, part of TrueBlocks, acts as a purposefully-designed Schelling Point, creating a decentralized, permissionless store for blockchain index data. In this talk, we generalize the Unchained Index to show it can serve as a repository for other datasets such as event signatures and address labels. 
We contend we can replace costly APIs with a robust, reproducible public good, enhancing data accessibility & decentralization.", + "track": "Coordination", "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Developer", + "expertise": "Intermediate", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "data", - "real-time" + "Coordination", + "Decentralization", + "Ethereum for Good", + "Coordination", + "Decentralization", + "Ethereum for Good" ], "keywords": [ - "blockchain explorer", - "user experience", - "Real-Time Data" + "none" ], - "duration": 426, + "duration": 612, "language": "en", - "sources_swarmHash": "c7b6d89b21dc79bb6f3eaeb558d3882f666c486f9c880a25afae3b3dcac9a1df", - "sources_youtubeId": "4NJZijpEH6A", + "sources_swarmHash": "ea604c4fb470594534b4c6e9037f54f969d7e9fad9537949cba6906a31938188", + "sources_youtubeId": "bfFZzY0h9qQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67348d8a9dbb7a90e16d35b3", + "sources_streamethId": "67348bb99dbb7a90e154aafc", "eventId": "devcon-7", - "slot_start": 1731493200000, - "slot_end": 1731493800000, + "slot_start": 1731492000000, + "slot_end": 1731492600000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1_ATKYtQF_Q_hjc85bqwcab990AdWWjiO8FiSDVR2BMg", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/12qCfXtoD8E9oGVRdfTgU97VfTsXFeb1ceIy1bYwWAV0", + "resources_slides": "https://drive.google.com/file/d/1faF3jCQjTHS6t7lAc2cR-LskKPNY9mwL/view", "speakers": [ - "joan-baylina", - "daniel" + "thomas-jay-rush", + "meriam-zandi" ] }, "vector": [ 0, 0, 0, - 6, 0, 0, 0, @@ -809601,6 +807335,9 @@ 0, 0, 0, + 6, + 0, + 0, 0, 0, 0, @@ -809754,7 +807491,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -810271,6 +808007,10 @@ 0, 0, 6, + 6, + 0, + 0, + 0, 0, 0, 0, @@ -810434,6 +808174,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -810457,6 +808198,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -810485,6 +808227,19 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -810654,7 +808409,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -810871,27 +808625,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -810900,12 +808633,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -810917,56 +808650,48 @@ 0, 0, 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "txmonster-mud-day-demo", - "sourceId": "3GSMUH", - "title": "TxMonster - MUD Day Demo", - "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications\r\n\r\nUsing MUD Dev to build \"Eat Sleep & Survive\" TxMonster on RedStone Chain", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Lightning Talk", + "id": "understanding-eip-7002-and-eip-6110", + "sourceId": "KPD8HB", + "title": "Understanding EIP-7002 and EIP-6110", + "description": "The first part will be an overview of EIP-7002, explaining how it works, why adding this extra option to exit validators is important, and addressing some of the UX challenges of this approach. 
The second part will be a technical overview of EIP-6110, explaining the UX improvements for validators depositing on the beacon chain, the removal of pre-merge technical debt as well as a quick look at the EIP implementation in Teku.", + "track": "Core Protocol", + "type": "Talk", "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], + "tags": [ + "Staking" + ], "keywords": [ - "N/A" + "EIP", + "validator", + "staking" ], - "duration": 288, + "duration": 1495, "language": "en", - "sources_swarmHash": "d78526fb87c5a752ac399b532bc16e395f7ee7caf593f8ae3c03e5b78e07e201", - "sources_youtubeId": "P3x5UV39CCA", + "sources_swarmHash": "5e5addf0da8b7cde13a38f9d5bf27a477cb4b61980091c63038ec72253663a34", + "sources_youtubeId": "EyDChjFQEkQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673587ef9dbb7a90e1311702", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673587ef9dbb7a90e1311702.vtt", - "transcript_text": " What are you doing? You guys find here it's like Pokemon Go here? Okay, so this is a chance for us to play the game similar to Pokemon Go. Capture mantras on the text studio and then with the mat app. So, can I have a clicker? Where is it? No, here. Okay. Just a little bit. So I need a clicker here. So that's to be great today so I can show up in here and talk with you guys like a transfer to share the game. And how grateful is the MUD team and the Lattice and the rest of the supporters to build the game. Thanks again. So, please, I need a clicker like you. Okay. So we can do it. Okay, so you can just skip into the next slide. This is the game loop. So the next one. The next one. Okay. The text monitor is like the monster capturing. So that is the 3D web browser and we're doing with the Unity. So we are coming from the web tools like studio games. So we want to bring like the gaming experience inspired into the web free and the fully on chain games blockchain. So uh this one we bring like the whole new diversity like monster system so we can try to use and discovery with the like the sandbox and open world. Uh please next slide. Okay the game look will be very like simple. We see in here we build the characters, we explore the world, we just like taming the monster and then we like taking battle work and we gain access and then we can like build the character again. That's very simple game look so we're just following it. Okay, next one. Okay, we have like a lot of features in ecosystem but the most important thing here I want to emphasize is the FOMC mechanism. The first thing is like we talk about the game files, it's the ownership of the assets. But talking about the fully on-chain game, we need like all the logic on-chain. And lastly, and most important, is decentralized world bringing contribution with the community. Okay, please for the next one. Okay that is the gaming NFT contributing because we need to make sure that the people and the user need to play the game by the game itself not only about the finance thing. So we have it here the NFT the monster activities cosmetic and all of the things the user needs to contribute by the finance or by gaming experience, by goal for the game itself. Okay. And of course, uh, blockchain people own value not only for the game and also for like finance thing so we also have the further value. So looking for it and we need the next one. OK, this is our emphasize things that you see in the future. 
We have the user-generated content. It means, for example, we have our own project with access. And then the community, they can create their own access, make this one, and then put it into the game. But in extra, you can own your sub-client. And you just use access, and then you can bring the sub contract into the game. But in Extra, you can own your sub-client. And you just like use access and then you can bring the sub-contract into the game. And then you also have like all other features in the game and go along with us. But most and important thing is like if you have the better team and you can build the client with our access, you can do that. So the UDC is the most important thing when you play the game. If you, anyone in here plays Steam community, you have", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731558000000, - "slot_end": 1731558300000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/10U4OcgkMv_HGXoZzHe-sIP9e08AcMp-G142YBiu1DUM", - "resources_slides": null, + "slot_start": 1731396600000, + "slot_end": 1731398400000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/13NjraDw6-VLGwVGpYUmZprFK68Rq7uVHZ7yVIgSx7Q0", + "resources_slides": "https://drive.google.com/file/d/1-FGeFm390sMVKLb3dMTYJ_tY3OspsSkS/view", "speakers": [ - "buidltxgames" + "lucas-saldanha", + "stefan-bratanov" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -811641,7 +809366,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -811652,6 +809376,9 @@ 0, 0, 0, + 6, + 6, + 0, 0, 0, 0, @@ -811842,6 +809569,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -812295,40 +810023,50 @@ }, { "session": { - "id": "ultimate-dominion-mud-day-demo", - "sourceId": "GPQVMW", - "title": "Ultimate Dominion - MUD Day Demo", - "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and non-financial applications.\r\n\r\nUltimate Dominion is a fully onchain text-based RPG. Explore the world, defeat monsters, collect, buy, and sell items.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Lightning Talk", - "expertise": "Beginner", + "id": "unified-ethereum-vs-l2-ecosystem-competition-can-we-have-both", + "sourceId": "HZCDFP", + "title": "“Unified Ethereum” vs “L2 Ecosystem Competition”: Can we have both?", + "description": "This panel will dig into the delicate balance of Ethereum's rollup-centric future. We'll talk about the \"frenemy\" dynamic between competing L2 ecosystems, and how this can lead to a fragmented user experience. We'll strategize on ways to maintain diversity while making interoperability easy for users—including a discussion on the pros/cons of supporting standards like ERC-7683. 
Can we get the best of both worlds: the innovation and diversity of many L2s, with the UX of a unified Ethereum?", + "track": "Layer 2", + "type": "Panel", + "expertise": "Intermediate", "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ - "Gaming", - "Autonomous World", - "Autonomous World", - "Gaming" + "Cross-L2", + "UI/UX", + "Intents", + "ethereum", + "unified", + "Cross-L2", + "Intents", + "UI/UX" ], - "keywords": [], - "duration": 329, + "keywords": [ + "ERC-7683", + "Interoperability", + "Unified-Ethereum" + ], + "duration": 3385, "language": "en", - "sources_swarmHash": "342168451b143f531922ea9d08f6e99c010b5fe7b227fa112be1f90f564d43c0", - "sources_youtubeId": "yILE0MO7B2M", + "sources_swarmHash": "8edb1f118a91d0cb8965596fd17130e941f3aea7bcada1981305da431687f90d", + "sources_youtubeId": "4Tds-Bik7zM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735907b9dbb7a90e1cac2fd", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735907b9dbb7a90e1cac2fd.vtt", - "transcript_text": " Ritz Raspba Reviewer 1 Hello everybody, my name is Ritz Raspba. This is my first time at DevCon 19, and I'm really excited to show you what me and my team have worked up for you guys. We've been working on this game called Ultimate Dominion for the past six months. It's a fully on-chain text-based MMORPG. Now, I've been growing up a real fan of Skyrim, those types of games, and that really inspired my art going into this. I've been drawing knights and dragons since I was a kid, and I'm glad I'm able to have the same assets as the things I've been drawing and growing up with. Narrative is a really big part in this. I mean, I feel a lot of games in the last decade just haven't been, haven't really been focusing on that world building, haven't had a world that felt like we should fight for and narrative that we would want to push forward. A lot of games focus on building a game that feels like it's going to outlive the players, but we want to work on a game that feels like it's lived for and has had a history long before they've arrived. Now heading into that game, this is what we have. My character name is going to be Mr. Slay's lot. This is beta and I'm going to be doing this live for you guys. That's my profile picture. Deal with it. Moving forward, I'm going to be rolling my stats next. The stats determine what you're good at and what you're bad at. If you have high HP, then you're harder to kill. Higher strength means you swing your sword heavier. Agility, you're faster and better at running away from me. And intelligence gives you access to the strongest magic in the game. Let me actually roll my stats a little bit. Rolling into here, this world is going to be in a 10 by 10 grid. You're going to be moving about it more. Think of it like you're moving a piece in a chess board. Loading into it here, you're going to see to the right here, we're going to have several points of interest and a yellow border. This yellow border is going to be the player, the safe area. Your character is depicted as a dragon icon. And as you see as I move through here, the middle screen is going to be updating with whatever, with the players and the monsters that are currently there. This getting into the combat here. I'm going to be going up against a kobold scout. Oh. Apologies. So the items in this game are the main way that you're going to pack things and dealing damage. 
These individual items, as I equip them, and even these low-level items, they don't do a lot. They're mostly numbers and have small effects. However, when you couple them up, when you have multiplayer and multiple people using several attacks, think of a rogue with a smoke bomb and then your wizard casting Fireball that lights up the smoke bomb and does extra damage. We want to incentivize players and empower them to have cool interactions like that, ones we really can't anticipate. This sort of story building is really what we want to push. Here we have the chance to do some really lovely cooperative storytelling. We as developers want to empower players to have a narrative to push forward and to attack with. And that's the main dream. Right now with this beta, we're focusing on this main loop where you attack monsters, get their items, and sell them. And with that comes, well, that progression. But this is the meat and bones of the game. All this narrative stuff that we want to do, all this ambition, this crazy world that we want to build is great, but we want to have a good game that goes with it. I mean, I think it's really important. Getting into it, now I'm going to be getting into the combat here. Going up against a... There we go, I apologize for that. Getting into the combat shortly here, you're going to see that as I get into it, we're going to have these actionable items here; they are what I have to attack with. As I was saying, when you mix and match with multiplayer, you have a lot of combinations and a lot of things that you can do. But ultimately, this is the game. If you want to support it and you want to see it live through, you can join the demo with the code we're going to have back here. 
Thank you, Ritz.", + "sources_streamethId": "67345b159dbb7a90e13ea261", "eventId": "devcon-7", - "slot_start": 1731558300000, - "slot_end": 1731558600000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/13Uil3sm_cj9Qi6g5Yd7Wn1eUVWbT6tRsAAUDqNmNTmU", - "resources_slides": null, + "slot_start": 1731479400000, + "slot_end": 1731483000000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1sjVmE9pcutiBwFVJbYVV2KdRqnNTg_wv6ZwyrExBY2Y", + "resources_slides": "https://drive.google.com/file/d/1wh1013mz6bb1M6-JnXCuwvyHrz2g7v_w/view", "speakers": [ - "ritz-raspa" + "hart-lambur", + "ben-jones", + "vitalik-buterin", + "steven-goldfeder", + "jesse-pollak" ] }, "vector": [ @@ -812339,11 +810077,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -812530,6 +810263,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -812770,6 +810504,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -812831,6 +810566,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -813015,19 +810751,13 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -813143,9 +810873,11 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -813193,8 +810925,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -813288,6 +811018,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -813397,6 +811128,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -813616,6 +811348,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -813646,9 +811379,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -813668,53 +811401,43 @@ }, { "session": { - "id": "unchained-index-a-purposefully-designed-schelling-point-a-native-web3-api", - "sourceId": "VBUJML", - "title": "Unchained Index: A Purposefully Designed Schelling Point: A native Web3 API", - "description": "The Unchained Index smart contract, part of TrueBlocks, acts as a purposefully-designed Schelling Point, creating a decentralized, permissionless store for blockchain index data. In this talk, we generalize the Unchained Index to show it can serve as a repository for other datasets such as event signatures and address labels. 
We contend we can replace costly APIs with a robust, reproducible public good, enhancing data accessibility & decentralization.", - "track": "Coordination", + "id": "universal-eccs-use-cases-for-the-p256-precompile-in-decentralized-internet-infrastructure", + "sourceId": "NX7U8B", + "title": "Universal ECCs: Use Cases for the P256 Precompile in Decentralized Internet Infrastructure", + "description": "## Summary\r\n\r\nThe session will highlight the history of adoption of P256 in Elliptic Curve Cryptography (ECC), its current applications in web security, authentication, and encryption, and explore future possibilities for its integration into Ethereum and ENS to enhance decentralized internet infrastructure.", + "track": "Core Protocol", "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Community", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Coordination", - "Decentralization", - "Ethereum for Good", - "Coordination", - "Decentralization", - "Ethereum for Good" + "ens", + "Accessibility", + "Public good", + "Use cases of cryptography" ], "keywords": [ - "none" + "ENS" ], - "duration": 612, + "duration": 522, "language": "en", - "sources_swarmHash": "ea604c4fb470594534b4c6e9037f54f969d7e9fad9537949cba6906a31938188", - "sources_youtubeId": "bfFZzY0h9qQ", + "sources_swarmHash": "d137af18f4692a1194d1e3d606910f72833ec4282b51cac0a9b1a317238c2ef2", + "sources_youtubeId": "e_QBTQGMxPs", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67348bb99dbb7a90e154aafc", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731492000000, - "slot_end": 1731492600000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/12qCfXtoD8E9oGVRdfTgU97VfTsXFeb1ceIy1bYwWAV0", - "resources_slides": null, + "slot_start": 1731467100000, + "slot_end": 1731467700000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1-xDtu6rJ4NegQFgMrkNcVtzLJVJkvrYD_L3OYcBdFQo", + "resources_slides": "https://drive.google.com/file/d/1V3xGcwnGPg1NGM8TREc4U1La2p8m_j2V/view", "speakers": [ - "thomas-jay-rush", - "meriam-zandi" + "estmcmxcieth" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -814392,46 +812115,6 @@ 0, 0, 0, - 6, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -814443,6 +812126,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -814530,6 +812214,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -814585,10 +812270,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -815022,6 +812703,20 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 2, 0, 0, @@ -815031,11 +812726,41 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 2, 0, 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -815044,53 +812769,50 @@ }, { "session": { - "id": "understanding-eip-7002-and-eip-6110", - "sourceId": "KPD8HB", - "title": "Understanding EIP-7002 and EIP-6110", - "description": "The first part will be an overview of EIP-7002, explaining how it works, why adding this extra option to exit validators is important, and addressing some of the UX challenges of this approach. 
The second part will be a technical overview of EIP-6110, explaining the UX improvements for validators depositing on the beacon chain, the removal of pre-merge technical debt as well as a quick look at the EIP implementation in Teku.", - "track": "Core Protocol", - "type": "Talk", + "id": "unlock-web2-data-with-tlsnotary-hands-on-workshop", + "sourceId": "VPMQGM", + "title": "Unlock Web2 Data with TLSNotary: Hands-On Workshop", + "description": "Join our hands-on workshop to master **TLSNotary**! Dive into multi-party-TLS and learn to prove and verify online data authenticity to a third-party verifier while ensuring privacy. We’ll start with small examples in Rust and build up to custom browser extensions in TypeScript to collect and verify private user data.\r\n\r\nBring your laptop, bring a friend, and learn together. Get ready to unlock and compose Web2 data in innovative ways.", + "track": "Applied Cryptography", + "type": "Workshop", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "Staking" + "Live Coding", + "Privacy", + "MPC", + "oracle", + "Live Coding", + "MPC", + "Privacy" ], "keywords": [ - "EIP", - "validator", - "staking" + "oracle" ], - "duration": 1495, + "duration": 5123, "language": "en", - "sources_swarmHash": "5e5addf0da8b7cde13a38f9d5bf27a477cb4b61980091c63038ec72253663a34", - "sources_youtubeId": "EyDChjFQEkQ", + "sources_swarmHash": "08a2c85bb2455f9e3ab6c2e0b2181df657e1370607a8a686c1627266923ca9d3", + "sources_youtubeId": "FhKjScuaNxw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673cbc4b982f234a12f066e9", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673cbc4b982f234a12f066e9.vtt", + "transcript_text": " . Thank you. How about now? There we go. So transport layer security, TLS is an old protocol. It's ubiquitous. It's all over the Internet. Basically everything supports it. If you're not using it, you probably should be. And the important thing is that all websites already support it. And so, yeah, basically, it's a protocol between your computer and a website that you're connecting to. It's using cryptography. So you can query data from a website and know that you're talking to the website you expect and your communications are protected. But one thing that it can't do for you is you can't query data from a website and then present it to another party. There's reasons for this that I won't really go into, but you can just know safely that it's not possible to just take a TLS transcript and send it to someone else. They won't know whether or not the data is actually authentic. But TLS, there are signatures there. There's cryptography there. And maybe we can plug into it and somehow leverage it for what we want to do, which is to allow people to privately prove data to a third party. I'm not sure if you've come across this jargon before. There's multiple different terms, multiple different approaches for generally the same thing. MPC TLS, CKTLS, web proofs, TLS middle boxes, Windows proxy. They're all essentially trying to do the same thing. We've implemented MPC TLS, and this leverages multi-party computation and zero-knowledge proofs in order to allow a prover to privately disclose data to a verifier in an implicitly secure way. 
And then there's other approaches, like a proxy approach, which kind of switches up the topology here, which puts the verifier in between the prover and the website. But, oh, right. One thing to note here is that both these approaches, both the MPC TLS approach and the ZK TLS proxy mode, whatever you want to call it, these are both designated verifier protocols, which means that it's really between two parties, and the only party who really receives cryptographic assurances is the verifier that's actually running the protocol itself. So sorry for everyone that wants to do things on-chain. This does not give you trustless oracles, but it does give you a trustless off-chain designated verifier protocol. And yeah, all approaches kind of go towards the same affordances, which is that it allows you to basically compose with any application. If there is data on the Internet which is queryable using TLS, you can trustlessly compose with it, and without asking for permission. All servers are already running TLS. So you can basically take our protocol, wire it into your application, and now users can just prove arbitrary data to you in a privacy-preserving manner. And just to elaborate on the privacy part there, a user can query data from some arbitrary data source and present it to your application while redacting their secrets, such as their cookies and their HTTP request. They can redact data in the response. And we're probably not going to get into it today, but you can also do zero-knowledge proofs over the top of that data, such as hiding your address but proving you're not in the United States, and then the classic example of proving that you're above some age without revealing your exact birth date. Yeah, so those are the general affordances that these protocols provide. TLS Notary is free, open source. The core protocol is written in Rust. We have TypeScript bindings. We're going to demo a browser extension slash plugin system, which is just one way of distributing the TLS Notary protocol. And yeah, it's Apache, MIT, dual-licensed at your option. Do whatever you want with it. Yeah, so that's an introduction to generally what TLS Notary is and what it does. And yeah, let's just hop straight into building something. So good afternoon. I'm Hendrik, also from the TLS Notary team. And this is our schedule for the workshop today. There are plenty of members of the TLS Notary team here. So if you have questions, there's Tanner, Ryan. Who else do I see? Thomas in the back, Chris. And then Tsukino and Sinu. So if you run into any questions, just ask. So we just had this great introduction to TLS Notary. We'll start with coding part one. That will be mainly on your own machine, everything offline so that we don't run into any network problems. Then we'll do some experiments with TLS Notary where you work together with your neighbor. So just distribute the roles. And then we'll also switch to the browser extension, where we'll do a demo first, and then Tsukino will give an overview of how the whole browser setup works. Then we'll switch to coding part two, where we'll build our own plugins. And then we'll do a short extra slide where we go into the future of TLS Notary and discuss what comes up next. And then there's some play time where we invite you to build your own plugins or build on top of TLS Notary, and we're here to help you. And then we're also very open to hearing all your questions. So some tips and tricks for the workshop. Don't rush through all the items. We have time. 
There is a HackMD document. It allows for adding comments. So if you run into things that don't make sense or you see typos or whatever, just comment so that we can improve it for next time. Also, check the Wi-Fi. There is a local Wi-Fi network here that allows us to connect to each other. So make sure you're on the, yeah, I can't read the name, but I think it's the classroom network. The password is also over there. Also, for the web devs: we will do a little bit of Rust, but it's only reading Rust code, so don't be intimidated. You should be fine. And also, as I said, the TLS Notary team is here, so if you have questions, just ask them. So, if you go to this URL, you will find the classroom notes. And yeah, if you have any issues, just call us and then we will help. So maybe a quick overview of what we will do there. We will start with the most basic setup of TLS Notary, where we just have a server, a prover, and a verifier, where everything is on the same network on your local computer. So we'll start by starting all the necessary services. And then the prover will do the request to the web server with the cooperation of the verifier. This will result in an attestation. We will then create a presentation where we redact some information, and then the verifier will verify the info. And then in the second part, we will work with a neutral notary, where the roles of the verifying parties are split up. This is all in Rust. And then in the third part of the example, we will run the prover in the browser. And then in the notes, you will see that we will also distribute the roles. So you could run the prover, and your neighbor could be the test server, and another neighbor could be the verifier. Should be fun. Any questions so far? So the question was which repository for the first item; that's the tlsn repository. Sorry, so if you didn't bring your laptop, this will be a very boring workshop, I'm afraid. So maybe then just chat with some of us and ask us your questions. If you have an error message running the last command, don't stress, that's for further in the workshop. The prerequisites are mainly to have all the dependencies on your laptop so that you can go through the following steps. If you run into a rustc error, check your Rust compiler version and then just do rustup update to get the latest Rust compiler. So they couldn't install the PDK.
Thank you.", "eventId": "devcon-7", - "slot_start": 1731396600000, - "slot_end": 1731398400000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/13NjraDw6-VLGwVGpYUmZprFK68Rq7uVHZ7yVIgSx7Q0", - "resources_slides": null, + "slot_start": 1731577200000, + "slot_end": 1731582600000, + "slot_roomId": "classroom-e", + "resources_presentation": "https://docs.google.com/presentation/d/18dMKK1NHUfq3W_cP2sm0ttim6fH4ZLV0KlzLOZdAiZ0", + "resources_slides": "https://drive.google.com/file/d/1AkKRK-frg9D-yoY-XfuUKKZH_fVpR58W/view", "speakers": [ - "lucas-saldanha", - "stefan-bratanov" + "hendrik-eeckhaut", + "sinu", + "tsukino" ] }, "vector": [ - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -815101,6 +812823,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -815760,6 +813483,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -815767,8 +813491,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -815780,6 +813502,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -815926,6 +813650,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -815951,6 +813676,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -815961,7 +813688,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -816268,6 +813994,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -816399,9 +814126,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 0, @@ -816417,50 +814144,41 @@ }, { "session": { - "id": "unified-ethereum-vs-l2-ecosystem-competition-can-we-have-both", - "sourceId": "HZCDFP", - "title": "“Unified Ethereum” vs “L2 Ecosystem Competition”: Can we have both?", - "description": "This panel will dig into the delicate balance of Ethereum's rollup-centric future. We'll talk about the \"frenemy\" dynamic between competing L2 ecosystems, and how this can lead to a fragmented user experience. We'll strategize on ways to maintain diversity while making interoperability easy for users—including a discussion on the pros/cons of supporting standards like ERC-7683. Can we get the best of both worlds: the innovation and diversity of many L2s, with the UX of a unified Ethereum?", + "id": "unlocking-new-possibilities-with-stateless-architecture-in-layer-2", + "sourceId": "NGZBJL", + "title": "Unlocking New Possibilities with Stateless Architecture in Layer 2", + "description": "Explore the potential of stateless architecture in Layer 2 solutions. As Layer 2 technologies evolve, we will discuss the fundamental trade-offs and present how combining client-side Zero-Knowledge Proofs (ZKPs) with stateless architecture enhances efficiency. 
This session will highlight innovative possibilities not yet widely discussed in the Ethereum community, showing how this approach can revolutionize scalability, security, and privacy.", "track": "Layer 2", - "type": "Panel", + "type": "Talk", "expertise": "Intermediate", - "audience": "Product", + "audience": "Developper", "featured": false, "doNotRecord": false, "tags": [ - "Cross-L2", - "UI/UX", - "Intents", - "ethereum", - "unified", - "Cross-L2", - "Intents", - "UI/UX" + "statelessness" ], "keywords": [ - "ERC-7683", - "Interoperability", - "Unified-Ethereum" + "Privacy", + "Scalability", + "Statelessness" ], - "duration": 3385, + "duration": 1383, "language": "en", - "sources_swarmHash": "8edb1f118a91d0cb8965596fd17130e941f3aea7bcada1981305da431687f90d", - "sources_youtubeId": "4Tds-Bik7zM", + "sources_swarmHash": "8bea63f73194ad7063441d20c9051702aad4fa50e03192aada5cbe1f8a7cb960", + "sources_youtubeId": "hfj9QaGoZrs", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67345b159dbb7a90e13ea261", + "sources_streamethId": "67348d529dbb7a90e16bc9cb", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731479400000, - "slot_end": 1731483000000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1sjVmE9pcutiBwFVJbYVV2KdRqnNTg_wv6ZwyrExBY2Y", - "resources_slides": null, + "slot_start": 1731495600000, + "slot_end": 1731497400000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1CkoCHWyFJ_4IDI_puC1cfrAXBQJADtCY7bYExgXn3xQ", + "resources_slides": "https://drive.google.com/file/d/1A7B1aIm34k2UlI83nMR1NnaTkfOmDce8/view", "speakers": [ - "hart-lambur", - "ben-jones", - "vitalik-buterin", - "steven-goldfeder", - "jesse-pollak" + "leona-hioki" ] }, "vector": [ @@ -816657,7 +814375,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -816899,7 +814616,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -816961,7 +814677,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -817152,14 +814867,13 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -817270,11 +814984,9 @@ 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, @@ -817415,7 +815127,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -817526,7 +815237,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -817739,6 +815449,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -817747,7 +815461,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -817786,11 +815499,11 @@ 0, 0, 0, - 2, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -817800,47 +815513,55 @@ }, { "session": { - "id": "universal-eccs-use-cases-for-the-p256-precompile-in-decentralized-internet-infrastructure", - "sourceId": "NX7U8B", - "title": "Universal ECCs: Use Cases for the P256 Precompile in Decentralized Internet Infrastructure", - "description": "## Summary\r\n\r\nThe session will highlight the history of adoption of P256 in Elliptic Curve Cryptography (ECC), its current applications in web security, authentication, and encryption, and explore future possibilities for its integration into Ethereum and ENS to enhance decentralized internet infrastructure.", - "track": "Core Protocol", + "id": "unlocking-the-future-onboarding-the-fixed-income-market-to-ethereumchallenges-and-opportunities", + "sourceId": "N3JJFU", + "title": "Unlocking the Future: Onboarding the Fixed Income Market to Ethereum—Challenges and Opportunities", + "description": "Discover how Ethereum can revolutionize the world’s largest market: fixed income. 
This talk will explore strategies for onboarding fixed income markets onchain by collaborating with regulators, adopting progressive compliance, and streamlining UI/UX. We'll also discuss how to tackle challenges such as chain navigation, liquidity fragmentation, and fiat-to-crypto onboarding to drive the next wave of mass adoption.", + "track": "Real World Ethereum", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Research", + "expertise": "Beginner", + "audience": "Product", "featured": false, "doNotRecord": false, - "tags": [ - "ens", - "Accessibility", - "Public good", - "Use cases of cryptography" - ], "keywords": [ - "ENS" + "DeFi" + ], + "tags": [ + "Regulation", + "UI/UX", + "Account Abstraction", + "Economics", + "defi", + "Account Abstraction", + "Economics", + "Regulation", + "UI/UX" ], - "duration": 522, "language": "en", - "sources_swarmHash": "d137af18f4692a1194d1e3d606910f72833ec4282b51cac0a9b1a317238c2ef2", - "sources_youtubeId": "e_QBTQGMxPs", + "sources_swarmHash": "", + "sources_youtubeId": "", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "charles-st-louis" + ], "eventId": "devcon-7", - "slot_start": 1731467100000, - "slot_end": 1731467700000, + "slot_start": 1731580800000, + "slot_end": 1731581400000, "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1-xDtu6rJ4NegQFgMrkNcVtzLJVJkvrYD_L3OYcBdFQo", - "resources_slides": null, - "speakers": [ - "estmcmxcieth" - ] + "resources_presentation": "https://docs.google.com/presentation/d/15KHZ8vK6GD9sf4oCsV5ZRJ5sKkMhq4oPgvFv-uAVHsY", + "resources_slides": "https://drive.google.com/file/d/1kRioMh546g5ESssfRZeHUAuAkfbUw0RQ/view" }, "vector": [ 0, 0, 0, 0, + 0, + 0, 6, 0, 0, @@ -818616,6 +816337,8 @@ 0, 0, 0, + 0, + 0, 2, 0, 0, @@ -818632,12 +816355,15 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -818648,7 +816374,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -818686,6 +816411,9 @@ 0, 0, 0, + 2, + 0, + 0, 0, 0, 0, @@ -818697,7 +816425,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -818768,6 +816495,22 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -819121,33 +816864,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -819156,10 +816872,8 @@ 0, 0, 0, - 2, - 0, - 0, 0, + 2, 0, 0, 0, @@ -819173,47 +816887,48 @@ }, { "session": { - "id": "unlock-web2-data-with-tlsnotary-hands-on-workshop", - "sourceId": "VPMQGM", - "title": "Unlock Web2 Data with TLSNotary: Hands-On Workshop", - "description": "Join our hands-on workshop to master **TLSNotary**! Dive into multi-party-TLS and learn to prove and verify online data authenticity to a third-party verifier while ensuring privacy. We’ll start with small examples in Rust and build up to custom browser extensions in TypeScript to collect and verify private user data.\r\n\r\nBring your laptop, bring a friend, and learn together. 
Get ready to unlock and compose Web2 data in innovative ways.", - "track": "Applied Cryptography", - "type": "Workshop", + "id": "unpacking-eof-applications-in-developer-infrastructure-and-tooling", + "sourceId": "87XNSS", + "title": "Unpacking EOF: Applications in Developer Infrastructure and Tooling", + "description": "In this talk, we will delve into the Ethereum Object Format (EOF), a pivotal component of the upcoming Pectra hard-fork, focusing on its profound implications for development infrastructure and tooling. EIP-7692 introduces a new execution environment and a structured format for executable code, bringing extensive changes to the Ethereum Virtual Machine (EVM).\r\n\r\nHow will it affect developers? What will make their lives harder and what easier?", + "track": "Core Protocol", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developer", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Live Coding", - "Privacy", - "MPC", - "oracle", - "Live Coding", - "MPC", - "Privacy" + "Core Protocol", + "Developer Infrastructure", + "DevEx", + "EVM", + "Core Protocol", + "Developer Infrastructure", + "DevEx" ], "keywords": [ - "oracle" + "EOF", + "EIP-7692", + "EVM" ], - "duration": 5123, + "duration": 494, "language": "en", - "sources_swarmHash": "08a2c85bb2455f9e3ab6c2e0b2181df657e1370607a8a686c1627266923ca9d3", - "sources_youtubeId": "FhKjScuaNxw", + "sources_swarmHash": "49566c098cc805e61220bffaf5ba1387699a7cdf7b842332c6df071ece86c14f", + "sources_youtubeId": "OsKyVPdpJgI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673cbc4b982f234a12f066e9", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673cbc4b982f234a12f066e9.vtt", - "transcript_text": " . Thank you. How about now? There we go. So transport layer security, TLS is an old protocol. It's ubiquitous. It's all over the Internet. Basically everything supports it. If you're not using it, you probably should be. And the important thing is that all websites already support it. And so, yeah, basically, it's a protocol between your computer and a website that you're connecting to. It's using cryptography. So you can query data from a website and know that you're talking to the website you expect and your communications are protected. But one thing that it can't do for you is you can't query data from a website and then present it to another party. There's reasons for this that I won't really go into, but you can just know safely that it's not possible to just take a TLS transcript and send it to someone else. They won't know whether or not the data is actually authentic. But TLS, there are signatures there. There's cryptography there. And maybe we can plug into it and somehow leverage it for what we want to do, which is to allow people to privately prove data to a third party. I'm not sure if you've come across this jargon before. There's multiple different terms, multiple different approaches for generally the same thing. MPC TLS, CKTLS, web proofs, TLS middle boxes, Windows proxy. They're all essentially trying to do the same thing. We've implemented MPC TLS, and this leverages multi-party computation and zero-knowledge proofs in order to allow a prover to privately disclose data to a verifier in an implicitly secure way. And then there's other approaches, like a proxy approach, which kind of switches up the topology here, which puts the verifier in between the prover and the website. But, oh, right. 
One thing to note here is that both these approaches, both the MPC TLS approach and the ZK TLS proxy mode, whatever you want to call it, these are both designated verifier protocols, which means that it's really between two parties, and the only party who really receives cryptographic assurances is the verifier that's actually running the protocol itself. So sorry for everyone that wants to do things on-chain. This does not give you trustless oracles, but it does give you a trustless off-chain designated verifier protocol. And yeah, all approaches kind of go towards the same affordances, which is that it allows you to basically compose with any application. If there is data on the Internet which is queryable using TLS, you can trustlessly compose with it, and without asking for permission. All servers are already running TLS. So you can basically take our protocol, wire it into your application, and now users can just prove arbitrary data to you in a privacy-preserving manner. And just to elaborate on the privacy part there, a user can query data from some arbitrary data source and present it to your application while redacting their secrets, such as their cookies and their HTTP request. They can redact data in the response. And we're probably not going to get into it today, but you can also do zero-knowledge proofs over the top of that data, such as hiding your address but proving you're not in the United States, and then the classic example of proving that you're above some age without revealing your exact birth date. Yeah, so those are the general affordances that these protocols provide. TLS Notary is free, open source. The core protocol is written in Rust. We have TypeScript bindings. We're going to demo a browser extension slash plugin system, which is just one way of distributing the TLS Notary protocol. And yeah, it's Apache, MIT, dual-licensed at your option. Do whatever you want with it. Yeah, so that's an introduction to generally what TLS Notary is and what it does. And yeah, let's just hop straight into building something. So good afternoon. I'm Hendrik, also from the TLS Notary team. And this is our schedule for the workshop today. There are plenty of members of the TLS Notary team here. So if you have questions, there's Tanner, Ryan. Who else do I see? Thomas in the back, Chris. And then Tsukino and Sinu. So if you run into any questions, just ask. So we just had this great introduction to TLS Notary. We'll start with coding part one. That will be mainly on your own machine, everything offline so that we don't run into any network problems. Then we'll do some experiments with TLS Notary where you work together with your neighbor. So just distribute the roles. And then we'll also switch to the browser extension, where we'll do a demo first, and then Tsukino will give an overview of how the whole browser setup works. Then we'll switch to coding part two, where we'll build our own plugins. And then we'll do a short extra slide where we go into the future of TLS Notary and discuss what comes up next. And then there's some play time where we invite you to build your own plugins or build on top of TLS Notary, and we're here to help you. And then we're also very open to hearing all your questions. So some tips and tricks for the workshop. Don't rush through all the items. We have time. There is a HackMD document. It allows for adding comments. So if you run into things that don't make sense or you see typos or whatever, just comment so that we can improve it for next time. 
Also, check the Wi-Fi. So there is a local Wi-Fi network here that allows us to connect to each other. So make sure you're on the, yeah, I can't read the name, but I think it's a classroom network. The network, the password is also over there. Also for the web devs that will do a little bit of rust, but it's only reading Rust code, so don't be intimidated. You should be fine. And also, as I said, the TLS Notary team is here, so if you have questions, just ask them. So, if you go to this URL, you will find the classroom notes. And yeah, if you have any issues, just call us and then we will help. So maybe a quick overview of what we will do there. So we will start with the most basic setup of TLS Notary, where we just have a server, a prover, and a verifier, where everything is on the same networks on your local computer. So we'll start with starting all the necessary services. And then the prover will do the request to the web server with the cooperation of the verifier. This will return in an attestation. We will then create a presentation where we redact some information, and then the verifier will verify the info. And then the second part, we will work with a neutral notary, so where the roles of the verifying parts are split up. So this is all in Rust. And then in the third part of the example, we will run the prover in the browser. And then in the nodes, you will see that then we will also distribute the roles. So you could run the prover and your neighbor could be the test server and another neighbor could be the verifier. Should be fun. Any questions so far? So the question was which repository for the first item, so that's the TLS and repository Sorry, so if you didn't bring your laptop, this will be a very boring workshop, I'm afraid. So maybe then just chat with some of us and ask us your questions. Vielen Dank. Thank you. Thank you. Amen. Vielen Dank. Thank you. If you have an error message running the last command, don't stress, that's for further in the workshop. So the prerequisites is mainly to have all the dependencies on your laptop so that you can go through the following steps. Thank you. Thank you. All right. Thank you. If you run into a Rust C error, check your Rust compiler version and then just do Rust update to get the latest Rust compiler. Thank you. Thank you. Vielen Dank. Thank you. Thank you. Thank you. Thank you. Vielen Dank. Thank you. So they couldn't install the PDK. Thank you. Thank you. Thank you. Vielen Dank. Thank you. Thank you. Thank you. Thank you. ¶¶ ¶¶ Thank you. ¶¶ Thank you. ¶¶ Thank you. Thank you. Vielen Dank. 
Thank you.", + "sources_streamethId": "673cc669982f234a120c4b7b", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731577200000, - "slot_end": 1731582600000, - "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/18dMKK1NHUfq3W_cP2sm0ttim6fH4ZLV0KlzLOZdAiZ0", - "resources_slides": null, + "slot_start": 1731562200000, + "slot_end": 1731562800000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1yIsFqKcISo1wBOpMh8bQqTwKa7ihE8HDSAKmoWXYRs8", + "resources_slides": "https://drive.google.com/file/d/19qxK_vp8gI9EHTLEgsFBQzrk91_fK1gv/view", "speakers": [ - "hendrik-eeckhaut", - "sinu", - "tsukino" + "nebojsa-urosevic", + "pavle-drobnjak" ] }, "vector": [ @@ -819221,17 +816936,13 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -819889,7 +817600,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -819908,8 +817618,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -819917,6 +817625,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -819989,6 +817699,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -819999,6 +817710,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -820022,6 +817734,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -820057,7 +817770,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -820083,8 +817795,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -820177,6 +817887,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -820402,7 +818113,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -820535,9 +818245,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -820553,47 +818263,50 @@ }, { "session": { - "id": "unlocking-new-possibilities-with-stateless-architecture-in-layer-2", - "sourceId": "NGZBJL", - "title": "Unlocking New Possibilities with Stateless Architecture in Layer 2", - "description": "Explore the potential of stateless architecture in Layer 2 solutions. As Layer 2 technologies evolve, we will discuss the fundamental trade-offs and present how combining client-side Zero-Knowledge Proofs (ZKPs) with stateless architecture enhances efficiency. This session will highlight innovative possibilities not yet widely discussed in the Ethereum community, showing how this approach can revolutionize scalability, security, and privacy.", - "track": "Layer 2", - "type": "Talk", + "id": "updating-gas-cost-schedule-based-on-reproducible-benchmarks", + "sourceId": "TZVK7F", + "title": "Updating Gas Cost Schedule based on reproducible benchmarks", + "description": "Sponsored by the Ethereum Foundation, our project evaluates the real cost of executing OPCODEs and procompiles in EVMs across diverse clients. 
We present the up-to-date benchmarks, the new Gas Cost Schedule proposal, a do-it-yourself solution to reproduce measurements in your environment, and an automated way to generate new proposals for each hard fork.", + "track": "Core Protocol", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Developper", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "statelessness" + "Gas", + "Decentralization", + "infrastructure", + "node", + "Decentralization", + "Gas" ], "keywords": [ - "Privacy", - "Scalability", - "Statelessness" + "Gas Cost Schedule", + "EVM Internals", + "Client Diversity", + "Node Infrastructure" ], - "duration": 1383, + "duration": 479, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "7f453befdfb576ea007ae870d4419b6bda34be91a758cd779132803505e89e90", + "sources_youtubeId": "lHutPWuF3EY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67348d529dbb7a90e16bc9cb", + "sources_streamethId": "6736f9981b0f83434dd9a416", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731495600000, - "slot_end": 1731497400000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1CkoCHWyFJ_4IDI_puC1cfrAXBQJADtCY7bYExgXn3xQ", - "resources_slides": null, + "slot_start": 1731572400000, + "slot_end": 1731573000000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1Dzcuj-EPyhFVz3jUb7kd535irDd3n7X0WxNqRVI5Rgs", + "resources_slides": "https://drive.google.com/file/d/1ZLhcSjEgO0A73_rxwq14DA8CcPkMuUMW/view", "speakers": [ - "leona-hioki" + "jacek-glen" ] }, "vector": [ - 0, - 0, - 0, 0, 0, 0, @@ -821284,17 +818997,12 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -821419,6 +819127,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -821448,6 +819157,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -821533,6 +819243,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -821571,6 +819282,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -821863,7 +819575,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -821914,10 +819625,10 @@ 0, 0, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -821927,39 +819638,47 @@ }, { "session": { - "id": "unlocking-the-future-onboarding-the-fixed-income-market-to-ethereumchallenges-and-opportunities", - "sourceId": "N3JJFU", - "title": "Unlocking the Future: Onboarding the Fixed Income Market to Ethereum—Challenges and Opportunities", - "description": "Discover how Ethereum can revolutionize the world’s largest market: fixed income. This talk will explore strategies for onboarding fixed income markets onchain by collaborating with regulators, adopting progressive compliance, and streamlining UI/UX. We'll also discuss how to tackle challenges such as chain navigation, liquidity fragmentation, and fiat-to-crypto onboarding to drive the next wave of mass adoption.", - "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "usability-changes-in-a-post-eoa-world", + "sourceId": "P9FRCH", + "title": "Usability changes in a post-EOA world", + "description": "The wallet world has evolved to embrace contract accounts (4337 and 7702), app-owned wallets, session keys (CAIP-25), and permissions controls (7715). How might we on the app layer design and build upon these new account types? 
In this talk, we will demonstrate the possibilities for novel user flows given these new account standards, compare how these new standards can introduce pitfalls, and provide best practices on how to design for app layer in a post-7702 world.", + "track": "Usability", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Design", "featured": false, "doNotRecord": false, - "keywords": [ - "DeFi" - ], "tags": [ - "Regulation", - "UI/UX", - "Account Abstraction", - "Economics", - "defi", + "ux", + "wallet", "Account Abstraction", - "Economics", - "Regulation", + "Design", + "Key Management", "UI/UX" ], - "language": "en", - "speakers": [ - "charles-st-louis" + "keywords": [ + "Wallet", + "UX" ], + "duration": 1443, + "language": "en", + "sources_swarmHash": "c9499f3505fdc7dbc21b9bcae9814112702fbbd925537261068b032270172cdb", + "sources_youtubeId": "MvFforeD1SU", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735e3f49dbb7a90e171c913", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735e3f49dbb7a90e171c913.vtt", + "transcript_text": " So today we're giving a talk about what evolving standards, different account models, and how that changes usability. This talk is basically going to be about endgame wallets and the connection between wallets and dApps. Okay, sorry. All right, so a little bit of background of what the UX looks like today is that dApp UX today is constrained by the concept of the account model. So what that usually means is that when you visit a dApp, the purpose in life of that dApp is constrained by the thing that you're trying to do on chain, and also the private key signatures and how you interact with that dApp. All right. There are a lot of account abstraction talks, I realize, at this conference. So I don't want to spend too much time. But these are some properties that you get when you're signing with a single signature. So your entire account gets connected, key management is all on you, no native batching, and you have to have native gas tokens. These are the common pitfalls that I think most of us are familiar with. So no recovery or upgrades, signing a message for virtually every action that you have to do, and also having blocked experiences on dApps where you need to connect your wallet to be able to see a quote or you need to be able to connect before you do anything. All right. So what's at stake here is something that I'd like to call the appification of dApps. So instead of the dApp experience being one thing and then using a legacy app and being another thing, it would be great if they're more blended. And the properties of that would be enhanced security, smooth onboarding, and what I want to say, convergent applications where you're blending on-chain, off-chain, and cross-chain components. Oh, I'm blocking the QR code. I'm so sorry. Maybe I'll come over here. All right. So what I want to do now is give a tiny glimpse into what greater programmability and security on smart contract wallets actually looks like on a UI. I don't want it to be too detailed or implementation specific. So these are just general examples of what can be done in the future. All right. So with smart contract wallets, they can run code. And with that, you can get conditional logic. 
One use case that I think would be pretty interesting is like, let's say that you are a dApp and you manage portfolios and you want to add a security service that says if there is an on-chain condition that is met and we are able to prove that there is an exploit or there's a vulnerability in this dApp, please remove my exposure and unwind my positions. So in order to do this, you have to presign it and then you would be waiting for the condition to be met and then you won't have to unwind it manually as you do today. Other things you get with smart contract wallets, you can do cross-chain calling and balance aggregation because you can query the state of other contracts as a smart contract wallet. So this means that let's say I want to buy an NFT on Zora, and I only have funds on Optimism and Mainnet, this doesn't have to be a problem because we can aggregate the balance and bridge it for the user on their behalf. So this means that the click path is getting much lower, so you're not clicking as much and approving everything, looking for which bridge has the liquidity that you want, and numerous other things that we have to do today. Okay. This use case, this example is the most important one to get right because user ops are not highly adopted right now. It's still pretty early. So the thing that we need to nail is the onboarding and the issuing of the smart wallet. I mean the smart account. So what I say here is modular authentication allows users to define the dApp's level of access. So this is not unlike when you sign in with Google, and Google is telling you, oh, when you're going to Calendly, I'd like to see your contacts and your calendar and what you have set up there. We could do this on chain as well with smart accounts. So in this example, I want to show that it's looking for a specific domain that you have set, and then it wants to request access to a spend limit or maybe usage of modules that this dApp wants to use. Okay, so those are some very high-level examples of what can be done in the future, or even now. So the upshot of all of this is that you can get better blending of off-chain, on-chain, and cross-chain components. This is a nod to the convergent application thing that I was talking about, so that dApps don't feel as single-purpose. The thing is, though, like many complex systems, when you introduce new solutions, you can also risk introducing new frictions. And I'll gloss over this very quickly. Now that we want fewer approvals, currently right now with the standards that we have is that you do migrate to a new account and funds get fragmented. Even though there's fewer approvals, that is a user friction that we might have to deal with. Obviously the evolving standards and there's different ways in which that we can try to deal with this. So there's also enhanced programmability. This means like the contract can run code. It's like currently the EOA, they don't have, like it can't actually read state or anything like that. So EOA is still the initial signer for it. So that's another user friction that is possible. Gas abstraction, paying gas in other tokens other than the native token of the network. This is not gas optimized yet because we're still prior to 7702 being on Pectra, I think. And then we have account recovery. These can introduce unfamiliar user patterns. 
So the one that I, again, like the one that I want to focus on is the onboarding aspect, which would be the modular authentication, because there's a plethora of solutions that you can choose from for implementing the onboarding of smart accounts. And so I want to hand it over to Greg so that he can show you some best practices on how to implement a helpful experience. Thank you, Sydney. Sorry. Okay, perfect. Sorry, I forgot where we were at. How many people here are familiar with, like, the CAIPs, Chain-Agnostic Improvement Proposals? This is good. We need to change that. How many of you are familiar with EIPs? Keep your hand up. How many of you actively check the EIPs or the ERCs? How many of you are wallet developers? How many of you are applications as opposed to smart contract accounts? Yeah, we have to change all this. Basically, in order to get any of this done, we need to start focusing on standards more. And so I'm going to verbally assault you all with some very important ERCs and show you exactly how we get to, frankly, what's probably the end game wallet-to-dApp interaction, where those pieces fit together and how we do it. It's going to take a couple of ERCs, CAIPs to get there. Frankly though they're all available. They're live and there's other versions being iterated on. So we're not too far away. It's just going to take some work because we need more people actively working and looking at standards. These are the main ones we're going to be talking about. 275, 4337, which we should all be familiar with, 7702, 7555, 7715, 7679, and RIP7575, which I'm not going to cover. Let's start, though, with some, like, visual stuff. And then I'll kind of stitch it together at the end. When you first go, this is a website that's live, basement.fun. When you first go to the website, you get to experience the entire website without ever needing to get a wallet prompt, without needing to actually install anything, without even needing your funds. The way they do that is they put an embedded wallet behind the scenes for you. And there's a reason they go with the embedded wallet behind the scenes. It's because they don't want to force you into a smart contract account right at the beginning, because that would just cause further fragmentation if you come to the app later with your own account that has a smart contract wallet enabled. Now we're already just fragmenting. We're splitting up your accounts where they don't know about each other. So now you're having to maintain effectively two separate bank accounts. I don't know if anyone's done banking with two bank accounts, but it's, like, unenjoyable. So I don't know why we'd force our users to do it. Similarly, with 7555 and CAIP-275, we have this issue. Do we have any mobile app developers here? Maybe mobile game developers? Have you ever tried to actually connect to somebody else's smart account or even someone else's external wallet? It's borderline impossible. And that's because you can't discover. The ability to discover a wallet and then know where it's deployed is practically impossible. That's what CAIP-275 tries to do. It's the idea that you basically look up gregthegreek.eth and instantaneously I'll figure out where the address is, what type of account is it, EOA, smart account, safe account, is it someone else's ENS. And we can then use a wire protocol, which is the OAuth one, to basically go and connect to them. Because we do know everyone's familiar with passkeys. 
We know that that's probably the best way to remove seed phrases. But how many of you know that your passkey is scoped to the website you issued it from? Or the app you issued it from? That means if I generate a public key from the passkey, it's only going to be found there. If I go to chainsafe.io and I issue passkeys that way, and I then go to sprinter.tech and try to issue a passkey there, I'll never get the same pub address. And this is a fundamentally big problem because we're all issuing passkeys with our smart accounts. So we need to create a way to bridge that gap. And that's what 7555 aims to do. And I'll get into it a little bit. We're going to stitch this all together. Don't worry. It's going to be less confusing, I promise. 7715. This is pretty... this is also an interesting one, because what we do is we start to say, hey, if I'm going to remote into somebody else's smart account and I'm going to ask to use that smart account with a passkey that I don't have, I don't want to constantly go back and forth. The UX breaks. You're constantly flipping out of a mobile game that's supposed to keep you in the game, and now you're going out to sign every single time you need to do something. So what 7715 tries to do is basically says, hey, we're going to let a random key, aka this is where we tie the embedded wallet back into the picture, the embedded wallet can now act as a signer to the wallet for a given period of time. And if you're thinking about the Web2 model, this is a JWT token. All we've done is brought back the JWT and said put some cryptographic properties to it. And that's how we can maintain it. So you can maintain your UX in the app and use an embedded wallet. Now you don't need to deploy them in a smart contract account. You can use what they already have in preexisting infrastructure. Finally 7579, this is an interesting one. You do want a smart account feature. There's stuff that the smart account doesn't actually provide you. It's simply a plug-in. Build it as a plug-in. You tell the wallet to register the plug-in. You get the full benefit of deploying a smart contract account for someone without fragmenting their balance. You retain the existing experience. And the big thing to discuss here is, which I'll get into and then is it the next slide? No. Oh, yeah, it is the next slide. Perfect. And so these are all the EIPs. ERCs. That stitch it together. And what we'll do is we'll go through an interactive game of actually stitching together to show you kind of like how the end game in one to two years' time will actually look like. Because we have a lot of teams that are doing really good infrastructure work at the application layer, but frankly, because we don't have all of these things plumbed in, we are frankly setting up the applications for a little bit of failure, and there's going to be a period in time where we're going to see mass migration of self-deploying app infrastructure outwards. And my main argument I have for this is there exists two spectrums of UX that we need to maintain as an app developer. There's your local app UX, which we have full control over, and that's why we're suggesting you deploy with embedded wallets, because you can fully control it. You can make your perp DEX that's fully on-chain look and feel exactly like Robinhood. You get all the benefits. So there's no reason to go anywhere else. 
The other thing is we have wallet land UX. And most app developers think we don't have control over this. And that's where we're hoping to tell you something else. Because you do. You have to control that you don't change it. The minute you go and change someone else's experience of how they bank effectively with your app, it's breaking it. And when you deploy a smart account into the application layer, we've gone back to the old principle of, you know, think about like Starbucks. You have to put money into the Starbucks app. Your bank now no longer knows how much money. If you moved $100 in there, you don't know that you have your bank account value plus $100 that you have unspent in Starbucks. You just have minus 100. We have the ability to not do that. We have the ability to not fragment and deposit funds but rather borrow funds directly from the bank with a scoped permission. And that's what we're going to achieve here. So what do we need to do to get this done? The whole pipeline is going to look like this. We want people and wallet infra teams to deploy 4337. We want them to be the smart account issuer. If they're not the smart account issuer, if the app ends up being the smart account issuer, you need to be able to let the user rage quit. You need to be able to set permissions. Like, are you comfortable as an app developer saying, oh, yeah, I want to make sure when I get into a chain-abstracted multi-chain future that the user always has one ETH on mainnet, because they always want to make sure they have gas to pull out some Aave positions or Compound positions that I don't know about. They might want that preference. But are you really ready to actually deploy all that infra or do you just want to focus on your app? Let the wallet handle that part. Then, passkeys are always the way to go. It's already like this today. And it's only going to get better. We have solutions to do like multi-account passkeys where the address persists. The app is where you're going to deploy the embedded wallet. Within that app, we see CAIP-275 come in. And I want to think about clicks, so keep your hand up thinking about the clicks we're going to do here. When the user first auths onto your website, they're going to type in their identifier, email, phone number, ENS name. They'll click enter once. I'll then go and look up in the registry, you figure out who issued it, who their provider is, the smart account provider, and you go to that website directly. This is now going to look like an iframe. This is what 7555 does. Think about that iframe you're used to, you know, when Google says, hey, someone says can you log in with Google so I can get access to your calendar information. It's the exact same flow. That's going to pop up the destination where the smart account is. It might be the Gnosis Safe website. Once you get in there, along with that message, we're going to pass in 7715. That's now going to say, hey, I'm a perp DEX. I want to trade 1,000 USDC every hour for the next 24 hours. That means your total bound is 24K worth of USDC that could be taken by this application, but it's defined, strictly defined the same way Google strictly defines your login process. 7579 I talked about before. You want that extra smart contract functionality on the same wire protocol, the same 7555 wire protocol. You'll be able to attach whatever else you need. You need that extra functionality as a plug-in. It's simple. You just add it in. The screen shows the exact same view. 
7811, for anybody that follows standards, you probably haven't seen this yet. It came out a few days ago. But the simple thing is, you know, when you're an app, how many of you develop your app and then go, okay, cool, step one, hit the RPC for every available token balance that this user has? There's no need. It's already been done for you. The wallet knows what the user has available. And in a cross-chain, multi-chain world, the wallet also knows how much is available against other chains as well as what the spending power is. I have USDC. I need to use ETH to go buy an NFT. It knows that I can convert USDC. The app doesn't need to know about that. So this makes an extension for it. There's a plethora of other ones we can add on to here. But if you think about it, at the end of the day, what happens with this experience? Remember how I said to think about the number of clicks we did for the user here? There's two. We've reduced it to two. You go from looking up your account, automatically generating the pop-up iframe. If anybody's used Coinbase's wallet recently, there's that iframe that pops up. That's your OAuth window. The user clicks. One click, OAuth window is up. One more click to confirm. And now you have the full power of that smart account locally while only using an embedded wallet, and you can ensure the security remains constant because the permissions are there in place. And you now have the full flexibility. You never had to deploy an account or any sort of wallet infra. You don't have to care about the wallet infra. You don't have to even care about what app, what type of implementation they have, because you can attach the plug-in you need. So you can ensure as long as they're 4337-compatible, you can boot in anything else you want. You don't have to go and deploy them a custom implementation. Your onboarding experience just 10x's right out of the gate. This is what the end game realistically will look like. How close are we? We started with 7555. Realized we needed 7715. This one is almost done. There's a few last little things needed. Modules by Rhinestone have been done forever, and frankly, if you haven't looked at them, you should. This will be pretty quick. 275 is live. Lit's using it. And we already know we've got passkeys and 4337. We just need the permissions module to finish. And we need to get the main wallet providers to start adopting the slugs so that we can have those pop-ups happen. And you have a full OAuth journey that when you go to onboard your grandmother, it just looks, or your mother, it looks exactly like how they do by logging in with Google. And you can issue all the infra underneath blindly. And so that's, this is the endgame for wallets. This is how we envision it. And frankly, we need more people looking at standards and helping just see what's out there and realize what they can do with what's available. Because all this already exists. We can have this. We can have a two-click journey, and you get a smart account without ever having to deal with that infra yourself. Oh, there's more slides. Ah. Yeah. Time for a... I don't know. Yeah. Time for ground zero good dApp experience. Thank you, guys. Let's keep in touch. Okay. We have time for questions. So if you have questions, you still can ask them. We have two questions already. So the first question is, what is it that we're missing or need to change on layer one in order to enable better experiences for everyone and not just a specific layer two? 
Native account abstraction that doesn't look like 7702. Something closer to 3074 that actually achieved its goal. There's a lot of chains that actually have that at the base layer. And 7702 is good. It's a migration tool. It's a stopgap. We're going to have to release more account abstraction features down the road, natively. But unfortunately, that's the biggest one. Okay. So our next question is about passkeys. You said that the problem with them is being scoped to specific sites, but later you said they're actually good, that we should use them. So what is the solution here? Two separate problems. So the one problem is that when I issue a passkey, you get a public key. Like when you do a passkey authentication, you know, Face ID, Touch ID, whatever it may be, that issues you a public address. And it works the same way that a YubiKey works. What happens is they basically say, cool, what's like the DNS record? And they match that DNS record to a new public key generated on the passkey chain. So the problem is if I go to another website, it's going to either get a new one or create a new passkey. And so you can never have that same address persisted through. And so that's why you have to push outwards. If you want access to the signing key of the smart account, you actually need to go to whoever generated the passkey in the first place. And that's why we need that OAuth window. The same way Google doesn't let you just like OAuth inside of an API call inside some random website. It would just be very insecure. Okay. Next question is if we connect our wallets to apps in this way that you described and there's all our savings on these wallets, is it going to be safe enough? How many of you trust Gnosis Safe? Or Safe? Lovely. Do you trust Safe to provide you the UI you need to ensure you're signing the right message? This isn't rhetorical. Okay, good. In that case, if you trust them to provide you the correct UI when you sign a message, why would they not provide you the correct UI when they say, hey, by the way, this thing's going to drain your account? Instead, it's going to say, hey, it just wants to spend like a thousand USDC for the next like two hours. And then the token expires and they have to refresh that token. Same thing as a JWT with your bank. If you trust the people providing you the wallet, then you should have no problem actually using it in the system. It's the same problem with, it's exactly why I said, you as an app developer, are you actually comfortable deploying all this infra? Even the person giving you the smart account, are they giving you all the infra you need to ensure your users are safe? Push the barrier to the wallet. There's a last question, I think. Next question. In the endgame, are token balances still held on external wallets or a smart contract address? 4337. So, smart account. Okay. And banks often have the backup of support. Will the wallets provide the same backup? I mean, pick a good provider. MetaMask support is pretty good on Twitter. I think this question is really interesting. It's more like an insurance type of question. Because I think banks are like they have FDIC. It seems like this question is asking about support of user funds, which I think is external to wallets themselves. Or recovery mechanisms. Yeah, exactly. Okay, if there are no more questions, that's it. Please give it a round of applause for Greg and Cindy. 
Thank you.", "eventId": "devcon-7", - "slot_start": 1731580800000, - "slot_end": 1731581400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/15KHZ8vK6GD9sf4oCsV5ZRJ5sKkMhq4oPgvFv-uAVHsY" + "slot_start": 1731573000000, + "slot_end": 1731574800000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1Qe6obqukS9lTToSF06QtJ1Ovqj8Dzv1P-Vi0z9-wI7w", + "resources_slides": "https://drive.google.com/file/d/1TaIFDPviXHm1cdsJcOiUxZ9OldqpXQCZ/view", + "speakers": [ + "cindy", + "gregthegreek" + ] }, "vector": [ 0, @@ -821968,12 +819687,9 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -822595,6 +820311,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -822656,11 +820373,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -822748,7 +820465,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -822820,26 +820536,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -822876,6 +820572,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -822904,7 +820601,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -823244,6 +820940,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -823264,6 +820961,16 @@ 0, 0, 0, + 2, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -823278,7 +820985,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -823293,66 +820999,68 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 0, 0 ] }, { "session": { - "id": "unpacking-eof-applications-in-developer-infrastructure-and-tooling", - "sourceId": "87XNSS", - "title": "Unpacking EOF: Applications in Developer Infrastructure and Tooling", - "description": "In this talk, we will delve into the Ethereum Object Format (EOF), a pivotal component of the upcoming Pectra hard-fork, focusing on its profound implications for development infrastructure and tooling. EIP-7692 introduces a new execution environment and a structured format for executable code, bringing extensive changes to the Ethereum Virtual Machine (EVM).\r\n\r\nHow will it affect developers? What will make their lives harder and what easier?", - "track": "Core Protocol", - "type": "Lightning Talk", - "expertise": "Intermediate", + "id": "usc-ultimate-solidity-championship", + "sourceId": "UE8WVS", + "title": "USC Ultimate Solidity Championship", + "description": "A 30-minute Solidity programming competition where the winner is determined objectively, permissionlessly, and transparently after the time expires. The Ultimate Solidity Championship (USC) is an event designed to showcase the skills of the best Solidity developers in the ecosystem. 
Its primary goals are to highlight Solidity programming as an art form, onboard more developers, educate the community, and foster collaboration, ultimately enhancing Ethereum's long-term impact.", + "track": "Entertainment", + "type": "Mixed Formats", + "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Core Protocol", - "Developer Infrastructure", - "DevEx", - "EVM", - "Core Protocol", - "Developer Infrastructure", - "DevEx" - ], "keywords": [ - "EOF", - "EIP-7692", - "EVM" + "Solidity", + "Programming", + "Competition" + ], + "tags": [ + "Art", + "Hacks", + "Public good" ], - "duration": 494, "language": "en", - "sources_swarmHash": "49566c098cc805e61220bffaf5ba1387699a7cdf7b842332c6df071ece86c14f", - "sources_youtubeId": "OsKyVPdpJgI", + "sources_swarmHash": "", + "sources_youtubeId": "", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673cc669982f234a120c4b7b", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", - "eventId": "devcon-7", - "slot_start": 1731562200000, - "slot_end": 1731562800000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1yIsFqKcISo1wBOpMh8bQqTwKa7ihE8HDSAKmoWXYRs8", - "resources_slides": null, "speakers": [ - "nebojsa-urosevic", - "pavle-drobnjak" - ] + "five" + ], + "eventId": "devcon-7", + "slot_start": 1731582000000, + "slot_end": 1731583800000, + "slot_roomId": "classroom-b", + "resources_presentation": "https://docs.google.com/presentation/d/1flrl1DVDOcGQrL2WtGO0tRQUbwP7P_Xk3IQeWVr_wIU", + "resources_slides": "" }, "vector": [ 0, 0, 0, 0, - 6, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -824038,8 +821746,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -824113,7 +821819,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -824124,7 +821829,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -824148,7 +821852,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -824204,6 +821907,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -824301,7 +822006,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -824349,6 +822053,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -824659,8 +822364,6 @@ 0, 2, 0, - 0, - 0, 2, 0, 0, @@ -824679,48 +822382,47 @@ }, { "session": { - "id": "updating-gas-cost-schedule-based-on-reproducible-benchmarks", - "sourceId": "TZVK7F", - "title": "Updating Gas Cost Schedule based on reproducible benchmarks", - "description": "Sponsored by the Ethereum Foundation, our project evaluates the real cost of executing OPCODEs and procompiles in EVMs across diverse clients. We present the up-to-date benchmarks, the new Gas Cost Schedule proposal, a do-it-yourself solution to reproduce measurements in your environment, and an automated way to generate new proposals for each hard fork.", + "id": "using-reth-execution-extensions-for-next-generation-indexing", + "sourceId": "YUFRTQ", + "title": "Using Reth Execution Extensions for next generation indexing", + "description": "Recently, Reth and Geth released the ExEx and live tracer features, respectively, which share similar functionalities. Both provide real-time, detailed access to chain and state events. 
As ExEx developers begin to persist this data and explore ways to make it accessible to users, new questions arise: how can we best serve this data to users, and what might the indexers of the future look like?", "track": "Core Protocol", - "type": "Lightning Talk", + "type": "Talk", "expertise": "Intermediate", - "audience": "Community", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Gas", - "Decentralization", - "infrastructure", - "node", - "Decentralization", - "Gas" - ], "keywords": [ - "Gas Cost Schedule", - "EVM Internals", - "Client Diversity", - "Node Infrastructure" + "client", + "plugin", + "indexer" + ], + "tags": [ + "Layer 1", + "Developer Infrastructure", + "Tooling", + "plugin", + "Developer Infrastructure", + "Layer 1", + "Tooling" ], - "duration": 479, "language": "en", - "sources_swarmHash": "7f453befdfb576ea007ae870d4419b6bda34be91a758cd779132803505e89e90", - "sources_youtubeId": "lHutPWuF3EY", + "sources_swarmHash": "e434c586141c1e57eb8d7a9a5d407ed187d97b43e104b724b1d5ada20df26dff", + "sources_youtubeId": "GhEhzE9SFqY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736f9981b0f83434dd9a416", + "sources_streamethId": "", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", + "speakers": [ + "alexey-shekhirin" + ], "eventId": "devcon-7", - "slot_start": 1731572400000, - "slot_end": 1731573000000, + "slot_start": 1731484800000, + "slot_end": 1731486600000, "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1Dzcuj-EPyhFVz3jUb7kd535irDd3n7X0WxNqRVI5Rgs", - "resources_slides": null, - "speakers": [ - "jacek-glen" - ] + "resources_presentation": "https://docs.google.com/presentation/d/1grvRBeTUC4cPjxwSFQPy6d3VmlJ6P3Y2_R99fgeourE", + "resources_slides": "https://drive.google.com/file/d/1PliCCH-hA4rZOGxo9U4esC6V8VI6TOIQ/view" }, "vector": [ 0, @@ -825486,11 +823188,13 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -825523,6 +823227,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -825546,11 +823251,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -825576,7 +823276,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -825662,7 +823361,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -825701,8 +823399,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -826011,6 +823707,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -826041,12 +823738,12 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -826059,46 +823756,49 @@ }, { "session": { - "id": "usability-changes-in-a-post-eoa-world", - "sourceId": "P9FRCH", - "title": "Usability changes in a post-EOA world", - "description": "The wallet world has evolved to embrace contract accounts (4337 and 7702), app-owned wallets, session keys (CAIP-25), and permissions controls (7715). How might we on the app layer design and build upon these new account types? In this talk, we will demonstrate the possibilities for novel user flows given these new account standards, compare how these new standards can introduce pitfalls, and provide best practices on how to design for app layer in a post-7702 world.", - "track": "Usability", - "type": "Talk", + "id": "utilizing-national-ids-in-the-ethereum-ecosystem", + "sourceId": "PR78EL", + "title": "Utilizing national IDs in the Ethereum ecosystem", + "description": "This panel brings together developers of MynaWallet, Anon-Aadhaar, Proof of Passport and zkPassport, who are exploring and developing applications that utilize government-issued IDs in the Ethereum ecosystem. 
We will discuss the characteristics of each ID system and what functions can be realized using tech stacks in the Ethereum ecosystem and cryptographic technology.", + "track": "Real World Ethereum", + "type": "Panel", "expertise": "Intermediate", - "audience": "Design", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "ux", - "wallet", - "Account Abstraction", - "Design", - "Key Management", - "UI/UX" + "Civil Resistance", + "Privacy", + "Identity", + "Civil Resistance", + "Identity", + "Privacy" ], "keywords": [ - "Wallet", - "UX" + "National IDs", + "Selective Disclosure" ], - "duration": 1443, + "duration": 3351, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "5f450cd1ae6a875ce249bdf3b65ee1d5e4b524568296c784d24f0c3b908c3845", + "sources_youtubeId": "XsQ_DiECL0I", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735e3f49dbb7a90e171c913", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735e3f49dbb7a90e171c913.vtt", - "transcript_text": " So today we're giving a talk about what evolving standards, different account models, and how that changes usability. This talk is basically going to be about endgame wallets and the connection between wallets and dApps. Okay, sorry. All right, so a little bit of background of what the UX looks like today is that dApp UX today is constrained by the concept of the account model. So what that usually means is that when you visit a dApp, the purpose in life of that dApp is constrained by the thing that you're trying to do on chain, and also the private key signatures and how you interact with that dApp. All right. There are a lot of account abstraction talks, I realize, at this conference. So I don't want to spend too much time. But these are some properties that you get when you're signing with a single signature. So your entire account gets connected, key management is all on you, no native batching, and you have to have native gas tokens. These are the common pitfalls that I think most of us are familiar with. So no recovery or upgrades, signing a message for virtually every action that you have to do, and also having blocked experiences on dApps where you need to connect your wallet to be able to see a quote or you need to be able to connect before you do anything. All right. So what's at stake here is something that I'd like to call the appification of dApps. So instead of the dApp experience being one thing and then using a legacy app and being another thing, it would be great if they're more blended. And the properties of that would be enhanced security, smooth onboarding, and what I want to say, convergent applications where you're blending on-chain, off-chain, and cross-chain components. Oh, I'm blocking the QR code. I'm so sorry. Maybe I'll come over here. All right. So what I want to do now is give a tiny glimpse into what greater programmability and security on smart contract wallets actually looks like on a UI. I don't want it to be too detailed or implementation specific. So these are just general examples of what can be done in the future. All right. So with smart contract wallets, they can run code. And with that, you can get conditional logic. 
One use case that I think would be pretty interesting is like, let's say that you are a DAB and you manage portfolios and you want to add a security service that says if there is an on-chain condition that is met and we are able to prove that there is an exploit or there's a vulnerability in this DAB, please remove my exposure and unwind my positions. So in order to do this, you have to presign it and then you would be waiting for the condition to be met and then you won't have to unwind it manually as you do today. Other things you get with smart contract wallets, you can do cross-chain calling and balance aggregation because you can query the state of other contracts as a smart contract wallet. So this means that let's say I want to buy an NFT on Zora, and I only have funds on Optimism and Mainnet, this doesn't have to be a problem because we can aggregate the balance and bridge it for the user on their behalf. So this means that the click path is getting much lower, so you're not clicking as much and approving everything, looking for which bridge has the liquidity that you want, and numerous other things that we have to do today. Okay. This use case, this example is the most important one to get right because user ops are not highly adopted right now. It's still pretty early. So the thing that we need to nail is the onboarding and the issuing of the smart wallet. I mean the smart account. So what I say here is modular authentication allows users to define the dApps level of access. So this is not unlike when you sign in with Google, and Google is telling you, oh, when you're going to Calendly, I'd like to see your contacts and your calendar and what you have set up there. We could do this on chain as well with smart accounts. So in this example, I want to show that it's looking for a specific domain that you have set, and then it wants to request access to a spend limit or maybe usage of modules that this dApp wants to use. Okay, so those are some very high-level examples of what can be done in the future, or even now. So the upshot of all of this is that you can get better blending of off-chain, on-chain, and cross-chain components. This is a nod to the convergent application thing that I was talking about, so that dApps don't feel as single-purpose. The thing is, though, like many complex systems, when you introduce new solutions, you can also risk introducing new frictions. And I'll gloss over this very quickly. Now that we want fewer approvals, currently right now with the standards that we have is that you do migrate to a new account and funds get fragmented. Even though there's fewer approvals, that is a user friction that we might have to deal with. Obviously the evolving standards and there's different ways in which that we can try to deal with this. So there's also enhanced programmability. This means like the contract can run code. It's like currently the EOA, they don't have, like it can't actually read state or anything like that. So EOA is still initial signer for it. So that's another user friction that is possible. Gas abstraction, paying gas and other tokens other than the native token of the network. This is not gas optimized yet because we're still prior to 7702 being on Pectra, I think. And then we have account recovery. These can introduce unfamiliar user patterns. 
So the one that I, again, like the one that I want to focus on is the onboarding aspect, which would be the modular authentication, because there's a plethora of solutions that you can choose from for implementing the onboarding of smart accounts. And so I want to hand it over to Greg so that he can show you some best practices on how to implement a helpful experience. Thank you, Sydney. Sorry. Okay, perfect. Sorry, I forgot where we were at. How many people here are familiar with, like, the CAPEs, Chain-Agnostic Improvement Proposals? This is good. We need to change that. How many of you are familiar with EIPs? Keep your hand up. How many of you actively check the EIPs or the ERCs? How many of you are wallet developers? How many of you are applications as opposed to smart contract accounts? Yeah, we have to change all this. Basically, in order to get any of this done, we need to start focusing on standards more. And so I'm going to verbally assault you all with some very important ERCs and show you exactly how we get to, frankly, what's probably the end game wallet to dap interaction where those pieces fit together and how we do it. It's going to take a couple of ERCs, capes to get there. Frankly though they're all available. They're live and there's other versions being iterated on. So we're not too far away. It's just going to take some work because we need more people actively working and looking at standards. These are the main ones we're going to be talking about. 275, 4337, which we should all be familiar with, 7702, 7755, 17715, 7679, and RIP7575, which I'm not going to cover. Let's start, though, with some, like, visual stuff. And then I'll kind of stitch it together at the end. When you first go, this is a website that's live, basement.fun. When you first go to the website, you get to experience the entire website without ever needing to get a wallet prompt prompt without needing to actually install anything without even needing your funds the way they do that is by embedding a embed they put an embedded wallet behind the scenes for you and there's a reason they go with the embedded wallet behind the scenes it's because you don't have to actually they don't want to force you into a smart contract account right at the beginning because that would just cause further fragmentation if you come to the app later with your own account that has a smart contract wallet enabled. Now we're already just fragmenting. We're splitting up your accounts where they don't know about each other. So now you're having to maintain effectively two separate bank accounts. I don't know if anyone's done banking with two bank accounts, but it's, like, unenjoyable. So I don't know why we'd force our users to do it. Similarly, which is 7555 and K275, we have this issue. Do we have any mobile app developers here? Maybe mobile game developers? Have you ever tried to actually connect to somebody else's smart account or even someone else's external wallet? It's borderline impossible. And that's because you can't discover. The ability to discover a wallet and then know where it's deployed is practically impossible. That's what Cape275 tries to do. It's the idea that you basically look up gregthegreek.eth and instantaneously I'll figure out where the address is, what type of account is it, EOA, smart account, safe account, is it someone else's es. And we can then use a wire protocol, which is the Oeth one, to basically go and connect to them. Because we do know everyone's familiar with pass keys. 
We know that that's probably the best way to remove seed phrases. But how many of you know that your pass key is scoped to the website you issued it from? Or the app you issued it from? Or the app you issued it from? That means if I generate a public key from the passkey, it's only going to be found there. If I go to chainsafe.io and I issue passkeys that way, if I go to then sprinter.tech and try to issue a passkey there, I'll never get the same pub address. And this is a fundamentally big problem because we're all issuing pass keys with our smart accounts. So we need to create a way to bridge that gap. And that's what 7-triple-5 aims to do. And I'll get into it a little bit. We're going to stitch this all together. Don't worry. It's going to be less confusing, I promise. 7-7-1-5. This is pretty ‑‑ this is also an interesting one because what we do is we start to say, hey, if I'm going to remote into somebody else's smart account and I'm going to ask to use that smart account with a passkey that I don't have, I don't want to constantly go back and forth. The UX breaks. You're constantly flipping out of a mobile game that's supposed to keep you in the game, and now you're going out to sign every single time you need to do something. So what 7715 tries to do is basically says, hey, we're going to let a random key, aka this is where we tie the embedded wallet back into the picture, the embedded wallet can now act as a signer to the wallet for a given period of time. And if you're thinking about the Web2 model, this is a JWT token. All we've done is brought back the JWT and said put some cryptographic properties to it. And that's how we can maintain it. So you can maintain your UX in the app and use an embedded wallet. Now you don't need to deploy them in a smart contract account. You can use what they already have in preexisting infrastructure. Finally 7579, this is an interesting one. You do want a smart account feature. There's stuff that the smart account doesn't actually provide you. It's simply a plug-in. Build it as a plug-in. You tell the wallet to register the plug-in. You get the full benefit of deploying a smart contract account for someone without fragmenting their balance. You remain the existing experience. And the big thing to discuss here is, which I'll get into and then is it the next slide? No. Oh, yeah, it is the next slide. Perfect. And so this is these are all the EIPs. ERCs. That stitch it together. And what we'll do is we'll go through an interactive game of actually stitching together to show you kind of like how the end game in one to two years' time will actually look like. Because we have a lot of teams that are doing really good infrastructure work at the application layer, but frankly, because we don't have all of these things plumbed in, we are frankly setting up the applications for a little bit of failure and there's going to be a period in time where we're going to see mass migration of self-deploying app infrastructure outwards. And my main argument I have for this is there exists two spectrums of UX that we need to maintain as an app developer. There's your local app UX, which we have full control over, and that's why we're suggesting you deploy with embedded wallets, because you can fully control it. You can make your perp decks that's fully on-chain look and feel exactly like Robinhood. You can make your perp decks that's fully on chain look and feel exactly like Robin Hood. You get all the benefits. So there's no reason to go anywhere else. 
The other thing is we have wallet land UX. And most app developers think we don't have control over this. And that's where we're hoping to tell you something else. Because you do. You have to control that you don't change it. The minute you go and change someone else's experience of how they bank effectively with your app, it's breaking it. And when you deploy a smart account into the application layer, we've gone back to the old principle of, you know, think about like Starbucks. You have to put money into the Starbucks app. Your bank now no longer knows how much money. If you moved $100 in there, you don't know that you have your bank account value plus $100 that you have unspent in Starbucks. You just have minus 100. We have the ability to not do that. We have the ability to not fragment and deposit funds but rather borrow funds directly from the bank with a scoped permission. And that's what we're going to achieve here. So what do we need to do to get this done? The whole pipeline is going to look like this. We want people and wallet info teams to deploy 4337. We want them to be the smart account issuer. If they're not the smart account issuer, if the app ends up being the smart account issuer, you need to be able to let the user rage quit. You need to be able to set permissions. Like, are you comfortable as an app developer saying, oh, yeah, I want to make sure when I get into a chain-abstracted multi-chain future that the user always has one eth and mainnet because they always want to make sure they have gas to pull out some Aave positions or compound positions that I don't know about. They might want that preference. But are you really ready to actually deploy all that info or do you just want to focus on your app? Let the wallet handle that part. We then always pass keys are the way to go. It's already like this today. And it's only going to get better. We have solutions to do like multi-account pass keys where the address persists. The app is where you're going to deploy the embedded wallet. Within that app, we see CAPE 275 come in. And I want to think about clicks, so keep your hand up thinking about the clicks we're going to do here. When the user first auths onto your website, they're going to type in their identifier, email, phone number, ENS name. They'll click enter once. I'll then go and look up in the registry, you figure out who issued it, who their provider is, the SmartCon account provider, and you go to that website directly. This is now going to look like an iframe. This is what 7755 does. Think about that iframe you're used to, you know, when Google says, hey, someone says can you log in with Google so I can get access to your calendar information. It's the exact same flow. That's going to pop up the destination where the smart account is. It might be the NOSA safe website. Once you get in there, along with that message, we're going to pass in 7715. That's now going to say, hey, I'm a perp desk. I want to trade 1,000 USDC every hour for the next 24 hours. That means your total bound is 24K worth of USDC that could be taken by this application but it's defined, strictly defined the same way Google strictly defines your log in process. 7579 I talked before. You want that extra smart contract functionality on the same wire protocol, the same 7555 wire protocol. That should be 7555. You'll be able to attach whatever else you need. You need that extra functionality as a plug-in. It's simple. You just add it in. The screen shows the exact same view. 
7811, for anybody that follows standards, you probably haven't seen this yet. It came out a few days ago. But the simple thing is, you know, when you're an app, how many of you develop your app and then go, okay, cool, step one, hit the RPC for every available token balance that this user has? There's no need. It's already been done for you. The wallet knows what the user has available. And in a cross-chain, multi-chain world, the wallet also knows how much is available on other chains, as well as what the spending power is. I have USDC. I need to use ETH to go buy an NFT. It knows that I can convert USDC. The app doesn't need to know about that. So this makes an extension for it. There's a plethora of other ones we can add on here. But if you think about it, at the end of the day, what happens with this experience? Remember how I said to think about the number of clicks we did for the user here? There's two. We've reduced it to two. You go from looking up your account to automatically generating the pop-up iframe. If anybody's used Coinbase's wallet recently, there's that iframe that pops up. That's your OAuth window. The user clicks. One click, the OAuth window is up. One more click to confirm. And now you have the full power of that smart account locally while only using an embedded wallet, and you can ensure the security remains constant because the permissions are in place. And you now have full flexibility. You never had to deploy any sort of wallet infra. You don't have to care about the wallet infra. You don't even have to care about what type of implementation they have, because you can attach the plug-in you need. So as long as they're 4337-compatible, you can boot in anything else you want. You don't have to go and deploy them a custom implementation. Your onboarding experience just 10x's right out of the gate. This is what the endgame realistically will look like. How close are we? We started with 7555. Realized we needed 7715. This one is almost done; there's a few last little things needed. Modules by Rhinestone have been done forever, and frankly, if you haven't looked at them, you should. This will be pretty quick. CAIP-275 is live. Lit's using it. And we already know we've got passkeys and 4337. We just need the permissions module to finish. And we need to get the main wallet providers to start adopting the slugs so that we can have those pop-ups happen. And you have a full OAuth journey, so that when you go to onboard your grandmother, or your mother, it looks exactly like logging in with Google. And you can issue all the infra underneath blindly. And so this is the endgame for wallets. This is how we envision it. And frankly, we need more people looking at standards and helping just see what's out there and realize what they can do with what's available. Because all this already exists. We can have this. We can have a two-click journey, and you get a smart account without ever having to deal with that infra yourself. Oh, there's more slides. Ah. Yeah. Time for ground zero of a good dapp experience. Thank you, guys. Let's keep in touch. Okay. We have time for questions. So if you have questions, you still can ask them. We have two questions already. So the first question is: what is it that we're missing or need to change on layer one in order to enable better experiences for everyone, and not just a specific layer two? 
Native account abstraction that doesn't look like 7702. Something closer to 3074 that actually achieved its goal. There are a lot of chains that actually have that at the base layer. And 7702 is good. It's a migration tool. It's a stopgap. We're going to have to release more account abstraction features down the road, natively. But unfortunately, that's the biggest one. Okay. So our next question is about passkeys. You said that the problem with them is being scoped to specific sites, but later you said they're actually good, that we should use them. So what is the solution here? Two separate problems. So the one problem is that when I issue a passkey, you get a public address. When you do passkey authentication, you know, Face ID, Touch ID, whatever it may be, that issues you a public address. And it works the same way that a YubiKey works. What happens is they basically say, cool, what's the DNS record? And they match that DNS record to a new public key generated by the passkey. So the problem is, if I go to another website, it's going to either get a new one or create a new passkey. And so you can never have that same address persist through. And so that's why you have to push outwards. If you want access to the signing key of the smart account, you actually need to go to whoever generated the passkey in the first place. And that's why we need that OAuth window. The same way Google doesn't let you just, like, OAuth inside of an API call inside some random website. It would just be very insecure. Okay. Next question: if we connect our wallets to apps in the way that you described, and all our savings are in these wallets, is it going to be safe enough? How many of you trust Gnosis Safe? Or Safe? Lovely. Do you trust Safe to provide you the UI you need to ensure you're signing the right message? This isn't rhetorical. Okay, good. In that case, if you trust them to provide you the correct UI when you sign a message, why would they not provide you the correct UI when they say, hey, by the way, this thing's going to drain your account? Instead, it's going to say, hey, it just wants to spend, like, a thousand USDC for the next, like, two hours. And then the token expires and they have to refresh that token. Same thing as a JWT with your bank. If you trust the people providing you the wallet, then you should have no problem actually using it in the system. It's exactly why I said: you as an app developer, are you actually comfortable deploying all this infra? Even the person giving you the smart account, are they giving you all the infra you need to ensure your users are safe? Push the barrier to the wallet. There's a last question, I think. Next question. In the endgame, are token balances still held in external wallets or a smart contract address? 4337. So, smart account. Okay. And banks often have the backup of support. Will the wallets provide the same backup? I mean, pick a good provider. MetaMask support is pretty good on Twitter. I think this question is really interesting. It's more like an insurance type of question. Because banks have, like, FDIC. It seems like this question is asking about support of user funds, which I think is external to wallets themselves. Or recovery mechanisms. Yeah, exactly. Okay, if there are no more questions, that's it. Please give it a round of applause for Greg and Cindy. 
Thank you.", + "sources_streamethId": "6736baf49dbb7a90e12ccb78", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731573000000, - "slot_end": 1731574800000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1Qe6obqukS9lTToSF06QtJ1Ovqj8Dzv1P-Vi0z9-wI7w", - "resources_slides": null, + "slot_start": 1731552300000, + "slot_end": 1731555900000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1DNOsJyO6qTZrHr9rXUHPF9-HZEOF4NkaTmABCndOG0g", + "resources_slides": "https://drive.google.com/file/d/1gAnV6ub_U-4kKpkth9CHr5JYDssps6Hh/view", "speakers": [ - "cindy", - "gregthegreek" + "florent", + "hiroyuki-tachibana", + "michael-elliot", + "nico", + "yanis" ] }, "vector": [ @@ -826108,8 +823808,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -826213,6 +823911,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -826273,6 +823972,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -826733,7 +824434,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -826801,6 +824501,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -826898,13 +824599,14 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -826912,7 +824614,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -826965,6 +824666,17 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -826996,7 +824708,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -827068,6 +824779,38 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -827366,53 +825109,9 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 2, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -827429,44 +825128,47 @@ 0, 0, 0, - 0, - 0, - 2, - 0, 0 ] }, { "session": { - "id": "usc-ultimate-solidity-championship", - "sourceId": "UE8WVS", - "title": "USC Ultimate Solidity Championship", - "description": "A 30-minute Solidity programming competition where the winner is determined objectively, permissionlessly, and transparently after the time expires. The Ultimate Solidity Championship (USC) is an event designed to showcase the skills of the best Solidity developers in the ecosystem. Its primary goals are to highlight Solidity programming as an art form, onboard more developers, educate the community, and foster collaboration, ultimately enhancing Ethereum's long-term impact.", - "track": "Entertainment", - "type": "Mixed Formats", - "expertise": "Beginner", + "id": "vadcops-leveraging-starks-for-tailored-proof-generation", + "sourceId": "BEJPG8", + "title": "VADCOPs: Leveraging STARKs for Tailored Proof Generation", + "description": "VADCOP is a proving method using STARKs to achieve cost-efficiency by focusing on active parts of the execution trace rather than the entire trace. Traditional modular designs, which divide machines into components and use relational arguments, face inefficiencies due to the padding of unused cells with dummy values. 
VADCOPs optimize performance by allowing maximum modularity and avoiding unused components, making proof generation precise and efficient without unnecessary redundancy.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "Solidity", - "Programming", - "Competition" - ], "tags": [ - "Art", - "Hacks", - "Public good" + "vadcops" ], - "language": "en", - "speakers": [ - "five" + "keywords": [ + "STARKs", + "VADCOPs" ], + "duration": 1347, + "language": "en", + "sources_swarmHash": "9c2a0481d79b3081115348cb2d026e8ce0cd54d6d9a79ef0a589e2d6c845643e", + "sources_youtubeId": "cQWKTyyoeto", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67344f579dbb7a90e1b0dad9", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673453859dbb7a90e1de05dc.vtt", + "transcript_text": " essentially distributing a verifiable binary version of the chain from one machine to another, essentially. And what this diagram is showing is you've got a set of pages and they're all hash verified. Now, what are the implications of all that process? Well, basically what you end up with is very quick sync performance effectively so the big difference between Eragon to an error Eragon 3 is the amount of time it takes to sync something and this is You see this particularly on large chains so Basically my role in the Eragon team. I mainly work on Polygon rather than Ethereum. So, basically, my role in the Aragon team, I mainly work on Polygon rather than Ethereum. So, basically, my role in the Aragon team, I mainly work on Polygon rather than Ethereum. So basically my role in the Aragon team, I mainly work on Polygon rather than Ethereum. So basically my role in the Aragon team, I mainly work on Polygon rather than Ethereum. So basically my role in the Aragon team, I mainly work on Polygon rather than Ethereum. So basically my role in the Aragon team, I mainly work on Polygon rather than Ethereum,", "eventId": "devcon-7", - "slot_start": 1731582000000, - "slot_end": 1731583800000, - "slot_roomId": "classroom-b", - "resources_presentation": "https://docs.google.com/presentation/d/1flrl1DVDOcGQrL2WtGO0tRQUbwP7P_Xk3IQeWVr_wIU" + "slot_start": 1731479400000, + "slot_end": 1731481200000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1vlLbALGk1-PoxsWpK3hZ1d85x7eK1bnX8dA5Jjf4Yj0", + "resources_slides": "https://drive.google.com/file/d/1k4WOtyYcQjruZ-dITnCUItGJ6Dn33vN2/view", + "speakers": [ + "felicia-barcelo", + "hector-masip-ardevol" + ] }, "vector": [ 0, @@ -827478,13 +825180,8 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, 0, + 6, 0, 0, 0, @@ -828174,6 +825871,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -828328,8 +826027,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -828474,7 +826171,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -828758,6 +826454,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -828783,9 +826480,9 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, 2, 0, @@ -828805,44 +826502,46 @@ }, { "session": { - "id": "using-reth-execution-extensions-for-next-generation-indexing", - "sourceId": "YUFRTQ", - "title": "Using Reth Execution Extensions for next generation indexing", - "description": "Recently, Reth and Geth released the ExEx and live tracer features, respectively, which share similar functionalities. Both provide real-time, detailed access to chain and state events. 
As ExEx developers begin to persist this data and explore ways to make it accessible to users, new questions arise: how can we best serve this data to users, and what might the indexers of the future look like?", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Intermediate", + "id": "verifiable-open-source-vaccines-to-save-millions-of-lives-from-the-developing-world-up", + "sourceId": "S7LEHK", + "title": "Verifiable Open Source Vaccines to Save Millions of Lives from the Developing World Up", + "description": "Viruses & bacteria like HCV, Strep A, and TB cumulatively take millions of lives each year – effective vaccines against them would considerably reduce that death toll. Unfortunately, big pharma isn’t interested in investing in developing these vaccines, and even if they did exist, rising vaccine hesitancy may prevent many from benefitting. PopVax is pioneering a new model of developing first-in-the-world verifiable vaccines at dramatically lower cost in India with radically greater transparency.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [ - "client", - "plugin", - "indexer" - ], "tags": [ - "Layer 1", - "Developer Infrastructure", - "Tooling", - "plugin", - "Developer Infrastructure", - "Layer 1", - "Tooling" + "DeSci", + "Effective Altruism", + "Public good" ], - "language": "en", - "speakers": [ - "alexey-shekhirin" + "keywords": [ + "vaccines", + "biotech", + "public health" ], + "duration": 1022, + "language": "en", + "sources_swarmHash": "59e01008d62db1db4ee007dd926f53ffa4d235af99b1d8c6d3bbc2729e6a1fa1", + "sources_youtubeId": "c4upJlvW_fE", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6735ba4a9dbb7a90e18ca9f7", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ba4a9dbb7a90e18ca9f7.vtt", + "transcript_text": " Hey folks, I'm Soham Sankaran, I'm the founder of a company called Popfax, which works on verifiable open source vaccines, intended to save millions of lives from the developing world up. And I'll explain what all of that means. First, let me say something that I think has unfortunately become controversial of recent times. Vaccines are one of the most effective public health interventions known to man. Over the last 50 years, they're estimated, and this is just childhood vaccines, are estimated to have saved over 150 million lives across key pathogens that attack younger children and adolescents. And when you add adult vaccinations, they're likely tens of millions more. But there are pathogens, important pathogens that are quite prevalent for which there are no existing vaccines. These include tuberculosis, strep A, and HCV in particular. So just these three pathogens, and here I'm talking about tuberculosis in adults, not tuberculosis in children, kill over 1.8 million people per year across the world, and there are no effective vaccines for them today. Unfortunately, instead of speeding up, vaccine development is slowing down. It took the malaria vaccine, which is one of the more recent infectious disease vaccines to be licensed, 35 years to go from concept to licensure. And these programs now routinely cost $1 billion plus, which is a very large amount of money to invest in a single drug. 
And then, because they're only dosed, let's say, between one and three times usually, unlike GLP-1 agonists like Ozempic, which are dosed considerably more frequently, pharma companies do not see them as a particularly high return on investment. And so they have pulled back, especially in the post-COVID era, from investing in new infectious disease vaccine programs, in particular for pathogens like TB, which they see as being only relevant in developing countries that are poorer. But there's a potential solution here. So this is a sort of graphic of the top 10 vaccine manufacturers in the world by volume in 2021, during COVID, but excluding the COVID-19 vaccines. And what you can see here is that Indian vaccine manufacturers, including the Serum Institute of India, are very substantial in their presence. They account for almost 40% of the volume of vaccine doses shipped globally. And so why don't we v/acc, so to speak? Why don't we accelerate vaccine development by relying on this capacity that we have in India and other developing countries? And also the fact that, you know, you can do things cheaper in countries like India, right? We can do this, we think, 10x faster and 10x cheaper. And you might ask why people aren't doing this already. There are already these big vaccine companies in India. Surely they must be thinking along these lines. Unfortunately, India as a country spends almost nothing on research and development. It is a joke how little we spend on R&D across government and across private industry. And Indian companies, including vaccine companies, are not particularly interested in taking technical risks, such as developing new vaccines for pathogens where vaccines have been hard to develop. And so PopVax is trying to solve this problem. We are trying to solve long-standing problems in vaccine design, including the three pathogens that I mentioned, where people have made attempts but without success, using mRNA and computational protein design, tools that have just reached the point where we can actually use them to make novel vaccines that couldn't have been made before. As I'm sure many of you are aware, Demis Hassabis, John Jumper, and David Baker just won the Nobel Prize in Chemistry recently for their work in protein structure prediction and protein design. And so these tools are just now reaching the maturity where we can actually use them productively. And we want to leverage, as I said, the talent, cost, and speed advantages of operating in India, which is to say we are the only company, or one of the only companies, doing this kind of novel vaccine development in India, and so the best talent in the country, folks who want to work on these world-changing problems but don't have the opportunity, essentially come to us. Unfortunately, no one wanted to fund PopVax. When I started this three years ago, VCs and non-profits essentially didn't believe that the talent pool existed in India to do this. The Gates Foundation gave us an early $100K check, but that was not enough to build a vaccine platform or take anything to clinic. As I said, these vaccine programs routinely cost a billion dollars. Even if you can do it for 10x cheaper, that's still hundreds of millions of dollars, right? And we had some good early data from about two and a half years ago, but it had rendered both me and the company bankrupt. Then there was a massive pump in Shiba Inu coin, which I'm given to understand is a meme coin of a meme coin. 
And Vitalik founded the biosecurity and public health organization, Balvi, and one of their investments was us. And so they've now funded us a cumulative $15-plus million. And I think it's interesting that it took Vitalik, somebody who is outside of the sort of general hierarchy of public health funding, to see what I think is obvious, which is that the talent is there. It was really a resource constraint that meant that folks in India had not had the opportunity to go after these problems. So let me talk a bit about what we've actually done with that money and what that means for the future. So this is extremely dense, and I apologize in advance, but I'll explain what's going on here. That green thing that you see there is the receptor binding domain, or RBD, of the SARS-CoV-2 virus. It is the immunodominant antigen of SARS-CoV-2. Antibodies against that particular protein are the key drivers of neutralization and protection in the COVID vaccines. So it's a subset of the spike protein that I'm sure all of you have heard of. It's not the entire spike, it's just a part of it, right? What we've done is we've pioneered an approach where, within mRNA, we can encode a self-assembling virus-like particle, which is what's in blue, that basically displays a whole bunch of copies of this RBD protein. And in doing so, we've achieved two things. One is we've achieved considerably more potency. So if you look at our highlighted, bolded bar here, two micrograms of our RBD-VLP display mRNA, which uses this approach, provides a 22x greater neutralizing antibody response, which correlates quite highly with protection, compared with two micrograms of the full spike mRNA sequence from one of the U.S.-approved COVID vaccines. And this is all in mice. And what's particularly interesting about this is, you know, we can use it to make a vaccine that's more potent. We can also use it to make a vaccine that's potentially safer. Because, as you can see at the top, with 95% less dose, just 0.1 micrograms, we can achieve the same result as you would with two micrograms without using our strategy. And so I can inject much less mRNA, much less lipid nanoparticle, and have potentially a much safer product overall. And the kinds of things that Philip was talking about, you know, all get worse as you increase the dose of whatever your vaccine product or drug product is. The other interesting thing that we got here is breadth. So if you think about COVID-19 and why the vaccines were less effective as we got later into the pandemic, the reason isn't that they didn't have a durable response. The reason is that the pathogen evolved. Variants of the pathogen that were no longer susceptible to the antibodies elicited by the original vaccines became prevalent. And as a result of that, we ended up with a situation where what were very effective vaccines, at even preventing transmission, became relatively ineffective as new variants evolved. However, using this mRNA-encoded VLP strategy, what we've been able to do is actually elicit antibodies that neutralize a whole breadth of variants. So our construct is the red line, which is able to potently neutralize, you know, even as it's diluted, a whole bunch of these other variants: wild type, Gamma, Lambda, Omicron BA.1, whereas the US-approved COVID vaccines, the original ones in blue, are not able to neutralize those other variants. 
So this approach, which we are, you know, among the first to pioneer in mRNA and protein design, has potentially substantial gains for COVID and for other pathogens like influenza. Now, let me talk a little more broadly about what strategies like this are actually trying to do, right? If you think about any pathogen, you know, it has a whole bunch of antigens, which, in the case of a virus, are going to be proteins that elicit certain kinds of immune responses, certain types of antibodies, right? And then some of those antibodies are going to be functional. You know, they might sort of neutralize the pathogen, right? Some of the antibodies are going to be non-functional. They might bind but not neutralize, or not bind at all, right? And then within the context of these functional antibodies, there's some subset of antibodies that you might consider broadly functional. And what that means is, even as the pathogen evolves, even as there are additional variants that come up, those antibodies, which are some subset of the original antibodies, remain effective. So the intention of our vaccine design approach is basically to move the distribution of antibodies elicited, whether it's in an animal or a human, towards these broadly functional antibodies, which have a chance of providing durable protection against even new variants that might emerge. And this is relevant in the context of COVID for this variant evolution story. In the case of some of the other pathogens that I mentioned, for example, HCV, it is absolutely essential. Because HCV, as a pathogen, like HIV, evolves within its own host. And so, you know, if you were to be infected with HCV, you might have thousands of different variants of the HCV pathogen within your own body. And so an antibody response that is effective against just one of them is not going to be effective at clearing the pathogen. So we've used a variant of what's called an epitope scaffolding approach to basically attempt to elicit specific types of antibodies that provide this kind of broad functionality and broad protection. And what we've been able to do in just three years is to run these processes of designing and testing in animals these novel immunogens, which can elicit these antibodies, much faster than anybody typically does in vaccine R&D programs. Again, as I said, leveraging the fact that we can do this research much more cheaply and much more quickly in India. And what we found is mRNA in particular gives us an advantage here. mRNA is, of course, not the actual immunogen. What you're injecting in mRNA is almost code that encodes a specific design of immunogen that's intended to elicit some antibody response. And so because we're using mRNA, which is a standardized process, we can test thousands of different immunogens, and then, when we alight upon one that actually gives us the response we want, we can translate it not just into a lab environment but into an actual clinical environment, into a clinical study, very quickly, which is not something that you can do with conventional vaccines as easily, which is why you get these extremely long timelines, right? And so what we know so far is basically that this v/acc approach in India works, right? We've been able to do this at 20x cheaper than you would usually spend on a preclinical vaccine program to build a new platform and get to phase one. 
We've 10x'd the testing throughput of a normal vaccine program. We've tested, you know, thousands and thousands of novel LNP formulations of novel designed immunogens, which are basically vaccine designs. We've tested them in vitro, but we've also tested them in vivo, 500-plus, and we've been able to achieve the results you saw in the beginning, which is that our vaccine appears to be much, much better than the existing COVID-19 vaccines. So what that means is I can take 10x the number of shots on goal for the same amount of funding as a company in Boston or San Francisco, and potentially save 10x the number of lives in the long run. Right? If somebody gives $300 million to a company in Boston (and this is all based on a real comparison with a biotech company that's developing vaccines and doing quite well doing so), they might take one candidate through a phase one clinical trial and be getting ready for a phase two clinical trial with a new platform. For us, with that amount of money, with $300 million, we've been able to build a new platform and would be able to take 10 new candidates to phase one and two and potentially bring multiple products to licensure. And again, in the long run, that saves a lot of lives, right? Because every year we wait to develop these vaccines, millions of people are dying. Another key piece of this is that we've built our own GMP manufacturing facility. So what that means is we have the ability within our own facility to make clinical doses in a way that's safe to inject in humans, such that we can very quickly take these new candidates and advance them to clinical studies. And here I'll reference what Philip was talking about a little bit, right? I think it's critical to ensure that we build a process that is not only, you know, safe by the standards of existing regulatory norms, but is legible enough and understandable by the general public that they trust what comes out of our work, right? And so the good news here, and in fact I'm announcing this today (we just announced it via press release this morning), is that NIAID, which is part of the US National Institutes of Health, has selected our next-generation COVID-19 vaccine as part of the US government's Project NextGen for a phase one trial, which will happen in the US early next year, and which to my knowledge is one of the first clinical trials of an Indian-designed vaccine in the U.S. In fact, no Indian vaccine or Indian-designed vaccine has ever been approved in the U.S. No Indian company's vaccine has ever been approved in the U.S. And our intention is to have our vaccines available all over the world, right, not just in rich countries, not just in poor countries, but to everybody. And so this is very exciting news, because I think what this means is that our approach, which, as I said up top, is an approach that at the beginning nobody supported, right? Nobody believed that we had the talent in India to do this. Nobody was certain, you know, or people, in fact, were quite certain that we wouldn't be able to do this, right? 
And while it took an early bet from Gates and a substantial bet from Vitalik and his team, this has now been essentially co-signed by one of the most credible organizations in public health, right? So NIAID thinks that this vaccine works very well, at least pre-clinically, and they think it's worth evaluating in humans. The COVID vaccines have, you know, been somewhat maligned, but best estimates suggest they saved tens of millions of lives. We can quibble about specific numbers, but it's likely a very large number. But again, as Philip was talking about, there's been a lot of questions about whether the processes followed in approving these vaccines were processes that are appropriate for human health. And at the same time, on the other side of the coin, the reality is that a lot of people who wanted these vaccines in developing countries didn't get them, because Pfizer and Moderna were not that interested in selling their vaccines at knockdown prices in poor countries, right? And so what we've done is we've worked with Balvi to come up with a model where we will be open-sourcing our COVID-19 vaccine. We will not be enforcing our intellectual property on that vaccine for a number of years. And what we'll be allowing people to do is to verify not just the R&D processes that led to that vaccine, but also the manufacturing processes. So that manufacturers across the rest of the world can not only make copies of this vaccine themselves, which they can distribute at low cost, but also so that people can understand whether the processes that we've used in developing and manufacturing this vaccine are compatible with what we say out in public and are safe for them, right? They can have independent scientists evaluate these processes in a way that existing vaccine companies don't make enough information available to do. And that's something that we can do because we are comfortable releasing far more information than anybody has ever done for a vaccine development or manufacturing program before. Let me just briefly talk about what we call the PopVax Million Lives Mission. As I said, there are these three pathogens that collectively kill 1.8 million-plus people per year. We want to develop effective vaccines against all three of them, which we think can save over a million lives per year. We want to do this using a new sustainable model of funding for public health vaccines, similar to what we've done with Vitalik and his team, where, you know, we will provide the full details needed for vaccine verification, R&D improvement, and manufacturing openly. We'll cap profits in developing countries and allow for open licensing of this intellectual property on transparent terms, and we will have uncapped profits in rich countries. And this is a model that we want to advance, that we think could be the future of sustainable vaccine development. Yeah, I'm happy to take your questions outside. And please feel free to email me if you're interested in working with us, funding us, or collaborating with us. Thank you so much. 
That was great.", "eventId": "devcon-7", - "slot_start": 1731484800000, - "slot_end": 1731486600000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1grvRBeTUC4cPjxwSFQPy6d3VmlJ6P3Y2_R99fgeourE" + "slot_start": 1731572400000, + "slot_end": 1731573300000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1sK71lOtl_9Q8SbWOBVtDNhLVBhc--pIc-AxaYE2toIM", + "resources_slides": "https://drive.google.com/file/d/1ZnLUJlsKkwKWtFHaJ-j4b2KAgjWukxqm/view", + "speakers": [ + "soham-sankaran" + ] }, "vector": [ - 0, - 0, - 0, 0, 6, 0, @@ -829539,13 +827238,13 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -829606,13 +827305,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -829645,7 +827342,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -829703,6 +827399,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -829727,6 +827424,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -829841,6 +827539,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -830127,7 +827826,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -830157,7 +827855,6 @@ 2, 0, 0, - 0, 2, 0, 0, @@ -830176,55 +827873,44 @@ }, { "session": { - "id": "utilizing-national-ids-in-the-ethereum-ecosystem", - "sourceId": "PR78EL", - "title": "Utilizing national IDs in the Ethereum ecosystem", - "description": "This panel brings together developers of MynaWallet, Anon-Aadhaar, Proof of Passport and zkPassport, who are exploring and developing applications that utilize government-issued IDs in the Ethereum ecosystem. We will discuss the characteristics of each ID system and what functions can be realized using tech stacks in the Ethereum ecosystem and cryptographic technology.", - "track": "Real World Ethereum", - "type": "Panel", + "id": "verifier-alliance-inside-of-the-contract-verification-pipeline", + "sourceId": "Q3EDF8", + "title": "Verifier Alliance: inside of the contract verification pipeline", + "description": "The talk will guide you through a smart-contract verification process step by step while introducing some technical details and challenges verification services have to handle. 
Will describe what we have learned building \"Verifier Alliance\" - a new collective that unites different verification providers to have an open and shared database of smart contracts (verifieralliance.org).", + "track": "Developer Experience", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "Civil Resistance", - "Privacy", - "Identity", - "Civil Resistance", - "Identity", - "Privacy" + "DevEx", + "verification", + "contracts", + "DevEx" ], "keywords": [ - "National IDs", - "Selective Disclosure" + "Contract", + "Verification" ], - "duration": 3351, + "duration": 519, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "69a3730194c47cec0c3005a9eed1c4f8dd9c959160dc3ba772e0007bc7847a61", + "sources_youtubeId": "2U4Wad2ebwI", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736baf49dbb7a90e12ccb78", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731552300000, - "slot_end": 1731555900000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1DNOsJyO6qTZrHr9rXUHPF9-HZEOF4NkaTmABCndOG0g", - "resources_slides": null, + "slot_start": 1731472800000, + "slot_end": 1731473400000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1WNKyHeXOwkXmvaf0GIGfAtO5R7MQYyUbdRwxgk23ZzQ", + "resources_slides": "https://drive.google.com/file/d/1LcM0tpfszmP_-tK-zsx9X767Zip80dBX/view", "speakers": [ - "florent", - "hiroyuki-tachibana", - "michael-elliot", - "nico", - "yanis" + "rim-rakhimov" ] }, "vector": [ - 0, - 0, - 0, 0, 0, 0, @@ -830331,22 +828017,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -830392,8 +828062,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -830922,8 +828590,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -830948,6 +828614,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -831089,7 +828756,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -831202,7 +828868,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -831257,6 +828922,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -831411,6 +829077,21 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -831536,7 +829217,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -831546,6 +829226,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -831558,41 +829242,41 @@ }, { "session": { - "id": "vadcops-leveraging-starks-for-tailored-proof-generation", - "sourceId": "BEJPG8", - "title": "VADCOPs: Leveraging STARKs for Tailored Proof Generation", - "description": "VADCOP is a proving method using STARKs to achieve cost-efficiency by focusing on active parts of the execution trace rather than the entire trace. Traditional modular designs, which divide machines into components and use relational arguments, face inefficiencies due to the padding of unused cells with dummy values. 
VADCOPs optimize performance by allowing maximum modularity and avoiding unused components, making proof generation precise and efficient without unnecessary redundancy.", - "track": "Applied Cryptography", - "type": "Talk", + "id": "verkle-integration-in-reth", + "sourceId": "T8LKTM", + "title": "Verkle integration in reth", + "description": "This talk concerns the presentation of EPF Project: Verkle integration in reth.\r\nThe project comprised of replacing the current state-commitment structure in reth with verkle tries and other modifications for statelessness, including implementing EIPs such as EIP-4762: Statelessness gas cost changes (to REVM), EIP-6800: Ethereum State using a unified verkle trie, EIP-7709: Read BLOCKHASH from storage and update cost, and passing the associated execution-spec-test vectors designed for these EIPs.", + "track": "[CLS] EPF Day", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "vadcops" + "EPF", + "Core Protocol", + "Cryptography", + "Verkle trees" ], "keywords": [ - "STARKs", - "VADCOPs" + "Stateless clients", + "Verge" ], - "duration": 1347, + "duration": 871, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "4c3add1def5321ff44e6a128ca79299c6b8f9c3c4c274d2d79412d0bcf266853", + "sources_youtubeId": "7tOXl-C21CQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67344f579dbb7a90e1b0dad9", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673453859dbb7a90e1de05dc.vtt", - "transcript_text": " essentially distributing a verifiable binary version of the chain from one machine to another, essentially. And what this diagram is showing is you've got a set of pages and they're all hash verified. Now, what are the implications of all that process? Well, basically what you end up with is very quick sync performance effectively so the big difference between Eragon to an error Eragon 3 is the amount of time it takes to sync something and this is You see this particularly on large chains so Basically my role in the Eragon team. I mainly work on Polygon rather than Ethereum. So, basically, my role in the Aragon team, I mainly work on Polygon rather than Ethereum. So, basically, my role in the Aragon team, I mainly work on Polygon rather than Ethereum. So basically my role in the Aragon team, I mainly work on Polygon rather than Ethereum. So basically my role in the Aragon team, I mainly work on Polygon rather than Ethereum. So basically my role in the Aragon team, I mainly work on Polygon rather than Ethereum. 
So basically my role in the Aragon team, I mainly work on Polygon rather than Ethereum,", + "sources_streamethId": "67342f409dbb7a90e1c9090f", "eventId": "devcon-7", - "slot_start": 1731479400000, - "slot_end": 1731481200000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1vlLbALGk1-PoxsWpK3hZ1d85x7eK1bnX8dA5Jjf4Yj0", - "resources_slides": null, + "slot_start": 1731472200000, + "slot_end": 1731473100000, + "slot_roomId": "breakout-1", + "resources_presentation": "https://docs.google.com/presentation/d/1Uq2DzZBnDwPSfrV2xqfm-mlie2DOZZKEwi0Kk44YlQI", + "resources_slides": "https://drive.google.com/file/d/1f2InLp14m4mOw2XdTyOMZmMgHq9fVR-6/view", "speakers": [ - "felicia-barcelo", - "hector-masip-ardevol" + "aditya-gupta" ] }, "vector": [ @@ -831606,19 +829290,12 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -832298,8 +829975,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -832309,6 +829984,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -832365,11 +830041,13 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -832543,6 +830221,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -832655,6 +830334,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -832884,7 +830564,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -832914,8 +830593,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -832932,43 +830611,35 @@ }, { "session": { - "id": "verifiable-open-source-vaccines-to-save-millions-of-lives-from-the-developing-world-up", - "sourceId": "S7LEHK", - "title": "Verifiable Open Source Vaccines to Save Millions of Lives from the Developing World Up", - "description": "Viruses & bacteria like HCV, Strep A, and TB cumulatively take millions of lives each year – effective vaccines against them would considerably reduce that death toll. Unfortunately, big pharma isn’t interested in investing in developing these vaccines, and even if they did exist, rising vaccine hesitancy may prevent many from benefitting. PopVax is pioneering a new model of developing first-in-the-world verifiable vaccines at dramatically lower cost in India with radically greater transparency.", + "id": "viruses-and-chronic-aging-building-a-research-community", + "sourceId": "FX8UQF", + "title": "Viruses and Chronic Aging: Building a Research Community", + "description": "Did you know that mitochondrial dysfunction, inflammation, and cognitive decline are directly accelerated by viruses? In fact, the viruses that infect us over a lifetime are technically not even alive, and therefore must “hack” our human cellular metabolism machinery to do anything at all. 
This talk will overview the first-ever global collaborative network studying & treating chronic viruses as drivers of aging, including how certain lifespan-promoting drugs may help combat viral activity.", "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", - "expertise": "", - "audience": "Engineering", + "expertise": "Intermediate", + "audience": "Community", "featured": false, "doNotRecord": false, - "tags": [ - "DeSci", - "Effective Altruism", - "Public good" - ], - "keywords": [ - "vaccines", - "biotech", - "public health" - ], - "duration": 1022, + "tags": [], + "keywords": [], + "duration": 996, "language": "en", - "sources_swarmHash": "59e01008d62db1db4ee007dd926f53ffa4d235af99b1d8c6d3bbc2729e6a1fa1", - "sources_youtubeId": "c4upJlvW_fE", + "sources_swarmHash": "4aff1867805b00275edb2ba9798287686622d38009dc1a7f1cae7feacc201c23", + "sources_youtubeId": "OcwxbzqP8Jc", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735ba4a9dbb7a90e18ca9f7", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ba4a9dbb7a90e18ca9f7.vtt", - "transcript_text": " Hey folks, I'm Soham Sankaran, I'm the founder of a company called Popfax, which works on verifiable open source vaccines, intended to save millions of lives from the developing world up. And I'll explain what all of that means. First, let me say something that I think has unfortunately become controversial of recent times. Vaccines are one of the most effective public health interventions known to man. Over the last 50 years, they're estimated, and this is just childhood vaccines, are estimated to have saved over 150 million lives across key pathogens that attack younger children and adolescents. And when you add adult vaccinations, they're likely tens of millions more. But there are pathogens, important pathogens that are quite prevalent for which there are no existing vaccines. These include tuberculosis, strep A, and HCV in particular. So just these three pathogens, and here I'm talking about tuberculosis in adults, not tuberculosis in children, kill over 1.8 million people per year across the world, and there are no effective vaccines for them today. Unfortunately, instead of speeding up, vaccine development is slowing down. It took the malaria vaccine, which is one of the more recent infectious disease vaccines to be licensed, 35 years to go from concept to licensure. And these programs now routinely cost $1 billion plus, which is a very large amount of money to invest in a single drug. And then because they're only dosed, let's say, between one and three times usually, unlike GLP-1 agonists like Ozempic, which are dosed considerably more frequently, pharma companies do not see them as a particularly high return on investment. And so they have pulled back, especially in the post-COVID era, from investing in new infectious disease vaccine programs, in particular for pathogens like TB, which they see as being only relevant in developing countries which are poorer. But there's a potential solution here. So this is a sort of graphic of the top 10 vaccine manufacturers in the world by volume in 2021 during COVID, but excluding the COVID-19 vaccines. And what you can see here is that Indian vaccine manufacturers, including the Serum Institute of India, are very substantial in their presence. They account for almost 40% of the volume of vaccine doses shipped globally. And so why don't we V-ACK, so to speak? 
Why don't we accelerate vaccine development by relying on this capacity that we have in India and other developing countries? And also the fact that you know you can do things cheaper in countries like India, right? We can do this, we think, 10x faster and 10x cheaper. And you might ask why aren't people doing this already? There are already these big vaccine companies in India. Surely they must be thinking on these lines. Unfortunately, India as a country spends almost nothing on research and development. It is a joke how little we spend on R&D across government and across private industry. And Indian companies, including vaccine companies, are not particularly interested in taking technical risks, such as developing new vaccines for pathogens where vaccines have been hard to develop. And so PopFax is trying to solve this problem. We are trying to solve long-standing problems in vaccine design, including the three pathogens that I mentioned, where people have made attempts but no success. Using mRNA and computational protein design, tools that have just reached the point where we can actually use them to make novel vaccines that couldn't have been made before. As I'm sure many of you are aware, Demis Hassabis, John Jumper, and David Baker just won the Nobel Prize in Chemistry recently for their work in protein structure prediction and protein design. And so these tools are just now reaching the maturity where we can actually use them productively. And we want to leverage, as I said, the talent, cost, and speed advantages of operating in India, which is to say we are the only company or one of the only companies doing this kind of novel vaccine development in India, and so the best talent in the country, folks who want to work on these world-changing problems but don't have the opportunity, essentially come to us. Unfortunately, no one wanted to fund PopFacts. When I started this three years ago, VCs and non-profits essentially didn't believe that the talent pool existed in India to do this. The Gates Foundation gave us an early 100K check, but that was not enough to build a vaccine platform or take anything to clinic. As I said, these vaccine programs, they routinely cost a billion dollars. Even if you can do it for 10x cheaper, that's still hundreds of millions of dollars, right? And we had some good early data about, you know, two and a half years ago, but it had rendered both me and the company bankrupt. Then there was a massive pump in Shiba Inu coin, which I'm given to understand is a meme coin of a meme coin. And Vitalik founded the biosecurity and public health organization, Balvi, and one of their investments was us. And so they've now funded us a cumulative $15-plus million. And I think it's interesting that it took Vitalik, somebody who is outside of the sort of general hierarchy of public health funding, to see what I think is obvious, which is that the talent is there. It was really a resource constraint that meant that folks in India had not had the opportunity to go after these problems. So let me talk a bit about what we've actually done with that money and what that means for the future. So this is extremely dense, and I apologize in advance, but I'll explain what's going on here. That green thing that you see there is the receptor binding domain or RBD of the SARS-CoV-2 virus. It is the immunodominant antigen of SARS-CoV-2. Antibodies against that particular protein are the key drivers of neutralization and protection in the COVID vaccines. 
So it's a subset of the spike protein that I'm sure all of you have heard of. It's not the entire spike, it's just a part of it, right? What we've done is we've pioneered an approach where within mRNA, we can encode a self-assembling virus-like particle, which is what is in the blue, that basically displays a whole bunch of copies of this RBD protein. And in doing so, we've achieved two things. One is we've achieved considerably more potency. So what that, if you look at our sort of highlighted, you know, bolded bar here, two micrograms of our RBD-VLP display mRNA, which uses this approach, provides 22x greater neutralizing antibody response, which correlates quite highly with protection, compared with two micrograms of the full spike mRNA sequence from one of the U.S. approved COVID vaccines. And this is all in mice. And what's particularly interesting about this is, you know, we can use it to make a vaccine that's more potent. We can also use it to make a vaccine that's potentially safer. Because we can achieve with 95% less dose, as you can see at the top, with just 0.1 micrograms, we can achieve the same result as you would without using our strategy with two micrograms. And so I can inject much less mRNA, much less lipid nanoparticle, and have potentially a much safer product overall. And the kinds of things that Philip was talking about, you know, all get worse as you increase the dose of whatever your vaccine product or drug product is. The other interesting thing that we got here is breadth. So if you think about COVID-19 and why the vaccines were less effective as we got later into the pandemic, the reason isn't that they didn't have a durable response. The reason is that the pathogen evolved. Variants of the pathogen that were no longer susceptible to the antibodies elicited by the original vaccines became prevalent. And as a result of that, we ended up with a situation where what were very effective vaccines at even preventing transmission became relatively ineffective as new variants evolved. However, using this mRNA-encoded VLP strategy, what we've been able to do actually is elicit antibodies that neutralize a whole breadth of variants. So our construct is the red line, which is able to potently neutralize, you know, even as it's diluted, a whole bunch of these other variants, wild type, gamma, lambda, Omicron, BA1, whereas the US approved COVID vaccines, the original ones in blue, are not able to neutralize those other variants. So this approach, which we are, you know, among the first to pioneer in mRNA and protein design, has potential substantial gains for COVID and for other pathogens like influenza. Now, let me talk a little bit more broadly about what strategies like this are actually trying to do, right? If you think about any pathogen, you know, it has a whole bunch of antigens, which are, in the case of a virus, those are going to be proteins that elicit certain kinds of immune responses, certain types of antibodies, right? And then some of those antibodies are going to be functional. You know, they might sort of neutralize the pathogen, right? Some of the antibodies are going to be non-functional. They might bind but not neutralize or not bind at all, right? And then within the context of these functional antibodies, there's some subset of antibodies that you might consider broadly functional. 
And what that means is even as the pathogen evolves, even as there are additional variants that come up, those antibodies, which are some subset of the original antibodies, remain effective. So the intention of our vaccine design approach is basically to move the distribution of antibodies listed, whether it's in an animal or a human, towards these broadly functional antibodies, which have a chance of providing durable protection against even new variants that might emerge. And this is relevant in the context of COVID for this variant evolution story. In the case of some of the other pathogens that I mentioned, for example, HCV, it is absolutely essential. Because HCV as a pathogen like HIV evolves within its own host. And so, you know, if you were to be infected with HCV, you might have thousands of different variants of the HCV pathogen within your own body. And so, an antibody response that is effective against just one of them is not going to be effective at clearing the pathogen. So, we've used this, you know, we've used a variant of what's called an epitope scaffolding approach to basically attempt to elicit specific types of antibodies that provide this kind of broad functionality and broad protection. And what we've been able to do in just three years is to run these processes of designing and testing in animals these novel immunogens, which can elicit these antibodies, much faster than anybody typically does in vaccine R&D programs. Again, as I said, leveraging the fact that we can do this research much more cheaply and much more quickly in India. And what we found is mRNA in particular gives us an advantage here. mRNA is, of course, not the actual immunogen. What you're injecting in mRNA is almost code that encodes a specific design of immunogen that's intended to elicit some antibody response. And so because we're using mRNA, which is a standardized process, we can test thousands of different immunogens and then when we alight upon one that that actually gives us the response we want, we can translate it not just into a sort of lab environment but into an actual GMP manufacturing environment. Apologies. We can translate not just new a lab environment but into an actual clinical environment, into a clinical study very quickly, which is not something that you can do with conventional vaccines as easily, which is why you get these extremely long timelines, right? And so what we know so far is basically this VAC approach in India, it works, right? We've been able to do this at 20x cheaper than you would usually spend on a preclinical vaccine program to build a new platform and get to phase one. We've 10x the testing throughput of a normal vaccine program. We've tested, you know, thousands and thousands of novel LNP formulations of novel designed immunogens, which are basically vaccine designs. We've tested them know thousands and thousands of novel LNP formulations of novel designed immunogens which are basically vaccine designs we've tested them in vitro but we've also tested them in vivo 500 plus and we've been able to achieve the results you saw in the beginning which is that our vaccine appears to be much much better than the existing COVID-19 vaccines so what that means is I can take 10x the number of shots on goal for the same amount of funding as a company in Boston or San Francisco and potentially save 10x the number of shots on goal for the same amount of funding as a company in Boston or San Francisco and potentially save 10x the number of lives in the long run. Right? 
If somebody gives $300 million to a company in Boston, and this is all based on a real comparison with a biotech company that's developing vaccines and doing quite well doing so, they might take one candidate through a phase one clinical trial and be getting ready for a phase two clinical trial with a new platform. For us, we've been able to build a new platform, and with that amount of money, with $300 million, we would be able to take 10 new candidates to phase one and two and potentially bring multiple products to licensure. And again, in the long run, that saves a lot of lives, right? Because every year we wait to develop these vaccines, millions of people are dying. Another key piece of this is that we've built our own GMP manufacturing facility. So what that means is we have the ability within our own facility to make clinical doses in a way that's safe to inject in humans, such that we can very quickly take these new candidates and advance them to clinical studies. And here I'll reference what Philip was talking about a little bit, right? I think it's critical to ensure that we build a process that is not only, you know, safe by the standards of existing regulatory norms, but is legible enough and understandable enough by the general public that they trust what comes out of our work, right? And so the good news here, and in fact I'm announcing this today, we just announced it via press release this morning, is that NIAID, which is part of the US National Institutes of Health, has selected our next generation COVID-19 vaccine as part of the US government's Project NextGen for a phase one trial, which will happen in the US early next year, which to my knowledge is one of the first clinical trials of an Indian-designed vaccine in the U.S. In fact, no Indian vaccine or Indian-designed vaccine has ever been approved in the U.S. No Indian company's vaccine has ever been approved in the U.S. And our intention is to have our vaccines available all over the world, right, not just in rich countries, not just in poor countries, but to everybody. And so this is very exciting news, because I think what this means is our approach, which, as I said up top, is an approach that at the beginning nobody supported, right? Nobody believed that we had the talent in India to do this. Nobody was certain, you know, or people, in fact, were quite certain that we wouldn't be able to do this, right? And it took an early bet from Gates and a substantial bet from Vitalik and his team, and this has now been essentially co-signed by one of the most credible organizations in public health, right? So NIAID thinks that this vaccine works very well, at least preclinically, and they think it's worth evaluating in humans. The COVID vaccines have, you know, been somewhat maligned, but best estimates suggest they saved tens of millions of lives. We can quibble about specific numbers, but it's likely a very large number. But again, as Philip was talking about, there have been a lot of questions about whether the processes followed in approving these vaccines were appropriate for human health.
And at the same time, on the other side of the coin, the reality is that a lot of people who wanted these vaccines in developing countries didn't get them, because Pfizer and Moderna were not that interested in selling their vaccines at knockdown prices in poor countries, right? And so what we've done is we've worked with Balvi to come up with a model where we will be open sourcing our COVID-19 vaccine. We will not be enforcing our intellectual property on that vaccine for a number of years. And what we'll be allowing people to do is to verify not just the R&D processes that led to that vaccine, but also the manufacturing processes, so that manufacturers across the rest of the world can not only make copies of this vaccine themselves, which they can distribute at low cost, but also so that people can understand whether the processes that we've used in developing and manufacturing this vaccine are compatible with what we say out in public and are safe for them, right? They can have independent scientists evaluate these processes in a way that existing vaccine companies don't make enough information available to do. And that's something that we can do because we are comfortable releasing far more information than anybody has ever done for a vaccine development or manufacturing program before. Let me just briefly talk about what we call the Popvax Million Lives Mission. As I said, there are these three pathogens that collectively kill 1.8 million plus people per year. We want to develop effective vaccines against all three of them that we think can save over a million lives per year. We want to do this using a new sustainable model for funding for public health vaccines, similar to what we've done with Vitalik and his team, where, you know, we will provide full details needed for vaccine verification, R&D improvement, and manufacturing openly. We'll cap profits in developing countries and allow for open licensing on transparent terms of this intellectual property, and we will have uncapped profits in rich countries. And this is a model that we want to advance, that we think could be the future of sustainable vaccine development. Yeah, I'm happy to take your questions outside. And please feel free to email me if you're interested in working with us, funding us, or collaborating with us. Thank you so much. Thank you so much. That was great.", + "sources_streamethId": "6735bc959dbb7a90e1a2fe20", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735bc959dbb7a90e1a2fe20.vtt", + "transcript_text": " Hi, I'm Amy Proal. I'm the President and Research Director of the PolyBio Research Foundation, and today I'm truly excited to talk to you about viruses and chronic aging, building a research community. So where am I going with this? First, what I want to talk about is viruses as drivers of aging processes. This is really not discussed or understood as much as it should be. And there are so many compelling examples of how viruses can contribute to chronic aging processes. This is really important for the longevity community and for people who are just dealing with many forms of chronic disease. If we understand these interactions, we can better figure out how to combat them and how to best create strategies to mitigate viral effects on aging. So let me give you some examples of viruses and how they contribute to aging processes.
So there are definitely many drivers of human aging, but here are three of the main factors. Mitochondrial dysfunction: mitochondria are just the energy powerhouses of our cells, and they often become dysfunctional as we age. Inflammaging, which is basically just the fact that over time, as people get older, they tend to have more chronic inflammation, more immune cells that are just activated in an unproductive way. That's called inflammaging. And cognitive decline, obviously. People who are getting older and aging have memory problems. They have, you know, dementia, even Alzheimer's-type phenomena sometimes. So one thing to understand is that as we age, and in fact, from the time we are born and across the scope of our lives, we inherit viruses from our parents, chronic viruses sometimes. They're passed, in some cases, in the womb. Plus, over the course of our lives, and this happens with exposures to other people, it happens from our environment, and sometimes from the foods and other things that we consume, we accumulate different viruses, and they become part of what is known as our human virome, the viruses in us. And there are many viruses that become a burden in our systems as we just live. These are the herpes viruses, the papillomaviruses, and increasingly a growing number of RNA viruses are also understood to be persistent viruses that are with us for life. Now, what does this virome then do to aging? Well, consider just some of these virome components. This is, you know, sometimes when people are, let's say, in college, they get mono. And people understand that people get sick, they get a sore throat, they don't feel well, they're feverish, they try to avoid people for a while. What it really means when you get mono is that you've gotten the Epstein-Barr virus. It's a herpes virus. And that virus stays with you. Again, this is a persistent virus. Once you have it, it does not clear your system. It stays with you for the rest of your life. This is the thing. If someone's immune system is robust, if it's in good shape, if it's active, what the immune system technically does is keep these chronic viruses, like the Epstein-Barr virus, in check; and over 90% of people in the world, by the way, harbor the Epstein-Barr virus. It keeps them in a dormant form. The immune system keeps them in a latent form. And that way they technically cannot activate and create more proteins or things that can actually drive disease or potentially aging processes. Now, however, if viruses do become active, if they are moving out of a state of dormancy because something happens to the immune system, and this could be many things, it could be another infection, it could be just exposure to pollutants, to chemicals, to many things that wear down our immune systems over time, viruses can become more active. And one of the things that viruses do when they activate is that they affect our mitochondria directly. In fact, this is one of the key things to understand about viruses, our health, and aging: viruses are actually obligate intracellular pathogens. And what that means is that, by definition, they are not even alive. In order to replicate and create new versions of themselves, they must pull the substrates, the backbones to do that, from our human cells, from our mitochondria. So they do this. Every single virus hijacks our mitochondria in order to create new versions of itself and do basically anything that it does.
This is a paper that I wrote about this phenomenon with my colleague Michael VanElzakker, a neuroscientist at PolyBio who is also a neuroscientist at Harvard. We wrote about how pathogens, bacterial, viral, fungal, even parasite pathogens, hijack the metabolism or the mitochondria of the host cells they infect to just gain those basic substrates, again, to just create new versions of themselves. This is core to what they do. This is a diagram from our paper. This is basically a human mitochondrion in the diagram. In the middle of the figure, you can see the TCA cycle. That's a very important part of just gaining substrates for our own energy metabolism so that we can function, burn glucose, burn other fuels that allow our mitochondria to be energy producers for us in a good way. The blue boxes contain different human viruses that are part, or can be part, of the persistent chronic human virome. And those are just the different parts of metabolic pathways that they hack or hijack as part of their ability to just create new versions of themselves, or replicate. Okay, so I mentioned before that inflammation, just chronic inflammation, often accelerating over time, is also strongly associated with human aging and issues with longevity, for example, in this paper. Now, what inflammation often results from is the activation of immune cells and immune molecules, including cytokines, which become active. One of the questions is why, but we certainly know that cytokines especially become active. Now, IL-11 is an example of a cytokine. So it's an inflammatory molecule in the human body that can become more active when things become inflamed. Now, this doesn't seem too surprising then. In this paper, this team showed that inhibiting this inflammatory molecule, the cytokine IL-11, extended mammalian healthspan and lifespan, suggesting that inhibition of some forms of inflammation is helpful in that regard. Now, what though, in the first place, was causing that cytokine IL-11, or interleukin-11, to be active, to be more active than it should be? Well, one of the biggest things, again, that so many viruses and other bacteria or pathogens or parasites do is activate immune cells as part of their persistence. What happens is the immune system recognizes them, tries to target them or to keep them in check, and in the process becomes more active, more perpetually active. So here is a study showing that the cytokine IL-11, or interleukin-11, is actually stimulated both in animals and in the lab by viruses, including respiratory viruses. So these are drivers of inflammation. Most pathogens can be direct drivers of inflammation. Okay, now, viruses and bacteria, but I'm going to focus on viruses here, can basically just hack the signaling pathways that are at the heart of our longevity and health networks. For example, there are pathways in the human body that control processes associated with cellular senescence. For example, that's the ability of a cell to correctly divide, to correctly grow, not overdo that, not sort of underdo that process, but to do it robustly. There are networks in the human body that scientists have calculated as mattering in terms of human signaling associated with longevity.
Well, this team did a network-based analysis, and they uncovered dozens of viruses that encode proteins experimentally demonstrated to interact with proteins associated with these human aging networks, including senescence. So just dozens of viruses and thousands of interactions between these viral proteins, to the point where they ended up calling the dozens of viruses that they identified in the study age distorters, because of the fact that their proteins could have such detrimental or modulatory effects on these aging networks. In other words, their reproduction and their ability to replicate benefit directly from interference with their host's aging processes. And here on this chart are just some of the top viruses that were basically shown to have proteins that interfered with human cellular senescence pathways. There are the herpes viruses, which most of us acquire over the course of our lives, the papillomaviruses. Interestingly, influenza A virus was one of the top drivers of aging. And we never even think about the flu-type viruses, which is influenza, in an aging capacity, but we probably should a little bit more. So with that in mind, then, this is a final takeaway from that paper. This is what the team concluded. Owing to the considerable number of human viruses, this evolutionary-minded view encourages a reconceptualization of the locus of aging, no longer exclusively focused on our own genetic material, but expanded towards a larger set of genetic entities interacting with our species, such as viruses. So boom, the heart of aging. All right, now what about cognitive decline? What about just direct mechanisms by which viruses or other pathogens can drive cognitive decline? Here is one. This is the team that we work with at Harvard Medical School. They're really cool. They've been using models of a brain in a dish, where they actually recreate the neuron structures of a brain in a model, or just experiments in mice, to show that the Alzheimer's plaque, the amyloid beta plaque that forms in the brains of patients with Alzheimer's disease and that defines the disease, actually acts as an antimicrobial peptide, part of the immune response that forms in response to pathogens directly in order to combat them. So basically what happens is there's a virus that gets into the brain tissue model, and then the plaque forms around it as part of the response to the virus. That's what they were showing in this study in response to the herpes viruses. But this team has also shown the same phenomenon with bacterial pathogens and with fungal pathogens. This places infection at the heart of the driving of amyloid plaque in the Alzheimer's brain. Here's another example of a team who's working on the same phenomenon, Van Riesheids' group at Arizona State University. This is an image of cytomegalovirus, which is another herpes virus that many of us just carry with us for life. If you look at the image here, it's concentrated in the microglial immune cells of the brain, around the plaques, along with the axons and dendrites of neurons, again, that are inflamed and directly part of the Alzheimer's disease process. So there are a growing number of teams connecting viruses directly to neurodegeneration. Now, this is an interesting study. Okay, what do we do? Well, there's some really low-hanging fruit. No one's even doing anything about this.
And this is an example of just easy measures we could take to control the impact of viruses on aging if we made it a priority. This is a team in Taiwan. And what they did is they tracked people over time, some of whom were given just affordable, existing generic anti-herpes virus medications. So for example, let's say someone gets genital herpes, they're given Valtrex. It's just an over-the-counter anti-herpes virus drug. So some of the people in the study were given more of those anti-herpes virus drugs and some weren't. When they looked at the group that was given these anti-herpetic medications, they had a much lower risk of dementia than the people who didn't get them. In fact, up to a 10 times lower risk of dementia. So really, it's extremely low-hanging fruit to maybe start to use some of the drugs that we have to inhibit viral activity in the context of human aging. Now, what about COVID, long COVID? Now, I know we're all a little burned out on COVID, but really, part of what our group does is still to study the chronic consequences of SARS-CoV-2. We have to, because it's one more virus that is one of these players that can contribute to chronic disease and, unfortunately, aging processes. And you'll hear about long COVID, and it sounds like a vague phenomenon when you hear about it in the news often. Really, it's not, though. A lot of us that are directly studying long COVID realize that the persistence of the SARS-CoV-2 virus in tissue in the human body over time, in other words, SARS-CoV-2 potentially becoming just another member of the human virome, is happening in at least a decent number of people with long COVID. And here's a paper that a group of us long COVID researchers wrote about SARS-CoV-2 persistence as a driver of post-COVID symptoms. Here's an example of a team that we work with. This, in the bottom right, is gut tissue from the lining of the gut, collected in one case over two years after the person got COVID. And in this case, the person did have symptoms. They had chronic symptoms. But still, what you're seeing there in the purple, that pink part, is the SARS-CoV-2 virus still there in the gut tissue after over two years, sort of embedded there with immune cells around it, clustered, preventing it potentially from being cleared. So it's there in a persistent capacity, which means that at least in some people, SARS-CoV-2 may be acting, or seems to be acting, as a persistent virus that can also contribute to chronic disease and aging processes. In fact, this is a table from one of our papers listing some other studies that have shown persistent SARS-CoV-2 up to two years or more after initial infection in at least a subset of people. Okay, so then what do we do about this? Well, one of the things we're actually doing in the long COVID world, through CORE, which is treating people with conditions initiated or exacerbated by infection, is running a trial of rapamycin in patients with long COVID. Rapamycin is an mTOR inhibitor that has, you know, different effects on the immune system. Now, one of the things that's really interesting about rapamycin is that in some studies, at least rapalogs, or analogs of rapamycin, drugs similar to it, have been shown, in a low once-a-week dose, not in a high dose, to enhance parts of the immune response that can better control viral infection.
So for example, that trial that I showed you gave patients two rapalogs over the course of six weeks, and a couple couple things happened. First, they showed the people taking rapamycin an increase in interferon-induced antiviral gene expression, with interferons being one of the primary molecules or parts of the human response that combats viruses and keeps them down. Also, the people in the rapamycin group, everyone in the trial was given the influenza vaccine, but those who took rapamycin group, everyone in the trial was given the influenza vaccine, but those who took rapamycin had a more robust response to the vaccine. In other words, their immune system seemed to activate more and create more antibodies in response to that vaccine. Also, the participants on rapamycin, even though they just took the drugs for six weeks, reported a lower rate of infection for a full year after being on the rapamycin. This includes respiratory infections, UTIs, though, multiple types of infections, suggesting, again, that rapamycin was helping to control viral activity. And in a related study, the team found that rapamycin in some patients improved T-cell exhaustion. And again, when viruses persist, they tend to knock down T-cell cells and their activity, which are parts of our immune system, making them literally exhausted. And rapamycin was shown to potentially improve that. Again, so rapamycin, we're trialing now to see if it might help patients who have persistent SARS-CoV-2 or other virus problems in long COVID better control those infections. And what this means is there's a you know, a use of rapamycin in terms of potential viral control that is also probably relevant to human aging.", "eventId": "devcon-7", - "slot_start": 1731572400000, - "slot_end": 1731573300000, + "slot_start": 1731573300000, + "slot_end": 1731574200000, "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1sK71lOtl_9Q8SbWOBVtDNhLVBhc--pIc-AxaYE2toIM", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/17eofu9OtkjONNHPpAdEmPg8MIz7E8ahPAxLdRwJsfNY", + "resources_slides": "https://drive.google.com/file/d/13rvMfS2uyuibX_a3uOoFiSmF9Q5yXf5M/view", "speakers": [ - "soham-sankaran" + "amy-proal" ] }, "vector": [ @@ -833676,11 +831347,8 @@ 0, 0, 0, - 6, - 0, - 0, - 0, 0, + 6, 0, 0, 0, @@ -833832,7 +831500,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -833857,7 +831524,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -833972,7 +831638,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -834290,13 +831955,14 @@ 2, 0, 0, - 2, 0, 0, 0, 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -834308,46 +831974,38 @@ }, { "session": { - "id": "verifier-alliance-inside-of-the-contract-verification-pipeline", - "sourceId": "Q3EDF8", - "title": "Verifier Alliance: inside of the contract verification pipeline", - "description": "The talk will guide you through a smart-contract verification process step by step while introducing some technical details and challenges verification services have to handle. 
Will describe what we have learned building \"Verifier Alliance\" - a new collective that unites different verification providers to have an open and shared database of smart contracts (verifieralliance.org).", - "track": "Developer Experience", + "id": "visions-of-a-viable-dacc-biosafety-strategy", + "sourceId": "7VDGQM", + "title": "Visions of a Viable d/acc Biosafety Strategy", + "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Developer", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "DevEx", - "verification", - "contracts", - "DevEx" - ], - "keywords": [ - "Contract", - "Verification" - ], - "duration": 519, + "tags": [], + "keywords": [], + "duration": 610, "language": "en", - "sources_swarmHash": "69a3730194c47cec0c3005a9eed1c4f8dd9c959160dc3ba772e0007bc7847a61", - "sources_youtubeId": "2U4Wad2ebwI", + "sources_swarmHash": "101751999983d5d26371cafb5bff3f14e153bee0637bd0281e2bbc514d97dd38", + "sources_youtubeId": "oOsKEwKD-jE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6735ab179dbb7a90e1a340ab", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ab179dbb7a90e1a340ab.vtt", + "transcript_text": " Okay, so welcome to DEAC Biodefense. So I think I'll start off by just briefly recapping how the hell I even got here. Because basically, I started off being the Ethereum guy, and I'm historically not a bio person and more of a math and computer and economics person. I was definitely interested in longevity almost since the beginning. I read Aubrey de Grey's book, Ending Aging, back when I was 13, and that just really impressed me, both the sheer importance of solving aging, but also the fact that there is a roadmap to actually get us there. And so I was excited about longevity the whole time ever since. But then for DEAC Biodefense in particular, in 2021, there was this big meme coin mania that I'm sure you guys all remember. So first in 2016, there was Dogecoin, and Dogecoin went up really high. And then during the 2020-21 bubble, a bunch of people basically started like copycatting Dogecoin and making their own dog-themed coins and hoping that they'll go up to. And so some of these people decided as a strategy that they would put a large portion of their supply into my wallet balance and then publicly say Vitalik Buterin supports this coin, even though I never touched it. And some of these coins just like went up way higher than they should. And so one of those big coins was called Chiba Inu. There were also others. And I sold a bunch of them. And I donated some to a couple of organizations, one of which is Crypto Relief India. So at the time in India, there was this big COVID crisis that was happening. Lots of people were getting sick. It looked very bad from outside. It looked like no one was really caring about the situation, so I came in and donated to this group that originally Baljie introduced me to. 
First I donated a little bit, and I donated a amount of Shiba tokens that I thought would be worth a few million dollars at most, but then they ended up managing to cash out like literally $470 million dollars and so after that we yeah so 470 million ended up being like more than crypto and relief india needs for its whole roadmap and i was also started researching and understanding COVID and pandemic-related topics more heavily. And Balvi, between myself and some of my science advisors, basically slowly formed and ended up spending a significant chunk of that meme coin funding on basically moonshot topics that deal with all kinds of anti-pandemic related efforts in general. So biodefense is important, right? So compared to the natural environment, population density is way higher than before. Urbanization continues to increase. We have very easy worldwide air travel. In the best case scenarios, we might even have like rockets that will take us from New York to Bangkok in one hour but I mean Eli Dorado told me that like that's probably not going to be commercialized anytime soon because rockets are a little too dangerous but we have supersonic jets they might take us there in like four or five hours and so I expect air travel to just to be even more ubiquitous. Factory farming creates lots of pandemics, man-made pandemics so gain-of-function research", "eventId": "devcon-7", - "slot_start": 1731472800000, - "slot_end": 1731473400000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1WNKyHeXOwkXmvaf0GIGfAtO5R7MQYyUbdRwxgk23ZzQ", - "resources_slides": null, + "slot_start": 1731567600000, + "slot_end": 1731568200000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1yGQBHJnRzdZfi9mog9ipmc0zYrZB2nzXpEG4mGwCGko", + "resources_slides": "", "speakers": [ - "rim-rakhimov" + "vitalik-buterin" ] }, "vector": [ - 0, - 0, 0, 6, 0, @@ -834541,6 +832199,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -835051,7 +832710,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -835127,7 +832785,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -835360,7 +833017,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -835516,7 +833172,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -835660,13 +833315,13 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, + 2, 0, 0, - 2, 0, 0, 0, @@ -835682,41 +833337,47 @@ }, { "session": { - "id": "verkle-integration-in-reth", - "sourceId": "T8LKTM", - "title": "Verkle integration in reth", - "description": "This talk concerns the presentation of EPF Project: Verkle integration in reth.\r\nThe project comprised of replacing the current state-commitment structure in reth with verkle tries and other modifications for statelessness, including implementing EIPs such as EIP-4762: Statelessness gas cost changes (to REVM), EIP-6800: Ethereum State using a unified verkle trie, EIP-7709: Read BLOCKHASH from storage and update cost, and passing the associated execution-spec-test vectors designed for these EIPs.", - "track": "[CLS] EPF Day", + "id": "visual-code-of-cypherpunk-and-lessons-from-subcultural-aesthetics-we-should-remember-on-the-road-to-mass-adoption", + "sourceId": "ZAYEXK", + "title": "Visual code of cypherpunk, and lessons from subcultural aesthetics we should remember on the road to mass adoption", + "description": "I want to take builders on the turbulent ride through how subcultural and social movements used their visual codes when spreading globally, and what design tasks are still ahead of us on the way to making Ethereum cypherpunk again and onboarding the next billion users to Web3 at the 
same time.\r\n\r\nThis ride will include three stops:\r\n1. weaving one's emotional state into the collective identity\r\n2. using shared aesthetics as a signal of belonging\r\n3. coordinating a collective design process.", "track": "Cypherpunk & Privacy", "type": "Lightning Talk", "expertise": "Beginner", "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ "Coordination", "Identity", "Design", "communication", "Coordination", "Design", "Identity" ], "keywords": [ "culture", "aesthetics", "communication" ], "duration": 758, "language": "en", "sources_swarmHash": "3dd85d3f006a11a867fed6fa39a4901b593753cc0b383f9af7098ac9914b54ce", "sources_youtubeId": "aUkVqsDW6t4", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "67388bae1b0f83434d2b7c95", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67388bae1b0f83434d2b7c95.vtt", "transcript_text": " Let's see. So we all want mass adoption for our products, and we want these products to be loved and used by many. And yet we have one heavy anchor that will always hold us back from mass adoption: the visual language of cypherpunk. Let me show you what's happening. Visual language consists of two core layers. The symbols: they carry meaning and cultural baggage. And the styles: styles evoke emotions. They always go together when it comes to how we respond to visual messaging. A positive symbol in a positive style evokes joy. A positive symbol in a negative style makes a rather negative impact. Think about toys in horror movies. A negative symbol in a positive style can cause confusion, but more often it brings amusement. Think about Halloween. And a negative symbol in a negative style evokes nothing but fear and disgust. People try to distance themselves from everything that lives in this quadrant. Well, adoption lives there. Where on this matrix do you think cypherpunk is? Let's see. The first symbol, an anonymity mask, traces back to Guy Fawkes, who earned a reputation as a failed terrorist and actually never fought for democracy. Well, Alan Moore tried to recover this image in the comics V for Vendetta, making it a symbol of vigilantism, but later Hollywood successfully shifted the message, adding a narrative of fear, creating taglines, and a lot of blood. Today, it's a symbol of cypherpunks. Now, here you already see a second symbol, hoods, which were originally associated with monks, the knowledge keepers, which is a good beginning. But later, they became a symbol of secret societies, many of which practiced ritual murder and dark magic, if such a thing exists. And recently Hollywood dressed all the assassins in hoods, adding a nice layer of death and blood to the overall image. Today hoods are a symbol of cypherpunks. Well, here you already see the next symbol, which is the command-line romance, which Hollywood again, in the form of digital rain together with the techno style, used to present technology as a global threat to everyday life.
And the last style I would like to mention is the glitch effect, which was a way for early internet artists to present, to visualize, a system vulnerability and the beauty of breaking it. But for normal people, the glitch effect evokes the same feeling as breaking glass. Now, where on this matrix do you think cypherpunks are? Well, fortunately, we have a century of subcultural movements that can teach us how to ease this tension between our heritage and the past and the needs of mass adoption. So here are three main lessons I want cypherpunks to hear. First, choose joy as a strategy. Cypherpunk looked like it was designed for the post-apocalypse. But privacy should not look like paranoia. It should look like freedom. Just look at how the Pride movement turned protest into a celebration. They didn't promote better hiding spaces. They made visibility itself powerful and joyful. And you may say, meh, rainbows. But people love rainbows. Ethereum loves rainbows. So we already want this joy. Next lesson, aim for simplicity. Cypherpunk visuals require technical tools to reproduce them. But if your grandma, and even you, cannot draw the symbol with anything that you have in your hands right now, it will not spread. Extinction Rebellion teaches us how to do this. They created a symbol which a child could draw in seconds and every human could reproduce with anything they have around. They later gave this symbol to communities, and communities styled it with their own vibes to represent their emotions around the movement, and the movement went global. And the last example, embrace evolution. The Black Panthers used powerful yet very aggressive imagery, usually containing weapons. Because of this aggression, the movement didn't achieve the reach they hoped for, but the movement did not die. The new generation learned from the past and took over, and moved from displaying weapons to displaying words, from we protect ourselves to we deserve to be here. And they reached millions and more. So if I were here an incarnation of all the subcultural movements from the past, I would ask our ecosystem to not make Ethereum cypherpunk again, but to make it a new cypherpunk. And maybe, just maybe, it will also have a different name. Thank you. Thank you very much. So we have a bit of time to ask a few questions. Raise your hand if you have any questions. Over here. Who's doing it right? Who can you call out, or are there any examples that you've seen this week, of different ways to evoke this sentiment in our ecosystem visually? I mean, who is doing the right cypherpunk style, or who is doing it right in general? The visual styling that you've described and the evolution of that expression, have you seen any of it in person? We are here right now. So we have some more questions over there. Hello. Thank you for the talk. So my question is, from the negative examples that you presented, it seems like a lot of them were kind of, you know, serious, tough, masculinity-focused. And crypto as an industry is also quite masculine in terms of just the people who tend to, you know, run the companies. If we gather up everyone who works in crypto, mostly it's men. Uh, one of the positive examples that you had was, you know, the Pride flag, for example, which is something that is quite challenging to traditional masculinity. So how much do you think that plays into the current branding issues that crypto experiences? Thanks. Well, I'm so glad you asked this, because, like, maybe 20 minutes before the talk I just cut off one slide because I needed to fit in five minutes.
But after the glitch effect, what I wanted to say is that the cypherpunk imagery has a very heavy gender coding and is an all-white-male narrative, which is exactly what you're talking about. Well, all those movements that I showed, they actually show the equality of everyone. And what I really love in Ethereum, here right now at Devcon, is that we have more non-white people. So it's really good that white people are not dominating here. Also, if you study the cyberpunk movement itself, there were a lot of regional directions that actually didn't go for this American, uh, white-narrative cyberpunk. And actually, uh, for the original cyberpunk movement, Hollywood did a very bad job for us, because as much as we were trying to narrate, uh, well, developers don't do a very good job with visual communication, so they did a lot of good work on vision building, but Hollywood knows how to show the masses exactly what the government wants them to see, and in general, nothing goes on the screen which can harm the government. So even V for Vendetta, the author of V for Vendetta wasn't involved in the movies, because he didn't agree with the flatness and bloodiness of the movie's creation. The original comics had a very deep narrative; you actually need to see where he comes from. Did I answer your question? Thank you. So there's a last question over there. Okay, if I may, I would like to make, like, a very small comment. I absolutely do agree with you that the imagery we are using is holding us back, and this is, like, spot-on, a job well done. But none of the images that you showed are actually connected to the original cypherpunk movement, because they didn't have any image. It was just a mailing list, a very traditional mailing list that didn't even allow attaching any imagery. The closest they got to having any visual identity is when three of the people from the original cypherpunk movement were featured on the cover of Wired magazine, and they were wearing masks, but they were very different masks. They had nothing to do with Guy Fawkes, and it was not their idea to wear the masks. That's one thing. Second, they used one pretty cool image that became really associated with them, but not with the movement, with one of the campaigns that they were running. In the early 90s, there was this danger that the U.S. government would impose a surveillance mechanism on communication with, like, the Clipper project. And what cypherpunks did was, like, basically they hijacked the Intel logo, and instead of Intel Inside, they put Clipper Inside. And this became, like, a sticker that they were distributing in many, many places. And so this was, like, quite successful. But everything else that you showed has absolutely nothing to do with cypherpunks. And one last thing: cypherpunks, the name itself, it's not something they created. They were not considering themselves to be a counter-cultural movement. It was basically a joke from someone who was looking at them and commenting on how they were, like, approaching things. But they were not punkish at all. Absolutely. Yes. And that's how branding works. It's not what you say about you. It's what others say about you.
So I encourage you, open your laptop and Google hacker icon and see what you have in dance just google cypher funks and see what google gives you and it's yeah that's the whole point there the world and we know which powers are engineering the reputation of cypher funks which is going to the minds and hearts of people outside of crypto bubble. And the call is not being passive around this, but actually do actions to create the image that we want to have, not what happens because some other very...", "eventId": "devcon-7", - "slot_start": 1731472200000, - "slot_end": 1731473100000, - "slot_roomId": "breakout-1", - "resources_presentation": "https://docs.google.com/presentation/d/1Uq2DzZBnDwPSfrV2xqfm-mlie2DOZZKEwi0Kk44YlQI", - "resources_slides": null, + "slot_start": 1731495000000, + "slot_end": 1731495600000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1JfZtSjos8JrMCOBp9B9xIaU5dMAfVMzayGYW7eA5F7Q", + "resources_slides": "https://drive.google.com/file/d/1on4DVwDiZF20dNebVIDLXMFlMb70govU/view", "speakers": [ - "aditya-gupta" + "ira-nezhynska" ] }, "vector": [ @@ -835725,6 +833386,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -835735,7 +833397,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -836484,13 +834145,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -836519,6 +834178,8 @@ 0, 0, 0, + 2, + 0, 0, 0, 0, @@ -836611,6 +834272,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -836622,6 +834284,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -836664,7 +834327,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -836731,6 +834393,14 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -836778,18 +834448,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -837039,11 +834697,9 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, + 2, 0, 0, 0, @@ -837056,43 +834712,53 @@ }, { "session": { - "id": "viruses-and-chronic-aging-building-a-research-community", - "sourceId": "FX8UQF", - "title": "Viruses and Chronic Aging: Building a Research Community", - "description": "Did you know that mitochondrial dysfunction, inflammation, and cognitive decline are directly accelerated by viruses? In fact, the viruses that infect us over a lifetime are technically not even alive, and therefore must “hack” our human cellular metabolism machinery to do anything at all. This talk will overview the first-ever global collaborative network studying & treating chronic viruses as drivers of aging, including how certain lifespan-promoting drugs may help combat viral activity.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Community", + "id": "vlsmsanalyzing-faulty-distributed-systems", + "sourceId": "AKRLKH", + "title": "VLSMs—analyzing faulty distributed systems", + "description": "Validating Labeled State transition and Message production systems (VLSMs) provide a general approach to modeling and verifying faulty distributed systems. With formal definitions of validation and equivocation, we are able to prove that for systems of validators, the impact of Byzantine components is indistinguishable from the effect of the introduction of corresponding equivocating components. 
All of the results presented in this talk have been formalized and checked in the Coq proof assistant", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Expert", + "audience": "Research", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 996, + "tags": [ + "Consensus", + "Distributed validator technology", + "Formal Verification", + "correct-by-construction", + "Consensus", + "Distributed validator technology", + "Formal Verification" + ], + "keywords": [ + "Correct-by-construction" + ], + "duration": 1787, "language": "en", - "sources_swarmHash": "4aff1867805b00275edb2ba9798287686622d38009dc1a7f1cae7feacc201c23", - "sources_youtubeId": "OcwxbzqP8Jc", + "sources_swarmHash": "faf3bda887c2724dd5bd923f3f360e2226fc675126f2a2e5d499b3311e2a1db3", + "sources_youtubeId": "loyKzWQlyEo", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735bc959dbb7a90e1a2fe20", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735bc959dbb7a90e1a2fe20.vtt", - "transcript_text": " Hi, I'm Amy Prowal. I'm the President and Research Director of PolyBioResearch Foundation, and today I'm truly excited to talk to you about viruses and chronic aging, building a research community. So where am I going with this? First, what I want to talk about is viruses as drivers of aging processes. This is really not discussed as understood as it should be. And there's so many compelling examples of how viruses can contribute to chronic aging processes. This is really important for the longevity community and for people who are just dealing with many forms of chronic disease. If we understand these interactions, we can better figure out how to combat them and how to best create strategies to mitigate viral effects on aging. So let me give you some examples of viruses and how they contribute to aging processes. So there's definitely many drivers of human aging, but here are some of the three main factors. Mitochondrial dysfunction, these are the just energy powerhouses of our cells, and they become dysfunctional as we age, often. Inflammating, which is basically just the fact that over time, as people get older, tend to have more chronic inflammation, more immune cells that are just activated in an unproductive way. That's called inflammation. And cognitive decline, obviously. People who are getting older and aging have memory problems. They have brain, you know, dementia, even post-Alzheimer's type phenomenon even sometimes. So one thing to understand is that as we age, and in fact, as we are born and across the scopes of our lives, we inherit viruses from our parents, chronic viruses sometimes. They're passed, in some cases, in the womb. Plus, we also, over the course of our lives, and this happens with exposures with other people, it happens from our environment, and sometimes the foods and other things that we consume, we accumulate different viruses, and they become part of what is known as our human virome, the viruses in us. And there are many viruses that become a burden in our systems as we just live. These are the herpes viruses, the papillomaviruses, and increasingly a growing number of RNA viruses are also understood to be persistent viruses that are with us for life. Now, what does this virome then do to aging? Well, consider just some of these virome components. This is, you know, sometimes when people are, let's say, in college, they get mono. 
And people understand that people get sick, they get a sore throat, they don't feel well, they're feverish, they try to avoid people for a while. What really means when you get mono is that you get the Epstein-Barr virus. It's a herpes virus. And that virus stays with you. Again, this is a persistent virus. Once you have it, it does not clear your system. It stays with you for the rest of your life. This is the thing. If someone's immune system is robust, if it's in a good shape, if it's active, what the immune system does technically is keep these chronic viruses like the Epstein-Barr virus, if you get it, and over 90% of people in the world, by the way, harbor Epstein-Barr virus, keeps these viruses in check. It keeps them in a dormant form. The immune system keeps them in a latent form. And that way they technically cannot activate and create more proteins or things that can actually drive disease or potentially aging processes. Now, however, if viruses do become active, if they are moving out of a state of dormancy because something happens to the immune system, and this could be many things, it could be another infection, it could be just exposure to pollutants, to chemicals, to many things that wear down our immune systems over time, viruses can become more active. And one of the things that viruses do when they activate is that they affect our mitochondria directly. In fact, this is one of the key things to understand about viruses, our health and aging, is that viruses are actually obligate intracellular pathogens. And what that means is by definition, they are not even alive. They must, in order to replicate and create new versions of their cells, they must pull the substrates, the backbones to do that, to create new versions of themselves from our human cells, from our mitochondria. So they do this. Every single virus hijacks our mitochondria in order to create new versions of itself and do basically anything that it does. This is a paper that I wrote about this phenomenon with my colleague, neuroscientist at Polybio, Mike Van Elzucker, who also is a neuroscientist at Harvard, we wrote about how pathogens, bacteria, viral, fungal, parasite pathogens even, hijack the metabolism or the mitochondria of the host cells they infect to just gain those basic substrates again to just create new versions of themselves. This is core to what they do. This is a diagram from our paper. This is basically a human mitochondria in the diagram. At the intermediate of that paper, of the figure, you can see the TCA cycle. That's a very important part of just gaining substrates for our own energy metabolism so that we can function, burn glucose, burn other fuels that allow our mitochondria to make us energy producers in a good way. The blue boxes contain different human viruses that are part or can be part of the human viral persistent chronic viruses. And those are just the different parts of metabolic pathways that they hack or hijack as part of their ability to just create new versions of themselves or replicate. Okay, so I mentioned before that inflammation, just chronic inflammation often accelerating over time is also strongly associated with human aging and issues with longevity, for example, in this paper. Now, what inflammation often results from is the activation of immune cells, including cytokines, which become active, and one of the questions is why, but we certainly know that especially cytokine immune which become active. 
And one of the questions is why, but we certainly know that especially cytokine amine cells become active. Now, IL-11 is an example of a cytokine. So it's an inflammatory molecule in the human body that can become more active when things become inflamed. Now, this doesn't seem too surprising then. In this paper, this team showed that inhibiting this inflammatory molecule, the cytokine IL-11, extended mammalian healthspan and lifespan, suggesting that inhibition of some forms of inflammation is helpful in that regard. Now, what though, in the first place, was causing that cytokine IL-11 or interleukin-11 to be active, to be more active than it should be. Well, one of the biggest driving factors, again, that so many viruses and other bacteria or pathogens or parasites do is they activate immune cells as part of their persistence. What happens is the immune system recognizes them, tries to target them or to keep them in check, and in the process becomes more active, more perpetually active. So here is a study showing that that cytokine IL-11 or interleukin-11 is actually stimulated both in animals and in the lab by viruses including respiratory viruses. So these are drivers of inflammation. Most pathogens can be direct drivers of inflammation. Okay now we just have the ability, the viruses and bacteria, but I'm going to focus on viruses here, can basically just hack the signaling pathways that are the heart of our longevity health networks. For example, there are pathways in the human body that control processes associated with cellular senescence. For example, that's the ability of a cell to correctly divide, to correctly grow, not overdo that, not sort of underdo that process, but to do it robustly. There are networks in the human body that scientists have calculated as mattering in terms of human signaling associated with longevity. Well, this team did a network-based analysis, and they uncovered dozens of viruses that encode proteins experimentally demonstrated to interact with proteins associated with these human aging networks, including senescence. So just dozens of viruses and thousands of interactions between these viral proteins to the point where they ended up calling dozens of viruses in the study that they identified age distorters because of the study that they identified age disorders because of the fact that their proteins could have such detrimental or modulatory effects on these aging networks. In other words, their reproduction and their ability to replicate benefits directly from interference with their host's aging processes. And here on this chart are just some of the top viruses that were basically shown to have proteins that interfered with human cellular senescence pathways. There are the herpes viruses, which most of us acquire over the course of our lives, the papillomaviruses. Interestingly, influenza A virus was one of the top drivers of aging. And we never even think about the flu type viruses, which is influenza in an aging capacity, but we probably should a little bit more. So with that in mind, then, this is a final takeaway from that paper. This is what the team concluded. Owing to the considerable number of human viruses, this evolutionary-minded view encourages a reconceptualization of the locus of aging, no longer exclusively focused on our own genetic material, but expanded towards a larger set of genetic entities interacting with our species, such as viruses. So boom, the heart of aging. All right, now what about cognitive decline? 
What about just direct mechanisms by which viruses or other pathogens can drive cognitive decline? Here is one. This is the team that we work with at Harvard Medical School. They're really cool. They've been using models of a brain in a dish where they actually recreate the neuron structures of a brain in a model or just experiments in mice to show that the Alzheimer's plaque, the amyloid beta plaque that is the plaque that forms in the brains of patients with Alzheimer's disease that defines the disease, actually acts as an antimicrobial peptide or part of the immune response that forms in response to pathogens directly in order to combat them. So basically what happens is there's a virus that gets into the brain tissue model, and then the plaque forms around it as part of the response to the virus. That's what they were showing in this study in response to the herpes viruses. But this team has also shown the same phenomenon with bacterial pathogens and with fungal pathogens. This places infection at the heart of the driving of amyloid plaque in the Alzheimer's brain. Here's another example of a team who's working on the same phenomenon. Van Riesheids' group at Arizona State University. This is an image of cytomegalovirus, which is another herpes virus that many of us just carry with us for life. It's look in the image here, it's concentrated in the microglial immune cells of the brain around the plaques of these cells along with the axons and dendrites of neurons, again, that are inflamed and directly part of the Alzheimer's disease process. So there are a growing number of teams connecting viruses directly to neurodegeneration. Now, this is an interesting study. Okay, what do we do? Well, there's some really low-hanging fruit. No one's even doing anything about this. And this is an example of just easy measures we could take to control the impact of viruses on aging if we made it a priority. This is a team in Taiwan. And what they did is they tracked people over time, some of whom were given just affordable herpes virus existing generic medications. So for example, let's say someone gets genital herpes, they're given Valchex. It's just an over-the-counter herpes, anti-herpes virus drug. So some of the people in the study were given more of those anti-herpes virus drugs and some weren't. When they looked at the group that was given these anti-herpetic medications, they had a much lower risk of dementia than the people who didn't. In fact, up to a 10 times lower risk of dementia. So really, it's extremely low hanging fruit to maybe start to use some of the drugs that we have to inhibit viral activity in the context of human aging. Now, what about in COVID, long COVID? Now, I know we're all a little burned out on COVID, but really, part of what our group does is study still the chronic consequences of SARS-CoV-2. We have to. It's because it's one more virus that is one of these players that can contribute to chronic disease and unfortunately aging processes. And you'll hear about long COVID and it sounds like a vague phenomenon when you hear about it in the news often. Really, it's not though. A lot of us that are directly studying long COVID realize that the persistence of the SARS-CoV-2 virus in tissue in the human body over time, in other words, SARS-CoV-2 potentially becoming just another member of the human virome, is happening in at least a decent number of people with long COVID. 
And here's a paper that a group of us long COVID researchers wrote about SARS-CoV-2 persistence as a driver of post-COVID symptoms. Here's an example from a team that we work with. This is, in the bottom right, gut tissue from the lining of the gut, collected from someone, in one case, over two years after they got COVID. And in this case, the person did have symptoms. They had chronic symptoms. But still, what you're seeing there in the purple, that pink part, is the SARS-CoV-2 virus still there in the gut tissue after over two years, sort of embedded there with immune cells around it, clustered, preventing it potentially from being cleared. So it's there in a persistent capacity, which means that at least in some people, SARS-CoV-2 may be acting, or seems to be acting, as a persistent virus that can also contribute to chronic disease and aging processes. In fact, this is a table from one of our papers; these are just some of the other studies that have shown persistent SARS-CoV-2 up to two years or more after initial infection in at least a subset of people. Okay, so then what do we do about this? Well, one of the drugs we're actually looking at in the long COVID world, in a trial called CORE, which is treating people with conditions initiated or exacerbated by infection, is rapamycin: we're actually running a trial of rapamycin in patients with long COVID. Rapamycin is an mTOR inhibitor that has, you know, different properties on the immune system. Now, one of the things that's really interesting about rapamycin is that in some studies, at least rapalogs or analogs of rapamycin, drugs similar to it, have been shown, in a low once-a-week dose, not in a high dose, in a lower dose, to enhance parts of the immune response that can better control viral infection. So for example, that trial that I showed you gave patients two rapalogs over the course of six weeks, and a couple of things happened. First, they showed in the people taking rapamycin an increase in interferon-induced antiviral gene expression, with interferons being one of the primary molecules, or parts of the human response, that combats viruses and keeps them down. Also, everyone in the trial was given the influenza vaccine, but those who took rapamycin had a more robust response to the vaccine. In other words, their immune system seemed to activate more and create more antibodies in response to that vaccine. Also, the participants on rapamycin, even though they just took the drugs for six weeks, reported a lower rate of infection for a full year after being on the rapamycin. This includes respiratory infections, UTIs, multiple types of infections, suggesting, again, that rapamycin was helping to control viral activity. And in a related study, the team found that rapamycin in some patients improved T-cell exhaustion. And again, when viruses persist, they tend to knock down T cells and their activity, which are parts of our immune system, making them literally exhausted. And rapamycin was shown to potentially improve that. Again, so rapamycin, we're trialing now to see if it might help patients who have persistent SARS-CoV-2 or other virus problems in long COVID better control those infections. 
And what this means is there's a you know, a use of rapamycin in terms of potential viral control that is also probably relevant to human aging.", + "sources_streamethId": "67358e2b9dbb7a90e1a57339", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67358e2b9dbb7a90e1a57339.vtt", + "transcript_text": " Testing, testing. Hi, everyone. Hi, hi. Yeah, I'll take this mic, actually. Thank you for coming to this early morning talk. It's going to be a talk on formal verification and like some basic methods in distributed systems and reasoning about faulty distributed systems. All of the, everything presented here has been formally verified by the team at Runtime Verification and is, like, available to, like, click and check. And you can look at the proofs in the talk, but this talk is not going to be focused on the proofs, more just definitions and theorems and not really walking through the proofs. But you can check them out. And there's a... So I've separated a bunch of sections, a section on validation theory, a section on equivocation theory, and then on relating and reducing Byzantine faults to equivocation faults in the context of validators. So let's kick it off. So let's talk about validation. And so here's the slides where I guess you'll... So this is the name of the paper. Validating labeled state transition message production systems. So it's for modeling distributed systems, faulty distributed systems, and all these amazing people worked on it for a long time. Here you can scan this QR code and pull up the PDF if you like. I'll show this also again later. So here's the validation theory section outline. Basically, we're going to go through the definition of this model, its compositions, and then the definition of validator, and then move on to the equivocation section. So here's the first definition here. So a VLSM is a tuple, you know, as we often like to define these things. It's sort of like a state transition system like you're normally used to, except for it has a few other things, like a label, which is also not too unconventional. But it has a first order message set in the definition. It has initial states and initial messages. And then there's a transition function that takes labels, states, and messages, optional messages, and gives us states and optional messages. This is like the state transition slash message receipt and production function that describes for a particular VLSM, sort of what's happening computationally, you can imagine. And then they're also equipped with another thing, which is why they're called validating, this beta, which is a validity condition. And it basically says, it's going to be valid to transition on a label from a state given a message. So when you're in a particular state and you receive a message and you might want to transition, there might be multiple possible transitions each identified by a label. And some of them might be basically banned. Even though they're defined by the transition, the validity condition won't let you take that transition. So this is like a state transition system with native messages, with initial state messages, and with a transition function that's totally defined over this domain. And then we sort of restrict it, effectively making it partial with this validity condition. This is like the validation condition. Basically, we're going to imagine that these things don't transition, even though the transition is defined, when the validation condition is not satisfied. 
And so that's the definition, but actually, I haven't told you how to get the states and messages, but it's sort of what you would expect. You start at the initial states and the initial messages, and then we build up this fixed point by taking the union of all the states that you get from the transitions and all the messages that you get from the transitions, and transitioning from those states using those messages. So basically starting from the initial states and messages, transitioning and sending all the messages that are produced from there and receiving them and doing it over and over again, basically, until you even get stuff like a node in an early state receiving a message from some other state. This really is a big fixed point. And so there is an interesting thing that can sort of happen, which is the validity condition can be satisfied even when an input message is invalid. So we have to slightly distinguish between just the validity condition being satisfied and the actual trace being valid. The trace is valid only if all the input messages are also valid. So, you know, no garbage in, sort of, is allowed in a valid message. So that's basically the definition of VLSM. And then they have a pretty natural way to compose them. And so, now I'm going to go through that definition. Unless someone has, like, a question now before the composition. Okay. So, basically, what we're going to do is we're going to take a disjoint sum of the components for the label. So we identify the label, which component it is. The state is a tuple of the component states. The initial states is a tuple of the initial states. And then we have like a union of the messages for the initial messages. And then here, actually, we're composing VLSMs that have the same message type. This is just so that all our transitions are defined and everything, and to let them send messages to and from each other, so they're not just independent. Like if that was a disjoint union, they wouldn't be communicating. So we have a disjoint union for the labels, a tuple for the state, and then a regular union for the messages. And then in the free composition, we have a transition that basically just affects only one component, exactly according to the transition that it would have had before the composition, and checks the validity condition only of that component, exactly like it was before. So very little, basically nothing being done by the composition except for transitioning the individual components and checking their individual composition, sorry, their individual validity conditions individually. There's no composition-wise constraints, but they can message each other in this thing. And then, you know, note also that this is also a VLSM, and that's why it's like sort of a composable model, you know, same type of definition and everything. Yeah? So the split-up of the transition functions and the validity, is that purely mathematical? So the question is, is the split-up between the transition and the validity purely mathematical? I mean, I guess it's for the sake of convenience when dealing with the math. But, I mean, more traditionally in math you'd use like a partial function, I guess, whereas here we're about to get into this conversation about validation and distributed systems and what can happen in a distributed system that you might not be able to tell locally about, and so there is a reason why we are thinking about the validation at different levels. 
And actually, that actually does sort of spell it out. Basically, there's going to be, actually, on the next slide, we're going to talk about constraint compositions, where there's an additional constraint on the composition. So basically, like, this constraint composition just basically conjuncts a constraint on top of the, uh, validity constraint of the free composition, which just is the individual constraints applied independently. So this, this composition constraint, um, you know, lets us sort of analyze, uh, things a little bit more conveniently than just a partial function approach. I guess you want to be able to see that a transition would be possible, but it's not there. Yes, so the question is if we're doing this to try to see if a transition is possible but not valid. And yeah, you're very much going in the right direction. And we're getting there with this definition here. So, this is the definition of validator. It's a very natural, simple definition of validator, which is kind of useful in many different contexts. And so, the components are validators for the composition: basically, they're checking if they're truly a part of that composition, and they're sort of, you know, only transitioning as if they are a part of that composition, even though they don't know it per se. So let me just go through this definition. So basically, a component in a constrained composition is a validator if any transition that that component can make can be lifted to a valid transition in that composition. So if the component has a valid transition, then, if a validator has a transition which it can take, then there's also a transition in the composite system where that validator can take that transition. It sounds kind of... It sounds weird, but basically it's not. The local condition lifts to a distributed one. And so basically, this local component is checking a condition that's distributed across the whole composition. And that's sort of non-trivial because of information disparity between the nodes. So there's a message here being received, and this is the message being sent, and this is the label, and this is a constrained transition, which doesn't necessarily mean that M is valid. However, it's lifted to a valid transition with M being received. So basically, the validity condition of the component is enough to guarantee the validity of the message received in the composition. So basically the component locally is able to verify whether the message has this distributed property. And that's why it's called the validator. Because it's basically able to check something that's outside of its scope. And it's again defined here with respect to a particular composition and a particular constraint on that composition. So you might have different validators for lots of different distributed settings. But we're specifically going to be interested and focused on equivocation, for a reason that I've already talked about, but it sort of reveals at the end why equivocation is so particularly interesting to look at. So the equivocation theory section: we'll talk about evidence, and then using evidence to describe composition constraints and validity conditions that limit equivocation. And then we'll talk about models of equivocation. 
And then all this will tie in nicely when we start talking about Byzantine faults. So this is sort of what we're used to seeing in blockchains when it comes to slashing conditions. This is a starting point, or was a starting point, in our proof of stake research. Basically, when messages have the same sender, they've been, they're like collected by the same node, or like in the same smart contract, you can imagine. And they basically could not have possibly been produced by the sender in a single round of the protocol. So if you run a trace of these things, there isn't a single trace where those two messages are produced by that node. And so this is evidence of equivocation: somehow we have two messages that couldn't have been produced by their sender, and we have them sort of in the same state. This is a sort of faulty behavior, and this is local evidence. And here's an interesting definition. Yeah, yeah, of course. Sorry. If we don't have a history, how do we check that? What? Yes. So that's a good question. I mean, I think it's undecidable in general, but the question is whether it could not have been possibly produced. So basically you need to sort of quantify over all traces and say there is no trace where these two messages can be produced. So in practice, you know, we have lots of simplifying assumptions, like you're guessing, you know? But the definition doesn't say how, you know, how we can come to this decision about whether a message could have been produced. You know, it's okay. That's actually another nice thing about having the validity conditions as sort of like predicates. You can have undefined, sorry, undecidable conditions, whereas, you know, if you're using partial functions, that would be an issue. So global evidence is a little bit more interesting and a little bit more, maybe a little more decidable, right? Because in this we have like a sort of global view of the trace, so we can basically check that. So... Yeah, so this is getting into some later content that I was hoping to... I think I've slightly misordered this. But anyways, if you have a God's eye view of a VLSM trace, and you have a message that wasn't sent by a component, but it was received by some component, that's an equivocation. Sorry. Denise, can you go ahead? No? But I think it would be like a fault or an equivocation. Mm-hmm. Yeah. So here's a theorem, right? That the local equivocation is always going to be less than the global equivocation. And all these are checked in the theorem provers, but you can sort of imagine why that is. And basically, we can use these global and local definitions of equivocations to limit the equivocations, to create basically a composition constraint where the faults are limited. We can easily just say, okay, well, there shouldn't be any equivocation, and talk about VLSM traces where there aren't any equivocations. And we also use the full node assumption to reduce the amount of equivocations, because then you can only sort of get an equivocation from the sender of a message, because you've already received all of its dependencies. We can limit equivocations to just a subset. And we can also assign weights to the nodes and then limit the equivocations by their total weight. These are, like, example conditions in composition constraints or a local constraint. So this is a composition constraint for a validator on a global constraint that looks like this. 
And so, like, in this particular example, this validator is just checking the local equivocation weight, and that it's less than T, while the composition constraint is checking the global equivocation. And the validator property is basically that from the local one, there should be a state where the global one is also satisfied. Basically, the lifting property of the valid state from the local to the distributed property. So, you know, this was talking about basically what equivocation looks like and how to detect it, and therefore how to talk, you know, non-constructively, about traces that have limited equivocation. But we do have a very nice constructive sort of approach, too, where we can describe equivocators, and basically there's two models for equivocation. There's a state equivocator, which basically splits its current state up, or has many states for the same validator. And it can do that by forking or by starting new machines. And it also has... And there's also the message equivocation model, where instead of the state splitting and having multiple copies of a validator, validators can receive messages that haven't been sent. And this sort of is what we're observing in that definition of global equivocation. And it turns out that these two things are equivalent, actually. The traces that you can get from the state equivocations and the message equivocations are the same. Whether you are receiving messages that haven't been sent or splitting up states, if you, like, project down to those equivocator states, we get exactly the same traces. And it's kind of interesting, basically, like, splitting a timeline and communicating across timelines end up producing exactly the same states. And so, these two are models of the same phenomenon, equivocation. And that's why we have those two definitions there, where one of them seems a little bit different than the other. You know, somehow two messages that couldn't have been produced in a single trace evokes the state equivocation, and a message that hasn't been sent yet being received evokes the message equivocation. But they are equivalent. So that's a pretty cool result that's going to be useful later. But basically, to repeat it, the models of equivocation that split the state and the models of equivocation that allow communication from other traces lead to the same traces for validators for limited equivocation. So that means that when you have evidence of equivocation being produced, you can produce that evidence either with state equivocation or message equivocation, and you get exactly the same state, exactly the same evidence. Great. So that's the first two sections. Any questions before the next one? Excuse me. So here we go. Yeah, please. Your microphone is off, sir. Can you try it? I guess in the previous discussion, you kind of assumed finite branching, which means that you cannot make infinitely many copies at the same time. No, we have an unbounded, we have like a, a list, like, unbounded list of copies. Okay, but still finite, right? Yeah, finite, but finite unbounded, yeah. Yeah, because when it comes to infinite messages and states... Yeah, that's a good question. I think we have possibly infinite... traces, but not states and messages? At the moment. Yeah, I guess so. Sorry about that. Yeah. We'll get there. Yeah, so now basically we're gonna do, yeah, go ahead. Yeah, please use the mic here. Microphone, microphone. 
Just hold it closer. Oh, no, never mind. Sorry. Hi. Can you hear me? Yeah, that's great. This is great. Sorry. I think the states can be infinite but not reachable. It's a matter of which are the reachable states. But it matters how many labels you have and that gives you how many moves you can do. But in reality, yes, it's bounded and, yeah. Great. So, let's move to the Byzantine faults. So, basically, we can model Byzantine faults in VLSM by replacing a node with a node that basically has a free behavior that uses labels to send and receive any message at any time. The important behavior is that it can send any message at any time, basically modeling someone that can send any sort of malformed and invalid message at any point. And we do have a little bit of constraints, which is that we don't let them forge messages on other nodes, and we do have a full node assumption. But, you know, they can send any message, you know, signed by them from them, basically, without forged messages inside. And so, we can replace equivocation limited validators with Byzantine components and find that they have exactly the same traces. The ones that aren't replaced have the same traces. So if you have a trace in the equivocation-limited composition, where some set B of validators is Byzantine, they have the same traces as if they're composed with equivocators instead. And basically that's because of the validator property. So if you have a validator property on receiving a message from a Byzantine sender, that means that there is a composite state where that sender, as an equivocator, can validly send that message. Because here we're validating for a limited equivocation setting, so some amount of equivocation is valid in that setting. And so we can replace these Byzantine nodes with equivocating nodes. And then look at the traces of the validators that aren't equivocating and show that they have exactly the same traces. Denise, do you have a question? Okay. And the same result also holds for weight-limited equivocation model. So it's not just for a fixed set, but for under T-limited equivocation weight. All the behaviors of non-equivocating components due to equivocating components is exactly replicated by Byzantine behavior with the same T limit. And so that basically means that under the limited less than T weight equivocation, we get all of the same traces for the validators as limited less than T-weight Byzantine faults. So that's sort of the sort of magical way that we can not use Byzantine fault tolerance. Basically, for these equivocation-limited validators, equivocation faults are exactly as expressive as Byzantine faults because by validating for that limited faulty setting, you know, they're restricting their transitions a lot. And if a Byzantine fault, a Byzantine node can send some malformed message that they receive, that means that that transition can be lifted to a valid state in the composition under the limited T equivocation condition, which means that there are nodes in the composition distributed that satisfy that less than T threshold, but, you know, aren't Byzantine nodes, but equivocation nodes. And then, you know, like putting those transitions together to get traces, we can rebuild exactly the same traces. And so basically this forms an alternative for analyzing faulty distributed systems to Byzantine fault tolerance. And quite simply, you know, by studying equivocation limiting and equivocation faults instead of, and equivocation faults instead of Byzantine faults. 
So somehow equivocation faults are like a special kind of fault where if you validate for limiting equivocation, that's just as good as validating for limited Byzantine faults. Oh, sorry. That's just as good as, sorry, that actually lets you throw out Byzantine fault tolerance analysis altogether when thinking about, like, what traces you could go to. You can sort of just go to the protocol-defined ones, and it doesn't really matter what the Byzantine nodes do. They're basically just protocol- equivocators as far as the analyst is concerned. And so instead of having misbehaving nodes, they just have either like a state replicator or a message passer that sort of crosses timelines. Which is sort of much more tamed types and well defined behavior. So later we're gonna relax the full node assumption and treat synchronization faults. I'm out of time. Thank you so much. Thanks for coming. Really appreciate it. If you have any questions you can find me outside later. Thank you.", "eventId": "devcon-7", - "slot_start": 1731573300000, - "slot_end": 1731574200000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/17eofu9OtkjONNHPpAdEmPg8MIz7E8ahPAxLdRwJsfNY", - "resources_slides": null, + "slot_start": 1731552300000, + "slot_end": 1731554100000, + "slot_roomId": "classroom-d", + "resources_presentation": "https://docs.google.com/presentation/d/1neM1-qHBPiHQ47mw5gGhxKmdlAYMtpZujIccA88zZM8", + "resources_slides": "https://drive.google.com/file/d/1thif2hdl5jczmisfpTqoFwra7_H0YoUg/view", "speakers": [ - "amy-proal" + "vlad-zamfir" ] }, "vector": [ 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -837841,6 +835507,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -838032,6 +835699,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -838295,6 +835963,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -838369,12 +836038,7 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -838404,6 +836068,7 @@ 0, 2, 0, + 2, 0, 0, 0, @@ -838411,10 +836076,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -838424,54 +836085,55 @@ }, { "session": { - "id": "visions-of-a-viable-dacc-biosafety-strategy", - "sourceId": "7VDGQM", - "title": "Visions of a Viable d/acc Biosafety Strategy", - "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", - "expertise": "", + "id": "voices-of-tech-and-open-source-movement-across-asia", + "sourceId": "QCPSDK", + "title": "Voices of Tech & Open Source Movement Across Asia", + "description": "This panel discussion features individuals from the open source communities, developer and user groups across Asia. These figures span different decades and have witnessed various phases of the tech movement, including the rise of open source, in their respective countries. 
Some have been pioneers since the early days, while others have emerged as key players through recent college engagements and grassroots initiatives.", + "track": "Cypherpunk & Privacy", + "type": "Panel", + "expertise": "Beginner", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 610, + "tags": [ + "FOSS", + "regional", + "insights" + ], + "keywords": [ + "FOSS", + "Regional", + "Insights" + ], + "duration": 3310, "language": "en", - "sources_swarmHash": "101751999983d5d26371cafb5bff3f14e153bee0637bd0281e2bbc514d97dd38", - "sources_youtubeId": "oOsKEwKD-jE", + "sources_swarmHash": "61c49c5cdb4bc3d649ccd86731882f2c81639c8d942834f908404b9e8bbc21d7", + "sources_youtubeId": "TsI7-ejb_Ig", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735ab179dbb7a90e1a340ab", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735ab179dbb7a90e1a340ab.vtt", - "transcript_text": " Okay, so welcome to DEAC Biodefense. So I think I'll start off by just briefly recapping how the hell I even got here. Because basically, I started off being the Ethereum guy, and I'm historically not a bio person and more of a math and computer and economics person. I was definitely interested in longevity almost since the beginning. I read Aubrey de Grey's book, Ending Aging, back when I was 13, and that just really impressed me, both the sheer importance of solving aging, but also the fact that there is a roadmap to actually get us there. And so I was excited about longevity the whole time ever since. But then for DEAC Biodefense in particular, in 2021, there was this big meme coin mania that I'm sure you guys all remember. So first in 2016, there was Dogecoin, and Dogecoin went up really high. And then during the 2020-21 bubble, a bunch of people basically started like copycatting Dogecoin and making their own dog-themed coins and hoping that they'll go up to. And so some of these people decided as a strategy that they would put a large portion of their supply into my wallet balance and then publicly say Vitalik Buterin supports this coin, even though I never touched it. And some of these coins just like went up way higher than they should. And so one of those big coins was called Chiba Inu. There were also others. And I sold a bunch of them. And I donated some to a couple of organizations, one of which is Crypto Relief India. So at the time in India, there was this big COVID crisis that was happening. Lots of people were getting sick. It looked very bad from outside. It looked like no one was really caring about the situation, so I came in and donated to this group that originally Baljie introduced me to. First I donated a little bit, and I donated a amount of Shiba tokens that I thought would be worth a few million dollars at most, but then they ended up managing to cash out like literally $470 million dollars and so after that we yeah so 470 million ended up being like more than crypto and relief india needs for its whole roadmap and i was also started researching and understanding COVID and pandemic-related topics more heavily. And Balvi, between myself and some of my science advisors, basically slowly formed and ended up spending a significant chunk of that meme coin funding on basically moonshot topics that deal with all kinds of anti-pandemic related efforts in general. So biodefense is important, right? So compared to the natural environment, population density is way higher than before. 
Urbanization continues to increase. We have very easy worldwide air travel. In the best case scenarios, we might even have like rockets that will take us from New York to Bangkok in one hour but I mean Eli Dorado told me that like that's probably not going to be commercialized anytime soon because rockets are a little too dangerous but we have supersonic jets they might take us there in like four or five hours and so I expect air travel to just to be even more ubiquitous. Factory farming creates lots of pandemics, man-made pandemics so gain-of-function research", + "sources_streamethId": "67343e109dbb7a90e1d6c5ab", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67343f599dbb7a90e1f03f5b.vtt", + "transcript_text": " Hello everyone, my name is Rim. I'm a software engineer at BoxScout, which is an open source Box Explorer. And today I want to invite you to try to verify a couple of contracts along with me. Let's start with answering the question, what the contract verification is. When you deploy the contract onto the chain, it is represented as a number of bytes, which Ethereum virtual machine can understand and execute. And there are no solidity of IP sources stored inside the blockchain. So when indexers index the contract data, all we can see are those two bytecode values, which are contract creation code and contract and time of deployed codes. But most of the people are not good in understanding what the in understanding the raw sequence of bytes and what we usually want to see what is represented in the picture below so we ask our developers to send those sources to us and we recompile them and check that those actually correspond to the on-chain code values. And this is the contract verification. And today I want to verify a couple of contracts with you. So let's start with the simple one. Represented by those two code values here. What do we need from the user to verify the contract? First of all, we need the source files themselves, of course. And let's assume that this tricky storage contract is our potential candidate. It is tricky just because it adds some magic number before storing it inside the storage. That's it. Also, we need the compiler version, which the contract has been compiled with, and the compilation settings. With all that information, our next first step is just to combine all of that into the standard JSON input format, which has all information just in one file. We submit this JSON to the compiler, and what returns us back is the standard JSON output, which is quite big usually, but what is important here for us is that it returns two bytecode values, compiled creation and compiled runtime code values. So what we have to do here is to just take those two bytecodes and compare them. Do they match? Yep, they match, so that's it. Actually, is it always that easy to verify the context, though? Let's look at a little bit more complex example here, which is where we used as external library for making the addition operation, and external libraries as the contact codes, which are deployed once at some address, and when our contacts can link their addresses inside themselves, and reuse their functions by delegate call opcode. So we'll do the same transformations as before, and we'll get two bytecodes as well. But do they match? Well, we can see that there is a strange, not even a hex part inside the compiled creation code, which does not correspond to the on-chain value. So why it happens? 
Actually, this is the place where the library address should be put, but as we haven't provided it to the compiler during the compilation, it just doesn't know what to put inside and places some placeholder instead. And our question is how to verify such contracts. Luckily for us, there is a special section inside the standard JSON output, which is named link references, and which for each unlinked library contains some information about where this library address should be placed inside: specifically, the first byte where it should be placed at, and its length, which is always 20 bytes. So what we need to do is just to take the specified offset value, then take the next 20 bytes from the on-chain code and substitute them inside the compiled code. So do those two bytecodes match now? Yes, they do. Luckily for us. So here we are, we've just verified the second contract for today. In general, such replacements, we name them code transformations, and those are some actions which may be applied to the compiled code before, during, or after the deployment process, and which change its bytecode a little bit, but which keep the functionality the same. And there are currently five of such transformations we know about and support. And we've talked about the libraries, but there are four more we don't have time to talk about today. But if you are interested, you might just follow the QR link and see some more information about them. So, also, I think the last slide, my presentation title was Verifier Alliance, the first part of it. And I haven't talked about that a lot. But if you are interested in that part as well, you are welcome to the panel which will take place today at 5.30 p.m., where Blockscout, Sourcify, Routescan, the members of this Verifier Alliance initiative, will describe this a little bit more and talk about verification as well. Thank you. I think that's it. Thank you, Reem. We have questions for Reem? Oh, okay. This is Mike, too. This is pretty awesome, actually. Why is it so difficult to have decentralized contract verification? We use services like Blockscout, Etherscan, but why after all these years is the experience still so bad in general? Well, I think it happens a lot because you have to store this contract somewhere, first of all, and there's Sourcify, which tries to decentralize the storage process itself. But actually, what is more important here, there were a lot of different formats, and, like, different explorers use their own formats to store this data inside; Sourcify uses its own data. And one of the Verifier Alliance initiative's ideas was to develop the schema in which all contracts should be stored, and with that actually we are going to have just one database of all verified contracts shared between different verification providers, and I hope that will help to increase the decentralization of this data. So we're going to share some database dumps that open access to the database, maybe, and hopefully that will work. All right. Shoot. In the verification part for contracts that use a library, it looks like we are using the referenced bytecode from the deployed bytecode. Is that safe? Yes, that is safe, because after the compilation we've seen that, with 20 bytes, the library address was assumed to be put inside those 20 bytes by the contract code itself, and this address can be anything, actually, so we just take the actual value: we assume that the on-chain code should also contain the library address at this place, and take it as our address. 
So it's actually safe just because this offset was in the standard JSON output section. Alright, thank you so much for this session. Please help me appreciate our amazing speaker, Ren.", "eventId": "devcon-7", - "slot_start": 1731567600000, - "slot_end": 1731568200000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1yGQBHJnRzdZfi9mog9ipmc0zYrZB2nzXpEG4mGwCGko", - "resources_slides": null, + "slot_start": 1731468600000, + "slot_end": 1731472200000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1ADQtojPz5zGpvoa8L2aH0vcyddEYsowQH6-jcNkUIMU", + "resources_slides": "https://drive.google.com/file/d/1x7Q40vFhsOaM-PITqgtBU7cZReJH6-sK/view", "speakers": [ - "vitalik-buterin" + "hong-phuc-dang", + "mario-behling", + "brianna-chang", + "mishari-muqbil" ] }, "vector": [ - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -838649,7 +836311,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -839175,6 +836836,10 @@ 0, 0, 0, + 6, + 6, + 6, + 6, 0, 0, 0, @@ -839546,6 +837211,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -839747,6 +837413,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -839773,7 +837441,6 @@ 0, 2, 0, - 0, 2, 0, 0, @@ -839792,53 +837459,44 @@ }, { "session": { - "id": "visual-code-of-cypherpunk-and-lessons-from-subcultural-aesthetics-we-should-remember-on-the-road-to-mass-adoption", - "sourceId": "ZAYEXK", - "title": "Visual code of cypherpunk, and lessons from subcultural aesthetics we should remember on the road to mass adoption", - "description": "I want to take builders on the turbulent ride through how subcultural and social movements used their visual codes when spreading globally, and what design tasks are still ahead of us on the way to making Ethereum cypherpunk again and onboarding the next billion users to Web3 at the same time.\r\n\r\nThis ride will include three stops:\r\n1. waving one's emotional state into the collective identity\r\n2. using shared aesthetics as a signal of belonging\r\n3. coordinating a collective design process.", - "track": "Cypherpunk & Privacy", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Community", + "id": "voting-with-time-commitment", + "sourceId": "7V7QNK", + "title": "Voting with time commitment", + "description": "Token-based voting mechanisms employed by DAOs can encounter three potential problems: plutocracy, Sybil attacks and vote buying. If one were to design a voting mechanism from scratch, how does one ensure that these issues are addressed adequately down the road? 
This talk aims to provide some intuition for the trade-offs faced when tackling these problems in general, and the role of time commitment in alleviating these issues, in particular.", + "track": "Cryptoeconomics", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Coordination", - "Identity", - "Design", - "communication", - "Coordination", - "Design", - "Identity" + "Governance", + "Mechanism design", + "voting", + "Governance", + "Mechanism design" ], "keywords": [ - "culture", - "aesthetics", - "communication" + "Voting" ], - "duration": 758, + "duration": 1534, "language": "en", - "sources_swarmHash": "3dd85d3f006a11a867fed6fa39a4901b593753cc0b383f9af7098ac9914b54ce", - "sources_youtubeId": "aUkVqsDW6t4", + "sources_swarmHash": "ae548aae445f4151f042de6c1fc3c06468c5bc76bf04a3bf33063c4dbff22215", + "sources_youtubeId": "CYrmSPVuGqs", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67388bae1b0f83434d2b7c95", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67388bae1b0f83434d2b7c95.vtt", - "transcript_text": " Let's see in the wipe. So we all want mass adoption for our products and we want this products to be loved and used by many. And yet we have one heavy anchor that will always hold us back from mass adoption – a visual language or cypherpunk. Let me show you what's happening. Visual language consists of two core layers. The symbols – they carry meaning and cultural baggage. And the styles – Styles evoke emotions. They always go together when it comes to how we respond to visual messaging. A positive symbol in a positive style evokes joy. A positive symbol in a negative style makes rather negative impact. Think about toys in the horror movies. A negative symbol in a positive style can cause confusion, but rather it brings amusement. And think about Halloween. And a negative symbol in a negative style evokes nothing like fear and disgust. People try to distance themselves from everything that lives in this quarter. Well, adoption lives there. How do you think where cyberpunk is? Let's see. The first symbol, an anonymity mask, traces back to guy folks who earned a reputation of a failed terrorist and actually never fought for democracy. Well, Alan Moore tried to recover this image in the comics V for Vendetta, making it a symbol of vigilantism, but later Hollywood successfully shifted the message, adding a narrative with a fear, creating taglines, and a lot of blood. Today, it's a symbol of cypher funks. Now, here you already see a second symbol, hoods, that was originally associated with monks, the knowledge keepers, which is a good beginning. But later, they became a symbol of secret societies, many of which practice ritual murder and dark magic, if such a thing exists. And recently Hollywood dressed all the assassins in hoods, adding a nice layer of death and blood to the overall image. Today hoods is a symbol of cypherpunks. Well, here you already see the next symbol, which is a common line romance, which again by Hollywood, in a form of digital reign, together with the techno style, presented technology as a global threat to everyday life. And the last style I would like to mention is glitch effect, which was a way for early internet artists to present, to visualize a system vulnerability and the beauty of breaking it. But for normal people, the glitch effect evokes the same feeling as breaking glass. 
Now, how do you think where on this matrix cipherfunks are? Well, fortunately, we have a century of subcultural movements that can teach us how to ease this tension between our heritage and the past and the mass adoption needs. So here are three main lessons I want cypherpunks to hear. First, choose joy as a strategy. Cypherpunk looked like it was designed for post-apocalypse. But privacy should not look like paranoia. It should look like freedom. Just look how pride movement turned the protest into a celebration. They didn't promote a better hiding spaces. They made visibility powerful and joyful itself. And you may say, meh, rainbows. But people love rainbows. Ethereum love rainbows. So we already want this joy. Next lesson, aim for simplicity. Cyberpunk visuals require technical tools to reproduce them. But if your grandma and even you cannot draw the symbol with anything that you have in your hands right now, it will not spread. Extinction Rebellion teaches us how to do. They created a symbol which a child could draw in seconds and every human could reproduce with anything they have around. They later gave this symbol to communities and communities style it with their own wipes to represent their emotions around the movement and the movement went global. And last example, embrace evolution. Black panthers used powerful yet very aggressive imagery, usually containing weapons. Because of this aggression, the movement didn't achieve the reach they hoped for, but the movement did not die. The new generation learned from the past and took over and moved from displaying weapons to displaying words, from we protect ourselves to we deserve to be here. And they reached millions and more. So if I were here, an incarnation of all the subcultural movements from the past, I would ask our ecosystem to not make Ethereum cypherpunk again, but to make it a new cypherpunk. And maybe, just maybe, it will also have a different name. Thank you. Thank you very much. So we have a bit of time to ask a few questions. Raise your hand if you have any questions. Over here. Who's doing it right? Who can you call out or are there any examples that you've seen this week of different ways to evoke this sentiment in our ecosystem visually? I mean, who is doing the right cypher form style or who is doing the right in general? The visual styling that you've described and the evolution of that expression, have you seen any of it in person? We are here right now. So we have some more questions over there. Hello. Thank you for the talk. So my question is, from the negative examples that you presented, it seems like a lot of them were kind of, you know, serious, tough, masculinity focused. And crypto as an industry is also quite masculine in terms of just people who tend to, you know, run the companies. If we gather up everyone who works in crypto, mostly it's men. uh one of the positive examples that he had was you know the pride flag for example which is something that is quite challenging to traditional masculinity so how much do you think that plays into the current branding issues that crypto experiences thanks well I'm so glad you asked this because like maybe 20 minutes before the talk I just cut off one slide because I needed to fit in five minutes. But after the glitch effect, what I wanted to say is the cypher fund imagery has a very heavy gender coding and is all white male narrative, which is exactly what you tell about. Well, all those movements that I showed, they actually show the equality of everyone. 
And what I really love in Ethereum here right now in DEF CON is we have more of non-white people. So it's really good that white people are not dominating here. Also, if you study cyberpunk movement itself, there were a lot of regional directions who actually didn't go for this american uh white narrative cyberpunk and actually uh for the original cyberpunk movement hollywood did a lot of bad job for us because as much as we were trying to narrate uh well developers don't do a lot of good job for the visual communication so they did a lot of good job for the visual communication, so they did a lot of good stuff for vision building, but Hollywood knows how to show masses what exactly the government wants to see, and in general, nothing goes on the screen which can harm the government. So even V for Vendetta, the author of V for Vendetta wasn't involved in the movies because he didn't agree with the flatness and bloodness of the movie creation. The original comics had a very deep narrative that you actually need to see where he comes from. Did I answer your question? Thank you. So there's a last question over there. Okay if I may I would like to have like a very small comment. I absolutely do agree with you that the imagery we are using is holding us back and this is like spot-on well job, but the job well done. But none of the images that you showed are actually connected to the original cypherpunk movement because they didn't have any image. It was just a mailing list, a very traditional mailing list that didn't even allow to attach any imagery. The closest they were getting to having any visual identity is when three of the people from the original cypherpunk movement were featured on the cover of Wire magazine and they were wearing masks, but they were very different masks. They have nothing to do with Guy Fawkes and it was not their idea to wear the mask. That's one thing. Second, they used one pretty cool image that became really associated with them, but not with the movement, but one of the campaigns that they were running. In the early 90s, there was this danger that U.S. government will impose a surveillance mechanism on a communication with, like, Clipper project. And what cypherpunks did was, like, basically they hijacked Intel logo, and instead of Intel inside, they put Clipper inside. And this became, like, a sticker that they were distributing in many, many places. And so this was, like, quite successful. But everything else that you showed has absolutely nothing to do with cyberpunks. And one last thing, cyberpunks, the name itself, it's not that they created. They were not considering themselves to be a counter-cultural movement. It was basically a joke from someone who was looking at them and commenting how they are like approaching things. But they were not punkish at all. Absolutely. Yes. And that's how branding works. It's not what you say about you. It's what others say about you. So I encourage you, open your laptop and Google hacker icon and see what you have in dance just google cypher funks and see what google gives you and it's yeah that's the whole point there the world and we know which powers are engineering the reputation of cypher funks which is going to the minds and hearts of people outside of crypto bubble. 
And the call is not being passive around this, but actually do actions to create the image that we want to have, not what happens because some other very...", + "sources_streamethId": "673476f19dbb7a90e139f8dc", "eventId": "devcon-7", - "slot_start": 1731495000000, - "slot_end": 1731495600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1JfZtSjos8JrMCOBp9B9xIaU5dMAfVMzayGYW7eA5F7Q", - "resources_slides": null, + "slot_start": 1731489600000, + "slot_end": 1731491400000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1P434UTSmq4E68DmH8ddDjupGoA0DAAfW5KIZ-umwqaM", + "resources_slides": "https://drive.google.com/file/d/1ioMqFQEfmNl59Tzdg4sHBg-fwiRr9Yep/view", "speakers": [ - "ira-nezhynska" + "vijay-mohan" ] }, "vector": [ - 0, - 0, - 0, 0, 0, 6, @@ -840544,44 +838202,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -840589,6 +838209,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -840631,12 +838252,12 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -840720,6 +838341,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -840730,7 +838352,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -840742,7 +838363,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -840851,7 +838471,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -840987,6 +838606,35 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -841152,7 +838800,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -841163,6 +838810,15 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -841172,59 +838828,51 @@ }, { "session": { - "id": "vlsmsanalyzing-faulty-distributed-systems", - "sourceId": "AKRLKH", - "title": "VLSMs—analyzing faulty distributed systems", - "description": "Validating Labeled State transition and Message production systems (VLSMs) provide a general approach to modeling and verifying faulty distributed systems. With formal definitions of validation and equivocation, we are able to prove that for systems of validators, the impact of Byzantine components is indistinguishable from the effect of the introduction of corresponding equivocating components. 
All of the results presented in this talk have been formalized and checked in the Coq proof assistant", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Expert", - "audience": "Research", + "id": "wallet-infra-improvements-and-building-apps-for-the-next-generation", + "sourceId": "RQAAFS", + "title": "Wallet Infra Improvements, and Building Apps for the Next Generation", + "description": "In this talk I go over infrastructure innovations that are happening in the space right now, how they’re going to be how we bring the next wave of users into crypto, and why right now is the best time to build a consumer app in this space.", + "track": "Usability", + "type": "Lightning Talk", + "expertise": "Beginner", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Consensus", - "Distributed validator technology", - "Formal Verification", - "correct-by-construction", - "Consensus", - "Distributed validator technology", - "Formal Verification" + "Accessibility", + "Account Abstraction", + "Architecture", + "Frameworks", + "Gas", + "Intents", + "Payment", + "UI/UX" ], "keywords": [ - "Correct-by-construction" + "wallet", + "dapps", + "" ], - "duration": 1787, + "duration": 538, "language": "en", - "sources_swarmHash": "faf3bda887c2724dd5bd923f3f360e2226fc675126f2a2e5d499b3311e2a1db3", - "sources_youtubeId": "loyKzWQlyEo", + "sources_swarmHash": "cb6c67ca170aa1455e17b9c03261ca9968617fe7b6d4a82cb7a707ec3e730e25", + "sources_youtubeId": "GUZ33tEdOJw", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67358e2b9dbb7a90e1a57339", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67358e2b9dbb7a90e1a57339.vtt", - "transcript_text": " Testing, testing. Hi, everyone. Hi, hi. Yeah, I'll take this mic, actually. Thank you for coming to this early morning talk. It's going to be a talk on formal verification and like some basic methods in distributed systems and reasoning about faulty distributed systems. All of the, everything presented here has been formally verified by the team at Runtime Verification and is, like, available to, like, click and check. And you can look at the proofs in the talk, but this talk is not going to be focused on the proofs, more just definitions and theorems and not really walking through the proofs. But you can check them out. And there's a... So I've separated a bunch of sections, a section on validation theory, a section on equivocation theory, and then on relating and reducing Byzantine faults to equivocation faults in the context of validators. So let's kick it off. So let's talk about validation. And so here's the slides where I guess you'll... So this is the name of the paper. Validating labeled state transition message production systems. So it's for modeling distributed systems, faulty distributed systems, and all these amazing people worked on it for a long time. Here you can scan this QR code and pull up the PDF if you like. I'll show this also again later. So here's the validation theory section outline. Basically, we're going to go through the definition of this model, its compositions, and then the definition of validator, and then move on to the equivocation section. So here's the first definition here. So a VLSM is a tuple, you know, as we often like to define these things. It's sort of like a state transition system like you're normally used to, except for it has a few other things, like a label, which is also not too unconventional. 
But it has a first order message set in the definition. It has initial states and initial messages. And then there's a transition function that takes labels, states, and messages, optional messages, and gives us states and optional messages. This is like the state transition slash message receipt and production function that describes for a particular VLSM, sort of what's happening computationally, you can imagine. And then they're also equipped with another thing, which is why they're called validating, this beta, which is a validity condition. And it basically says, it's going to be valid to transition on a label from a state given a message. So when you're in a particular state and you receive a message and you might want to transition, there might be multiple possible transitions each identified by a label. And some of them might be basically banned. Even though they're defined by the transition, the validity condition won't let you take that transition. So this is like a state transition system with native messages, with initial state messages, and with a transition function that's totally defined over this domain. And then we sort of restrict it, effectively making it partial with this validity condition. This is like the validation condition. Basically, we're going to imagine that these things don't transition, even though the transition is defined, when the validation condition is not satisfied. And so that's the definition, but actually, I haven't told you how to get the states and messages, but it's sort of what you would expect. You start at the initial states and the initial messages, and then we build up this fixed point by taking the union of all the states that you get from the transitions and all the messages that you get from the transitions and transitioning from those states using those messages. So basically starting from the initial states and messages, transitioning and sending all the messages that are produced from there and receiving them and doing it over and over again, basically, until you even get stuff like a node in an early state receiving a message from some other state. This really is a big fix point. And so there is an interesting thing that can sort of happen, which is the validity condition can be satisfied even when an input message is invalid. So we have to slightly distinguish between just the validity condition being satisfied and the actual trace being valid. The trace is valid only if all the input messages are also valid. So, you know, no garbage in sort of allowed in a valid message. So that's basically the definition of VLSM. And then they have a pretty natural way to compose them. And so, now I'm going to go through that definition. Unless someone has, like, a question now before the composition. Okay. So, basically, what we're going to do is we're going to take a disjoint sum of the components for the label. So we identify the label, which component it is. The state is a tuple of the component states. The initial states is a tuple of the initial states. And then we have like a union of the messages for the initial messages. And then here, actually, we're composing of VLSMs that have the same message type. This is just so that all our transitions are defined and everything. And to let them send messages to and from each other, so they're not just independently. Like if that was a disjoint union, they wouldn't be communicating. So we have a disjoint union for the labels, tuple for the state, and then a regular union for the messages. 
And then in the free composition, we have a transition that basically just affects only one component exactly according to the transition that would have had before the composition and checks the validity condition only of that component exactly like it was before. So very little, basically nothing being done by the composition except for transitioning the individual components and checking their individual composition, sorry, their individual validity conditions individually. There's no composition-wise constraints, but they can message each other in this thing. And then, you know, note also that this is also VLSM, and that's why it's like sort of a composable model, you know, same type of definition and everything. Yeah? So the split up of the transition functions and the validity where they are purely mathematical? So the question is, is the split up between the transition and the validity purely mathematical? I mean, I guess it's for the sake of convenience when dealing with the math and... So, but, I mean, more more traditionally in math you'd use like a partial function I guess whereas here we're about to get into this conversation about validation and distributed systems and what can happen in a distributed systems that you might not be able to tell locally about and so it gets there's there there is a reason why we are thinking about the validation at different levels. And actually, and that actually does sort of spell it out. Basically, there's going to be, actually, the next slide, we're going to apply, we're going to talk about constraint compositions where there's an additional constraint on the, so basically, like, this constraint composition just basically conjuncts a constraint on top of the, uh, validity, on top of the validity constraint of the free composition, which just is the individual constraints applied independently. So this, this, this composition constraint, um, you know, lets us sort of analyze, uh, things a little bit more conveniently than just a partial function approach. I guess you want to be able to see that a transition would be possible, but it's not there. Yes, so the question is if we're doing this to try to see if a transaction is possible but not valid. And yeah, you're very much going in the right direction. And we're getting there to try to see if a transaction is possible but not valid, and yeah, you're very much going in the right direction. And we're getting there with this definition here. So, this is the definition of validator. It's a very natural, simple definition of validator, which is kind of useful in many different contexts. So, in a... And so, the components are a validator for the composition basically they're checking if they're truly a part of that composition and they're sort of you know only transitioning as if they are a part of that composition even though they don't know it per se so let me just go through this definition so basically a component in a constrained composition is a validator. If any transition that that component can make can be lifted to a valid transition in that composition. So if the component has a valid transition, then if a validator has a transition, which it can take, then there's also a transition in the composite system where that validator can take that transition. It sounds kind of... It sounds weird, but basically it's not. The local condition lifts to a distributed one. And so basically, this local component is checking about a condition that's distributed across the whole composition. 
And that's sort of non-trivial because of information disparity between the nodes. So there's a message here being received, and this is the message being sent, and this is the label, and this is a constrained transition, which doesn't necessarily mean that M is valid. However, it's lifted to a valid transition with M being received. So basically, the validity condition of the component is enough to guarantee the validity of the message received in the composition. So basically the component locally is able to verify whether the message has this distributed property. And that's why it's called the validator. Because it's basically able to check something that's outside of its scope. And it's again defined here with respect to a particular composition and a particular constraint on that composition. So you might have different validators for lots of different distributed settings. But we're specifically going to be interested and focused on equivocation for a reason that I've already talked about, but it sort of reveals at the end why equivocation is so particularly interesting to look at. So the equivocation theory section, talk about evidence, and then using evidence to describe composition constraints that limit, and validity conditions that limit, equivocation. And then we'll talk about models of equivocation. And then all this will tie in nicely when we start talking about Byzantine faults. So this is sort of what we're used to seeing in blockchains when it comes to slashing conditions. This is a starting point, or was a starting point in our proof of stake research. Basically, when messages have the same sender, they've been, they're like collected by the same node, or like in the same smart contract, you can imagine. And they basically could not have possibly been produced by the sender in a single round of the protocol. So if you run a trace of these things, there isn't a single trace where those two messages are produced by that node. And so this is evidence of equivocation: somehow we have two messages that couldn't have been produced by their sender, and we have them sort of in the same state. This is sort of faulty behavior, and this is local evidence. And here's an interesting definition. Yeah, yeah, of course. Sorry. If we don't have a history, how do we check that? What? Yes. So that's a good question. I mean, I think it's undecidable in general, but the question is whether it could not have been possibly produced. So basically you need to sort of quantify over all traces and say there is no trace where these two messages can be produced. So in practice, you know, we have lots of simplifying assumptions, like you're guessing, you know? But the definition doesn't say how, you know, how we can come to this decision about whether a message could have been produced. You know, it's okay. That's actually another nice thing about having the validity conditions as sort of like predicates. You can have undefined, sorry, undecidable conditions, whereas, you know, if you're using partial functions, that would be an issue. So global evidence is a little bit more interesting and a little bit more, maybe a little more decidable, right? Because you, in this, we have like a sort of global view of the trace. So, we can basically check that. So... Yeah, so... So this is getting into some later content that I was hoping to, I think I've slightly misordered this. 
But anyways, if you have a God's eye view of a VLSM trace, and you have a message that wasn't sent by a component, but it was received by some component, that's an equivocation. Sorry. Denise, can you go ahead? No? But I think it would be like a equivocation or a thought-form equivocation. Mm-hmm. Yeah. So here's a theorem, right? That the local equivocation is always going to be less than the global equivocation. And all these are checked in the theorem provers, but you can sort of imagine why that is. And basically, we can use these global and local definitions of equivocations to limit the equivocations to create basically a composition constraint where the faults are limited. We can easily just say, okay, well, there shouldn't be any equivocation and talk about VLSM traces where there aren't any equivocations. And we also use the full node assumption to reduce the amount of equivocations because then you can only sort of get an equivocation from the sender of a message because you've already received all of its dependencies. We can limit equivocations to just a subset. And we can also assign weights to the nodes and then limit the equivocations by their total weight. These are, like, example conditions in composition constraints or a local constraint. So this is a composition constraint for a validator on a global constraint that looks like this. And so, like, in this particular example, this validator is just checking the local equivocation weight and if it's less than T when the composition constraint is checking the global equivocation. And the validator property is basically that from the local one, there should be a state where the global one is also satisfied. Basically, the lifting property of the valid state from the local to the distributed property. So, you know, this was talking about basically what equivocation looks like and how to detect it and therefore how to talk about, you know, non-constructively traces that have limited equivocation. But we do have a very nice constructive sort of approach to where we can describe equivocators and basically there's two models for equivocation. There's a state equivocator, which basically splits its current state up or has many states for the same validator. And it can do that by forking or by starting new machines. And it also has... And there's also the message equivocation model, where instead of the state splitting and having multiple copies of a validator, validators can receive messages that haven't been sent. And sort of this sort of is what we're observing in that definition of global equivocation. And it turns out that these two things are equivalent, actually. The traces that you can get from the state equivocations and the message equivocations are the same. Whether you are receiving messages that haven't been sent or splitting up states, if you, like, project down to those equivocator states, we get exactly the same traces. And it's kind of interesting, basically, like, splitting a timeline and communicating across timelines end up producing exactly the same states. And so, these two are models of the same phenomenon, equivocation. And that's why we have those two definitions there where one of them seems a little bit different than the other. You know, somehow two messages that couldn't have been produced in a single trace evokes a state equivocation, and a message that hasn't been sent yet to being received evokes the message equivocation. But they are equivalent. 
So that's a pretty cool result that's going to be useful later. But basically, to repeat it, the models of equivocation that split the state and models of equivocation that allow communication from other traces lead to the same traces for validators for a limited equivocation. So that means that when you have evidence of equivocation being produced, you can produce that evidence either with state equivocation or message equivocation, and you get exactly the same state, exactly the same evidence. Great. So that's the first two sections. Any questions before the next one? Excuse me. So here we go. Yeah, please. Your microphone is off, sir. Can you try it? I guess in the previous discussion, you kind of assumed finite branching, which means that you cannot make infinitely many copies at the same time. No, we have an unbounded, we have like a, a list, like, unbounded list of copies. Okay, but still finite, right? Yeah, finite, but finite unbounded, yeah. Yeah, because when it comes to infinite messages and states... Yeah, that's a good question. I think we have possibly infinite... traces, but not states and messages? At the moment. Yeah, I guess so. Sorry about that. Yeah. We'll get there. Yeah, so now basically we're gonna do, yeah, go ahead. Yeah, please use the mic here. Microphone, microphone. Yeah, please use the mic here. Microphone, microphone. Just hold it closer. Oh, no, never mind. Sorry. Hi. Can you hear me? Yeah, that's great. This is great. Sorry. I think the states can be infinite but not reachable. It's a matter of which are the reachable states. But it matters how many labels you have and that gives you how many moves you can do. But in reality, yes, it's bounded and, yeah. Great. So, let's move to the Byzantine faults. So, basically, we can model Byzantine faults in VLSM by replacing a node with a node that basically has a free behavior that uses labels to send and receive any message at any time. The important behavior is that it can send any message at any time, basically modeling someone that can send any sort of malformed and invalid message at any point. And we do have a little bit of constraints, which is that we don't let them forge messages on other nodes, and we do have a full node assumption. But, you know, they can send any message, you know, signed by them from them, basically, without forged messages inside. And so, we can replace equivocation limited validators with Byzantine components and find that they have exactly the same traces. The ones that aren't replaced have the same traces. So if you have a trace in the equivocation-limited composition, where some set B of validators is Byzantine, they have the same traces as if they're composed with equivocators instead. And basically that's because of the validator property. So if you have a validator property on receiving a message from a Byzantine sender, that means that there is a composite state where that sender, as an equivocator, can validly send that message. Because here we're validating for a limited equivocation setting, so some amount of equivocation is valid in that setting. And so we can replace these Byzantine nodes with equivocating nodes. And then look at the traces of the validators that aren't equivocating and show that they have exactly the same traces. Denise, do you have a question? Okay. And the same result also holds for weight-limited equivocation model. So it's not just for a fixed set, but for under T-limited equivocation weight. 
All the behaviors of non-equivocating components due to equivocating components is exactly replicated by Byzantine behavior with the same T limit. And so that basically means that under the limited less than T weight equivocation, we get all of the same traces for the validators as limited less than T-weight Byzantine faults. So that's sort of the sort of magical way that we can not use Byzantine fault tolerance. Basically, for these equivocation-limited validators, equivocation faults are exactly as expressive as Byzantine faults because by validating for that limited faulty setting, you know, they're restricting their transitions a lot. And if a Byzantine fault, a Byzantine node can send some malformed message that they receive, that means that that transition can be lifted to a valid state in the composition under the limited T equivocation condition, which means that there are nodes in the composition distributed that satisfy that less than T threshold, but, you know, aren't Byzantine nodes, but equivocation nodes. And then, you know, like putting those transitions together to get traces, we can rebuild exactly the same traces. And so basically this forms an alternative for analyzing faulty distributed systems to Byzantine fault tolerance. And quite simply, you know, by studying equivocation limiting and equivocation faults instead of, and equivocation faults instead of Byzantine faults. So somehow equivocation faults are like a special kind of fault where if you validate for limiting equivocation, that's just as good as validating for limited Byzantine faults. Oh, sorry. That's just as good as, sorry, that actually lets you throw out Byzantine fault tolerance analysis altogether when thinking about, like, what traces you could go to. You can sort of just go to the protocol-defined ones, and it doesn't really matter what the Byzantine nodes do. They're basically just protocol- equivocators as far as the analyst is concerned. And so instead of having misbehaving nodes, they just have either like a state replicator or a message passer that sort of crosses timelines. Which is sort of much more tamed types and well defined behavior. So later we're gonna relax the full node assumption and treat synchronization faults. I'm out of time. Thank you so much. Thanks for coming. Really appreciate it. If you have any questions you can find me outside later. Thank you.", + "sources_streamethId": "67357fa59dbb7a90e11d9f1f", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67357fa59dbb7a90e11d9f1f.vtt", + "transcript_text": " Cool. Hey guys. So as you can see, I actually changed the title of my talk really last minute. But my name is Meida Kothari and I lead product for all of the wallet related things at Uniswap Labs. So in this talk, I want to cover some of the incredible infrastructure innovations that have happened over recent years, and why I think they're going to bring in the next wave of users in crypto. Forgot my little thing. So when we talk about onboarding the next billion users, often the narrative online is focused on blaming crypto UX for why we haven't had mass adoption yet. But what does better Crypto UX really mean? Like if you look at a lot of our apps, they're already beautiful, they're clear, they're comparable to Web2. And based on recent breakout successes for dApps like Polymarket or TradingBots or whatever you use, I feel like it's safe to say that users are willing to overcome a lot of friction to do what they want. 
In my opinion, what crypto often lacks is more opinionated products. And what a lot of these infrastructure innovations that I'm about to highlight do is give developers optionality to build more opinionated products. Historically, I think we've put a lot of burden of choice on users for every micro decision, whether it's how they store their seed phrase, whether it's how they pay gas, whether it's what chain they're on, everything else. So I find a more useful framing for the crypto UX problem is how do we make it easier for users to do more with their balance? Before I get into these innovations, I kind of wanted to set the framing and identify core problems to a really quick case study. So for this, I'm going to use my dad as an example. He's a software engineer. He's technologically savvy. He trades a lot on apps like Robinhood and E-Trade for stocks. And he knows what ETH and Bitcoin are and holds some but doesn't really know much about it. He doesn't really know about ERC-20s, L2s, the underlying technology, anything else. So he somehow finds out about a token. This actually happened a few months ago. Let's call it Blah Token. And he asked me for help. And so I helped walk him through how to buy it. So I walk him through the process. He only has crypto in his Robinhood centralized exchange account right now. And so I had to guide him through downloading a non-custodial wallet, explain what that non-custodial word meant and then what a seed phrase was and why it was so important. And then I had to walk him through transferring his ETH from the centralized exchange into this new wallet. But oh, it turns out the asset was on base and he didn't have ETH on base to pay the gas and so I had to explain what bridges were, what L2s were, what gas was, and then we had to bridge his ETH over to base. And then he was finally able to swap the asset using Uniswap. But of course, he had to reserve some of that ETH for gas. That took so many steps, so much education, so much time. And one of the biggest issues I think all of us see in the space right now is onboarding. And my dad specifically in this case was a very high intent technical user. And it took him several minutes to create a wallet and was thrown so many new concepts right up front. Currently, I feel like we're taking high intent users who probably come in wanting to do something, wanting to try out a wallet or buy an asset, whatever. And we end up surfacing so many friction points for them that they end up churning before even completing the action that they initially set out to do. Luckily though, there are so many infrastructure advancements that have been made already that make this whole process easier. There are quite a few, but for the sake of time, I want to focus on four. So the first one is account abstraction, specifically through EIP-7702. So account abstraction and smart contract wallets have been in the zeitgeist for a while now, but one of the biggest issues with most of these implementations is that they're not compatible with EOAs, which are the most popular form of existing wallets. 7702 basically allows EOAs to get some, not all, of the benefits of account abstraction without forcing users to fully migrate into a smart contract wallet. So while this isn't fully comprehensive account abstraction, it does two things that I think are really exciting. One is gas abstraction. So in my dad's case, he wouldn't have to worry about the concept of gas or gas tokens, etc. And another thing it does well is batch transactions. 
Second is this growth of multi-chain standards. So it's very clear we're going through a multi-chain world. Block spaces are getting more and more specialized. And I feel like we don't give enough credit for how much Ethereum has evolved here. Like I remember just even a few years ago, going from one chain to another was so, so cumbersome. But now with intents-based standards like ERC-7683, which a couple other speakers before me talked about, and with block-space standards like the Superchain that Optimism came up with, and just generally more options for different types of bridging, we have way more of a unified experience for cross-chain interactions that are happening on-chain. So now it's up to dApps to figure out how they want to represent these multi-chain interactions, like how much they want to abstract and how much they don't. But it is so, so much easier. The third thing I want to mention is embedded wallets. So as I mentioned before, onboarding is a huge, huge issue in the space right now. And all the innovation happening around embedded wallets allows users to create wallets in seconds. And also users can use familiar concepts like email or social auth or even passkeys, which are becoming increasingly popular outside of crypto. And sophisticated traders are already taking advantage of this tech when they're using certain Telegram bots and things like that, or dApps that are using things like Privy or Capsule under the hood. The last one that I want to talk about is AI. So can't forget that as much as our industry is innovating, so are others. Everyone I think is aware of the massive impact AI is having on consumer apps right now. And I'm pumped about the confluence of that and crypto. And I think there's a ton of stuff here that I could talk about with AI, obviously, but the ones that I want to focus on are context and discovery. So with AI, we can rapidly give users more context and information on the actions that they're trying to do on chain, as well as the assets that they're trying to buy. A big issue, another big issue I see in crypto right now is discovery. And I think fine-tuned AI can really help with this. So clearly right now is the best time to build consumer apps in crypto because of all this infrastructure innovation that's been happening. So I want to end with a call to action to builders, especially new builders. All of these new consumer app devs, I hope you guys build more opinionated apps utilizing this tool chest of infrastructure to give users the most smooth experience possible. Thank you. Thank you, Medha. And we have time for a question. Let's see. Okay. And there you go. You got it. Thanks for the talk. It's very inspiring. I would like to ask maybe a Medha question. If there's a top three app, because we're on a consumer app, or you want to build, what would be the top three you can recommend for us to build on that? Did you say top three dApps I would recommend building? Did I get that right? Oh, yeah, yes. It's more like you want to get your thoughts on that. For sure. That's a great question. Let me think about this. There are so many exciting things right now. 
I would say one is just like a better information aggregator, so something that's like more personalized based on your trading history and also like assets that you own in a fun way I think that would be really exciting another one is like a multi-chain dApp that's like super completely abstracted so I saw some demos for this that some optimism devs did and I think stuff like that would be really cool where the end user doesn't have to care about the chain that they're on, but they're able to interact with people who are using multiple chains. Yeah. Thanks for the question. Lovely. We have time for another question. Yes. Just speak up. In your opinion, what's the biggest drawback to 7702? That's a great question. I think there's two. One is smart contract risk. So you're introducing just more smart contract risk since you're delegating to a contract. And then the second one is just lock-in. So if we don't standardize the 7702 implementations that wallets do, it could lead to a case where, like, let's say I have a seed phrase on, like, Uniswap wallet, and then it implements 7702, and I get all these, like, cool features like gas abstraction, but then I export that private key and put it into, like, let's say Metamask or Coinbase wallet or whatever, and then suddenly those guarantees are gone if they're not using the same 7702 standard. So that's something I do worry about. Thank you. Thank you for your questions. That's all the time we have for them. Thanks again, Mera. Give it up for Mera.", "eventId": "devcon-7", - "slot_start": 1731552300000, - "slot_end": 1731554100000, - "slot_roomId": "classroom-d", - "resources_presentation": "https://docs.google.com/presentation/d/1neM1-qHBPiHQ47mw5gGhxKmdlAYMtpZujIccA88zZM8", - "resources_slides": null, + "slot_start": 1731558600000, + "slot_end": 1731559200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1eJwIYkq9W94rsLobC0VKWwi7AVWG4wvUfj48LQs1f8k", + "resources_slides": "https://drive.google.com/file/d/1DyNrIWI32ba7eJrfAi3fMYMfzGXP66GS/view", "speakers": [ - "vlad-zamfir" + "medha-kothari" ] }, "vector": [ - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -841233,6 +838881,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -841923,7 +839572,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -841938,6 +839586,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -841970,7 +839619,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -842024,13 +839672,18 @@ 0, 0, 0, + 2, 0, 0, 0, + 2, 0, 0, 0, + 2, + 2, 0, + 2, 0, 0, 0, @@ -842073,6 +839726,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -842108,6 +839762,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -842162,7 +839817,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -842194,6 +839848,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -842427,7 +840082,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -842503,7 +840157,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -842545,51 +840198,54 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "voices-of-tech-and-open-source-movement-across-asia", - "sourceId": "QCPSDK", - "title": "Voices of Tech & Open Source Movement Across Asia", - "description": "This panel discussion features individuals from the open source communities, developer and user groups across Asia. These figures span different decades and have witnessed various phases of the tech movement, including the rise of open source, in their respective countries. 
Some have been pioneers since the early days, while others have emerged as key players through recent college engagements and grassroots initiatives.",
    "track": "Cypherpunk & Privacy",
    "type": "Panel",
    "expertise": "Beginner",
    "audience": "Engineering",
    "featured": false,
    "doNotRecord": false,
    "tags": [
      "FOSS",
      "regional",
      "insights"
    ],
    "keywords": [
      "FOSS",
      "Regional",
      "Insights"
    ],
    "duration": 3310,
    "language": "en",
    "sources_swarmHash": "61c49c5cdb4bc3d649ccd86731882f2c81639c8d942834f908404b9e8bbc21d7",
    "sources_youtubeId": "TsI7-ejb_Ig",
    "sources_ipfsHash": "",
    "sources_livepeerId": "",
    "sources_streamethId": "67343e109dbb7a90e1d6c5ab",
    "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67343f599dbb7a90e1f03f5b.vtt",
    "transcript_text": " Hello everyone, my name is Rim. I'm a software engineer at Blockscout, which is an open source block explorer. And today I want to invite you to try to verify a couple of contracts along with me. Let's start with answering the question, what contract verification is. When you deploy the contract onto the chain, it is represented as a number of bytes, which the Ethereum virtual machine can understand and execute. And there are no Solidity or Vyper sources stored inside the blockchain. So when indexers index the contract data, all we can see are those two bytecode values, which are the contract creation code and the contract runtime, or deployed, code. But most of the people are not good in understanding the raw sequence of bytes, and what we usually want to see is what is represented in the picture below, so we ask our developers to send those sources to us and we recompile them and check that those actually correspond to the on-chain code values. And this is the contract verification. And today I want to verify a couple of contracts with you. So let's start with the simple one. Represented by those two code values here. What do we need from the user to verify the contract? First of all, we need the source files themselves, of course. And let's assume that this tricky storage contract is our potential candidate. It is tricky just because it adds some magic number before storing it inside the storage. That's it. Also, we need the compiler version, which the contract has been compiled with, and the compilation settings. With all that information, our first step is just to combine all of that into the standard JSON input format, which has all information just in one file. We submit this JSON to the compiler, and what returns us back is the standard JSON output, which is quite big usually, but what is important here for us is that it returns two bytecode values, compiled creation and compiled runtime code values. 
So what we have to do here is to just take those two bytecodes and compare them. Do they match? Yep, they match, so that's it. Actually, is it always that easy to verify the contract, though? Let's look at a little bit more complex example here, which is where we used an external library for making the addition operation. External libraries are the contract codes which are deployed once at some address, and then our contracts can link their addresses inside themselves, and reuse their functions by the delegatecall opcode. So we'll do the same transformations as before, and we'll get two bytecodes as well. But do they match? Well, we can see that there is a strange, not even a hex part inside the compiled creation code, which does not correspond to the on-chain value. So why does it happen? Actually, this is the place where the library address should be put at, but as we haven't provided it to the compiler during the compilation, it just doesn't know what to put inside and places some placeholder instead. And our question is how to verify such contracts. Luckily for us, there is a special section inside the standard JSON output, which is named link references, and which for each unlinked library contains some information about where this library address should be placed inside, specifically the first byte where it should be placed at and its length, which is always 20 bytes. So what we need to do is just to take the specified offset value, then take the next 20 bytes from the on-chain code and substitute it inside the compiled code. So do those two bytecodes match now? Yes, they do. Luckily for us. So here we are, just to verify the second contract for today. In general, such replacements, we name them code transformations, and those are some actions which may be applied to the compiled code before or after, during the deployment process, and which changes its bytecode a little bit, but which keeps the functionality the same. And there are currently five of such transformations we know about and support. And we've talked about the libraries, but there are four more we don't have time to talk about today. But if you are interested, you might just follow the QR link and see some more information about them. So, also, I think the last slide, my presentation title was Verifier Alliance, the first part of it. And I haven't talked about that a lot. But if you are interested in that part as well, you are welcome to the panel which will take place today at 5.30 p.m. where Blockscout, Sourcify, Routescan, the members of this Verifier Alliance initiative will describe this a little bit more and talk about verification as well. Thank you. I think that's it. Thank you, Reem. We have questions for Reem? Oh, okay. This is Mike, too. This is pretty awesome, actually. Why is it so difficult to have decentralized contract verification? We use services like Blockscout, Etherscan, but why after all these years is the experience still so bad in general? 
Well, I think it happens a lot because you have to store this contract somewhere first of all, and the resource file which tries to decentralize the storage process itself and but actually what is more important here were a lot of different formats and all Like different explorers use their own formats to store this data inside source file uses its own data and one of the Vsverify Alliance initiatives idea was to develop the schema in which all contracts should be sorted, and with that actually we are going to have just one database of all verified contracts shared between different verification providers and I hope that will help to increase the decentralization of this data so we're going to share some market dumps for that opens access to the database maybe and hopefully that will work. All right. Shoot. In the verification part for contracts that use library, looks like we are using the reference by code from the deploy by code. Is that safe? Yes, that is safe, because after the compilation we've seen that with 20 bytes, the library address was assumed to be put inside those 20 bytes by the contract code itself, and this address can be anything actually actually so we just take the actual value so we assume that the on-chain code should also contain the library address at this place and take it as our address. So it's actually safe just because this offset was in the standard JSON output section. Alright, thank you so much for this session. Please help me appreciate our amazing speaker, Ren.", + "sources_streamethId": "673446f19dbb7a90e1697a9c", "eventId": "devcon-7", - "slot_start": 1731468600000, - "slot_end": 1731472200000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1ADQtojPz5zGpvoa8L2aH0vcyddEYsowQH6-jcNkUIMU", - "resources_slides": null, + "slot_start": 1731472200000, + "slot_end": 1731475800000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1qtrl6r-TYlWqtL69dNckKj8GBF_OtG2FNSwchnfA6ew", + "resources_slides": "https://drive.google.com/file/d/1A0h36Tj9OdHEsLBKh-SHtYSzoDfnzMcF/view", "speakers": [ - "brianna-chang", - "hong-phuc-dang", - "mario-behling", - "mishari-muqbil" + "lukas-schor", + "derek-rein", + "arik-galansky", + "adam-ceresko" ] }, "vector": [ @@ -842598,6 +840254,9 @@ 0, 0, 0, + 0, + 0, + 0, 6, 0, 0, @@ -843303,6 +840962,7 @@ 0, 0, 0, + 0, 6, 6, 6, @@ -843388,6 +841048,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -843481,6 +841142,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -843490,6 +841152,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -843680,15 +841343,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -843817,6 +841471,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -843883,10 +841538,6 @@ 0, 0, 0, - 2, - 2, - 0, - 0, 0, 0, 0, @@ -843911,12 +841562,12 @@ 0, 2, 0, - 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -843929,44 +841580,35 @@ }, { "session": { - "id": "voting-with-time-commitment", - "sourceId": "7V7QNK", - "title": "Voting with time commitment", - "description": "Token-based voting mechanisms employed by DAOs can encounter three potential problems: plutocracy, Sybil attacks and vote buying. If one were to design a voting mechanism from scratch, how does one ensure that these issues are addressed adequately down the road? 
This talk aims to provide some intuition for the trade-offs faced when tackling these problems in general, and the role of time commitment in alleviating these issues, in particular.", - "track": "Cryptoeconomics", - "type": "Talk", - "expertise": "Intermediate", + "id": "warren-winter", + "sourceId": "9PWLDW", + "title": "Warren Winter", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Governance", - "Mechanism design", - "voting", - "Governance", - "Mechanism design" - ], - "keywords": [ - "Voting" - ], - "duration": 1534, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "ae548aae445f4151f042de6c1fc3c06468c5bc76bf04a3bf33063c4dbff22215", - "sources_youtubeId": "CYrmSPVuGqs", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673476f19dbb7a90e139f8dc", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731489600000, - "slot_end": 1731491400000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1P434UTSmq4E68DmH8ddDjupGoA0DAAfW5KIZ-umwqaM", - "resources_slides": null, - "speakers": [ - "vijay-mohan" - ] + "slot_start": 1731577500000, + "slot_end": 1731580200000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1KC8s2MGqxozkSjf4Ogbdu9s8XFZLZgkj32ySca-LrnQ", + "resources_slides": "" }, "vector": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 6, @@ -844681,7 +842323,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -844725,7 +842366,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -844814,7 +842454,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -845080,21 +842719,13 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -845284,7 +842915,6 @@ 2, 0, 0, - 0, 2, 0, 0, @@ -845303,48 +842933,26 @@ }, { "session": { - "id": "wallet-infra-improvements-and-building-apps-for-the-next-generation", - "sourceId": "RQAAFS", - "title": "Wallet Infra Improvements, and Building Apps for the Next Generation", - "description": "In this talk I go over infrastructure innovations that are happening in the space right now, how they’re going to be how we bring the next wave of users into crypto, and why right now is the best time to build a consumer app in this space.", - "track": "Usability", - "type": "Lightning Talk", - "expertise": "Beginner", + "id": "web3-poetry-day-1", + "sourceId": "VDMFMR", + "title": "Web3 Poetry - Day 1", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Accessibility", - "Account Abstraction", - "Architecture", - "Frameworks", - "Gas", - "Intents", - "Payment", - "UI/UX" - ], - "keywords": [ - "wallet", - "dapps" - ], - "duration": 538, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "cb6c67ca170aa1455e17b9c03261ca9968617fe7b6d4a82cb7a707ec3e730e25", - "sources_youtubeId": "GUZ33tEdOJw", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "67357fa59dbb7a90e11d9f1f", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67357fa59dbb7a90e11d9f1f.vtt", - "transcript_text": " Cool. Hey guys. So as you can see, I actually changed the title of my talk really last minute. But my name is Meida Kothari and I lead product for all of the wallet related things at Uniswap Labs. So in this talk, I want to cover some of the incredible infrastructure innovations that have happened over recent years, and why I think they're going to bring in the next wave of users in crypto. Forgot my little thing. So when we talk about onboarding the next billion users, often the narrative online is focused on blaming crypto UX for why we haven't had mass adoption yet. But what does better Crypto UX really mean? Like if you look at a lot of our apps, they're already beautiful, they're clear, they're comparable to Web2. And based on recent breakout successes for dApps like Polymarket or TradingBots or whatever you use, I feel like it's safe to say that users are willing to overcome a lot of friction to do what they want. In my opinion, what crypto often lacks is more opinionated products. And what a lot of these infrastructure innovations that I'm about to highlight do is give developers optionality to build more opinionated products. Historically, I think we've put a lot of burden of choice on users for every micro decision, whether it's how they store their seed phrase, whether it's how they pay gas, whether it's what chain they're on, everything else. So I find a more useful framing for the crypto UX problem is how do we make it easier for users to do more with their balance? Before I get into these innovations, I kind of wanted to set the framing and identify core problems to a really quick case study. So for this, I'm going to use my dad as an example. He's a software engineer. He's technologically savvy. He trades a lot on apps like Robinhood and E-Trade for stocks. And he knows what ETH and Bitcoin are and holds some but doesn't really know much about it. He doesn't really know about ERC-20s, L2s, the underlying technology, anything else. So he somehow finds out about a token. This actually happened a few months ago. Let's call it Blah Token. And he asked me for help. And so I helped walk him through how to buy it. So I walk him through the process. He only has crypto in his Robinhood centralized exchange account right now. And so I had to guide him through downloading a non-custodial wallet, explain what that non-custodial word meant and then what a seed phrase was and why it was so important. And then I had to walk him through transferring his ETH from the centralized exchange into this new wallet. 
But oh, it turns out the asset was on base and he didn't have ETH on base to pay the gas and so I had to explain what bridges were, what L2s were, what gas was, and then we had to bridge his ETH over to base. And then he was finally able to swap the asset using Uniswap. But of course, he had to reserve some of that ETH for gas. That took so many steps, so much education, so much time. And one of the biggest issues I think all of us see in the space right now is onboarding. And my dad specifically in this case was a very high intent technical user. And it took him several minutes to create a wallet and was thrown so many new concepts right up front. Currently, I feel like we're taking high intent users who probably come in wanting to do something, wanting to try out a wallet or buy an asset, whatever. And we end up surfacing so many friction points for them that they end up churning before even completing the action that they initially set out to do. Luckily though, there are so many infrastructure advancements that have been made already that make this whole process easier. There are quite a few, but for the sake of time, I want to focus on four. So the first one is account abstraction, specifically through EIP-7702. So account abstraction and smart contract wallets have been in the zeitgeist for a while now, but one of the biggest issues with most of these implementations is that they're not compatible with EOAs, which are the most popular form of existing wallets. 7702 basically allows EOAs to get some, not all, of the benefits of account abstraction without forcing users to fully migrate into a smart contract wallet. So while this isn't fully comprehensive account abstraction, it does two things that I think are really exciting. One is gas abstraction. So in my dad's case, he wouldn't have to worry about the concept of gas or gas tokens, etc. And another thing it does well is batch transactions. Second is this growth of multi-chain standards. So it's very clear we're going through a multi-chain world. Block spaces are getting more and more specialized. And I feel like we don't give enough credit for how much Ethereum has evolved here. Like I remember just even a few years ago, going from one chain to another was so, so cumbersome. But now with intense space standards like ERC7683, which a couple other speakers before me talked about, and with block-space standards like the super chain that Optimism came up with, and just generally more options for different types of bridging, we have way more of a unified experience for cross-chain interactions that are happening on-chain. So now it's up to dApps to figure out how they want to represent these multi-chain interactions, like how much they want to abstract and how much they don't. But it is so, so much easier. The third thing I want to mention is embedded wallets. So as I mentioned before, onboarding is a huge, huge issue in the space right now. And all the innovation happening around embedded wallets allows users to create wallets in seconds. And also users can use familiar concepts like email or social auth or even pass keys, which are becoming increasingly popular outside of crypto. And sophisticated traders are already taking advantage of this tech when they're using certain telegram bots and things like that, or dApps that are using things like Privy or Capsule under the hood. The last one that I want to talk about is AI. So can't forget that as much as our industry is innovating, so are others. 
Everyone I think is aware of the massive impact AI is having on consumer apps right now. And I'm pumped about the confluence of that and crypto. And I think there's a ton of stuff here that I could talk about with AI, obviously, but the ones that I want to focus on are context and discovery. So with AI, we can rapidly give users more context and information on the actions that they're trying to do on chain, as well as the assets that they're trying to buy. A big issue, another big issue I see in crypto right now is discovery. And I think fine-tuned AI can really help with this. So clearly right now is the best time to build consumer apps in crypto because of all this infrastructure innovation that's been happening. So I want to end with a call to action to builders, especially new builders. All of these new consumer app devs, I hope you guys build more opinionated apps utilizing this tool chest of infrastructure to give users the most smooth experience possible. Thank you. Thank you, Medha. And we have time for a question. Let's see. Okay. And there you go. You got it. Thanks for the talk. It's very inspiring. I would like to ask maybe a Medha question. If there's a top three app, because we're on a consumer app, or you want to build, what would be the top three you can recommend for us to build on that? Did you say top three dApps I would recommend building? Did I get that right? Oh, yeah, yes. It's more like you want to get your thoughts on that. For sure. That's a great question. Let me think about this. There are so many exciting things right now. I would say one is just like a better information aggregator, so something that's like more personalized based on your trading history and also like assets that you own in a fun way I think that would be really exciting another one is like a multi-chain dApp that's like super completely abstracted so I saw some demos for this that some optimism devs did and I think stuff like that would be really cool where the end user doesn't have to care about the chain that they're on, but they're able to interact with people who are using multiple chains. Yeah. Thanks for the question. Lovely. We have time for another question. Yes. Just speak up. In your opinion, what's the biggest drawback to 7702? That's a great question. I think there's two. One is smart contract risk. So you're introducing just more smart contract risk since you're delegating to a contract. And then the second one is just lock-in. So if we don't standardize the 7702 implementations that wallets do, it could lead to a case where, like, let's say I have a seed phrase on, like, Uniswap wallet, and then it implements 7702, and I get all these, like, cool features like gas abstraction, but then I export that private key and put it into, like, let's say Metamask or Coinbase wallet or whatever, and then suddenly those guarantees are gone if they're not using the same 7702 standard. So that's something I do worry about. Thank you. Thank you for your questions. That's all the time we have for them. Thanks again, Mera. 
Give it up for Mera.", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731558600000, - "slot_end": 1731559200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1eJwIYkq9W94rsLobC0VKWwi7AVWG4wvUfj48LQs1f8k", - "resources_slides": null, - "speakers": [ - "medha-kothari" - ] + "slot_start": 1731398400000, + "slot_end": 1731402000000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1YlWqriBn80NWKxkgkyOqcJWz0Dtul50_teTrfXcFHJA", + "resources_slides": "" }, "vector": [ 0, @@ -845355,6 +842963,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -846062,7 +843671,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -846149,18 +843757,13 @@ 0, 0, 0, - 2, 0, 0, 0, - 2, 0, 0, 0, - 2, - 2, 0, - 2, 0, 0, 0, @@ -846203,7 +843806,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -846239,7 +843841,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -846325,7 +843926,8 @@ 0, 0, 0, - 2, + 0, + 0, 0, 0, 0, @@ -846665,6 +844267,7 @@ 0, 2, 0, + 0, 2, 0, 0, @@ -846683,49 +844286,26 @@ }, { "session": { - "id": "wallet-ux-panel", - "sourceId": "9HACGK", - "title": "Wallet UX Panel", - "description": "Wallets are here to provide great user experience with robust security. \r\nBringing the top wallet providers (Fireblocks, Safe, Metamask, Coinbase and WalletConnect/Reown) to talk about how Ethereum user UX evolved and how we can make it much better.", - "track": "Usability", - "type": "Panel", - "expertise": "Beginner", - "audience": "Community", + "id": "web3-poetry-day-3", + "sourceId": "GN8LTB", + "title": "Web3 Poetry - Day 3", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "Coordination", - "Custody", - "Account Abstraction", - "standards", - "Account Abstraction", - "Coordination", - "Custody" - ], - "keywords": [ - "Wallets", - "User Experience", - "Standards" - ], - "duration": 3396, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "2f420c5d40f8af815eaa182ef386e07100536e8611142260c83d0014d1c20481", - "sources_youtubeId": "UhQWxhId2Nk", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673446f19dbb7a90e1697a9c", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731472200000, - "slot_end": 1731475800000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1qtrl6r-TYlWqtL69dNckKj8GBF_OtG2FNSwchnfA6ew", - "resources_slides": null, - "speakers": [ - "lukas-schor", - "derek-rein", - "arik-galansky", - "adam-ceresko" - ] + "slot_start": 1731571200000, + "slot_end": 1731574800000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/16EbmLxT3rCfmlW9Mc5CErRzSrp05fgvj7stvb6H3iaY", + "resources_slides": "" }, "vector": [ 0, @@ -846736,6 +844316,7 @@ 0, 0, 0, + 0, 6, 0, 0, @@ -847444,10 +845025,6 @@ 0, 0, 0, - 6, - 6, - 6, - 6, 0, 0, 0, @@ -847530,7 +845107,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -847624,7 +845200,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -847634,7 +845209,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -847954,7 +845528,7 @@ 0, 0, 0, - 2, + 0, 0, 0, 0, @@ -848047,11 +845621,12 @@ 2, 0, 0, + 2, + 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -848064,9 +845639,9 @@ }, { "session": { - "id": "warren-winter", - "sourceId": "9PWLDW", - "title": "Warren Winter", + "id": "web3-poetry-jam", + "sourceId": "V79DXK", + "title": "Web3 Poetry Jam", "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", "track": "Entertainment", "type": "Music", @@ -848079,10 +845654,11 @@ "language": "en", "speakers": [], "eventId": "devcon-7", - "slot_start": 1731577500000, - "slot_end": 1731580200000, + "slot_start": 1731484800000, + "slot_end": 1731486600000, "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1KC8s2MGqxozkSjf4Ogbdu9s8XFZLZgkj32ySca-LrnQ" + "resources_presentation": "https://docs.google.com/presentation/d/1XSH7eVjgLTTnQVBK8jxg0l8v8m1RayeW3qpih1FAFHY", + "resources_slides": "" }, "vector": [ 0, @@ -849395,11 +846971,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, 2, 0, 0, @@ -849421,36 +846992,44 @@ }, { "session": { - "id": "web3-poetry-day-1", - "sourceId": "VDMFMR", - "title": "Web3 Poetry - Day 1", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", + "id": "web3-security-is-embarrasing", + "sourceId": "VNFNDM", + "title": "Web3 Security is Embarrasing", + "description": "The explosive growth of Web3 has brought about innovation, decentralization, and financial opportunity. But let’s be honest—Web3 security is a disaster. In this talk, we’ll confront embarrassing truths: drainer attacks, weak wallet protections, and overlooked vulnerabilities. But we won’t stop there; I’ll share practical fixes to protect users and show how Web3 developers can raise the bar. If we want Web3 to thrive, we have to stop attackers beating us with low-effort attacks. We can do better!", + "track": "Security", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "keywords": [ + "phishing", + "protection" + ], + "tags": [ + "Security", + "Sustainability", + "User Experience" + ], "language": "en", - "speakers": [], + "sources_swarmHash": "cbe9a41a18b6b6f38c6379e9112f03daa416a430143cc5aacad5d75ef4fa3041", + "sources_youtubeId": "4dr7sL42GAw", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "andrew-macpherson" + ], "eventId": "devcon-7", - "slot_start": 1731398400000, - "slot_end": 1731402000000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1YlWqriBn80NWKxkgkyOqcJWz0Dtul50_teTrfXcFHJA" + "slot_start": 1731573000000, + "slot_end": 1731574800000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1lEsNi0su_iRPEMbDkw-4CNthY3CMQvM_6ClpF3sBGNM", + "resources_slides": "https://drive.google.com/file/d/1znq8jpKAR9gwVW753mlGIwpgUrBHEKn5/view" }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -850169,6 +847748,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -850198,6 +847778,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -850213,6 +847794,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -850546,6 +848128,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -850756,92 +848339,11 @@ 0, 0, 0, - 0, - 2, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ] - }, - { - "session": { - "id": "web3-poetry-day-3", - "sourceId": "GN8LTB", - "title": "Web3 Poetry - Day 3", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", - "audience": "Engineering", - "featured": false, - "doNotRecord": false, - "keywords": [], - "tags": [], - "language": "en", - "speakers": [], - "eventId": "devcon-7", - "slot_start": 1731571200000, - "slot_end": 1731574800000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/16EbmLxT3rCfmlW9Mc5CErRzSrp05fgvj7stvb6H3iaY" - }, - "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, + 2, 0, 0, 0, @@ -850854,6 +848356,75 @@ 0, 0, 0, + 0 + ] + }, + { + "session": { + "id": "web3-user-research-101", + "sourceId": "7YZGVW", + "title": "Web3 User Research 101", + "description": "Everything you’ve wanted to know about talking to users in web3 and were too afraid to ask! This workshop will give participants a crash course in user research and UX first principles, then guide them through the process of conducting a research project from start to finish - with a focus on web3 users specifically.", + "track": "Usability", + "type": "Workshop", + "expertise": "Beginner", + "audience": "Design", + "featured": true, + "doNotRecord": false, + "tags": [ + "Best Practices", + "User Experience", + "UI/UX", + "User Research", + "Design Thinking", + "101", + "Best Practices", + "Design Thinking", + "UI/UX", + "User Experience", + "User Research" + ], + "keywords": [ + "101" + ], + "duration": 6420, + "language": "en", + "sources_swarmHash": "27a77bf4fed4058eaa45474e58169a82061e00054ba64ee5ceaf5efbec8dd25e", + "sources_youtubeId": "--SIpz6SfAo", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6733f1f63a168eb5353dea86", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "eventId": "devcon-7", + "slot_start": 1731398400000, + "slot_end": 1731405600000, + "slot_roomId": "classroom-c", + "resources_presentation": "https://docs.google.com/presentation/d/1WDegVtKo7rojZIBJT9EVkbEcih7LrcH0QIwcJFOGr6Y", + "resources_slides": "https://drive.google.com/file/d/1vPKxU0_5aZ_feGt6TJfEr2htRifMHvBA/view", + "speakers": [ + "mindy-harrell", + "kristina-mayman" + ] + }, + "vector": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 6, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -851556,6 +849127,10 @@ 0, 0, 0, + 6, + 6, + 0, + 0, 0, 0, 0, @@ -851597,6 +849172,8 @@ 0, 0, 0, + 6, + 0, 0, 0, 0, @@ -851610,6 +849187,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -851637,6 +849215,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -851664,6 +849243,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -851720,6 +849300,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -852117,6 +849698,27 @@ 2, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 2, 0, 0, @@ -852130,30 +849732,53 @@ 0, 0, 0, + 2, + 0, 0 ] }, { "session": { - "id": "web3-poetry-jam", - "sourceId": "V79DXK", - "title": "Web3 Poetry Jam", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", - "audience": "Engineering", + "id": "wen-p2p-electronic-cash-system", + "sourceId": "ZFX3ZF", + "title": "Wen p2p Electronic Cash System?", + "description": "16 years have passed since Bitcoin whitepaper came out. Bitcoin was created as cypherpunk cash replacement. Cash means easy payments. But bitcoin found its PMF as 'digital gold', not as 'digital cash'. What happened to cash? What needs to happen for mass adoption of crypto payments?\r\nWe will go through the history of failed attempts. We'll end up with a hopeful analysis of why it's different in 2024 (spoiler alert: stablecoin adoption, cheap L2s, AA).", + "track": "Real World Ethereum", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Product", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "tags": [ + "Conviction", + "Payment", + "Account Abstraction", + "stablecoin", + "Account Abstraction", + "Conviction", + "Payment" + ], + "keywords": [ + "payments", + "cash", + "stablecoins" + ], + "duration": 1549, "language": "en", - "speakers": [], + "sources_swarmHash": "63b3f30cc56be0d58d80b8295e68bf9bdc088a286e0e5c0e86738ee20d1e2e1c", + "sources_youtubeId": "Kw3rxKFUEKc", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "673321cc3a168eb53554b6fe", "eventId": "devcon-7", - "slot_start": 1731484800000, - "slot_end": 1731486600000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1XSH7eVjgLTTnQVBK8jxg0l8v8m1RayeW3qpih1FAFHY" + "slot_start": 1731402000000, + "slot_end": 1731403800000, + "slot_roomId": "stage-6", + "resources_presentation": "https://docs.google.com/presentation/d/1JImpxFx5TF-6ESwxVVo3QOw9b3RrwbHwCF5idb0IZDY", + "resources_slides": "https://drive.google.com/file/d/1dMvlJ2kGv4x2idmRzv87LISnMMOksadw/view", + "speakers": [ + "konrad-urban" + ] }, "vector": [ 0, @@ -852162,9 +849787,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -852320,6 +849942,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -852957,6 +850580,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -853046,6 +850670,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -853325,6 +850950,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -853353,6 +850979,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -853463,6 +851090,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -853470,10 +851098,6 @@ 0, 0, 0, - 0, - 2, - 0, - 0, 2, 0, 0, @@ -853483,55 +851107,60 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 0 ] }, { "session": { - "id": "web3-security-is-embarrasing", - "sourceId": "VNFNDM", - "title": "Web3 Security is Embarrasing", - "description": "The explosive growth of Web3 has brought about innovation, decentralization, and financial opportunity. But let’s be honest—Web3 security is a disaster. In this talk, we’ll confront embarrassing truths: drainer attacks, weak wallet protections, and overlooked vulnerabilities. But we won’t stop there; I’ll share practical fixes to protect users and show how Web3 developers can raise the bar. If we want Web3 to thrive, we have to stop attackers beating us with low-effort attacks. 
We can do better!", - "track": "Security", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "id": "western-liberalism-to-world-liberalism", + "sourceId": "H8N9CP", + "title": "Western liberalism to world liberalism", + "description": "Western liberalism to world liberalism", + "track": "Real World Ethereum", + "type": "Panel", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, - "keywords": [ - "phishing", - "protection" - ], "tags": [ - "Security", - "Sustainability", - "User Experience" + "Ethereum for Good", + "Free Speech", + "Network State" ], - "language": "en", - "speakers": [ - "andrew-macpherson" + "keywords": [ + "liberalism" ], + "duration": 3360, + "language": "en", + "sources_swarmHash": "e336a0a31600a9db8af0f230c399f40339c116fcbd6c474fe7fe65fcd24f4a0d", + "sources_youtubeId": "CJUy9mkK_SU", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "67382a751b0f83434d8efc5f", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731573000000, - "slot_end": 1731574800000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1lEsNi0su_iRPEMbDkw-4CNthY3CMQvM_6ClpF3sBGNM" + "slot_start": 1731654000000, + "slot_end": 1731657600000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1mFj4uTFAQzEJkPvNyUIUkiMCWsX4MObr3w2Rk-bN8Qw", + "resources_slides": "https://drive.google.com/file/d/1V_VIcPA4EZLIRB235-r19UJRvJcd-7hA/view", + "speakers": [ + "diego-fernandez", + "bruno-macaes", + "vitalik-buterin", + "afra-zhao-wang", + "ahmed-gatnash" + ] }, "vector": [ - 6, - 0, - 0, - 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -853718,6 +851347,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -853959,6 +851589,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -854062,6 +851693,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -854103,6 +851735,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -854273,7 +851906,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -854289,11 +851921,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -854302,6 +851929,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -854321,6 +851949,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -854399,6 +852028,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -854624,7 +852254,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -854840,12 +852469,10 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, + 2, 0, 0, 0, @@ -854858,50 +852485,37 @@ }, { "session": { - "id": "web3-user-research-101", - "sourceId": "7YZGVW", - "title": "Web3 User Research 101", - "description": "Everything you’ve wanted to know about talking to users in web3 and were too afraid to ask! This workshop will give participants a crash course in user research and UX first principles, then guide them through the process of conducting a research project from start to finish - with a focus on web3 users specifically.", - "track": "Usability", - "type": "Workshop", + "id": "what-defi-founders-can-learn-from-web2", + "sourceId": "QB8CGR", + "title": "What DeFi Founders Can Learn From Web2", + "description": "Most DeFi founders come from crypto native backgrounds, but there is much to learn from the operational mechanics and metrics of web2 companies. \r\n\r\nThis talk will be a brief tutorial about web2 business mechanics, specifically SaaS. 
Concepts like unit economics, CAC, LTV, ARPU and the science of building and growing scalable companies.", + "track": "Real World Ethereum", + "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Design", - "featured": true, + "audience": "Business", + "featured": false, "doNotRecord": false, - "tags": [ - "Best Practices", - "User Experience", - "UI/UX", - "User Research", - "Design Thinking", - "101", - "Best Practices", - "Design Thinking", - "UI/UX", - "User Experience", - "User Research" - ], + "tags": [], "keywords": [ - "101" + "Metrics", + "Unit economics", + "Growth" ], - "duration": 6420, + "duration": 551, "language": "en", - "sources_swarmHash": "27a77bf4fed4058eaa45474e58169a82061e00054ba64ee5ceaf5efbec8dd25e", - "sources_youtubeId": "--SIpz6SfAo", + "sources_swarmHash": "2a6da17012439090d0f3e3a01b43f095606ddc22b273ee597298003c1d4338d5", + "sources_youtubeId": "BYAUg-nibMs", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6733f1f63a168eb5353dea86", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731398400000, - "slot_end": 1731405600000, - "slot_roomId": "classroom-c", - "resources_presentation": "https://docs.google.com/presentation/d/1WDegVtKo7rojZIBJT9EVkbEcih7LrcH0QIwcJFOGr6Y", - "resources_slides": null, + "slot_start": 1731480600000, + "slot_end": 1731481200000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1Gix77PnI2mYDQXanQIb49GstVRHx_-5qwgYKGNsIxzs", + "resources_slides": "https://drive.google.com/file/d/1Hs1_6UZ4ttrG-1KbtyHy-LCTw--sx29L/view", "speakers": [ - "kristina-mayman", - "mindy-harrell" + "mike-silagadze" ] }, "vector": [ @@ -854911,9 +852525,10 @@ 0, 0, 0, + 6, + 0, 0, 0, - 6, 0, 0, 0, @@ -855626,7 +853241,6 @@ 0, 0, 0, - 6, 6, 0, 0, @@ -855672,7 +853286,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -855687,7 +853300,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -855715,7 +853327,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -855743,7 +853354,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -855800,7 +853410,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -856197,7 +853806,7 @@ 0, 0, 0, - 2, + 0, 0, 0, 0, @@ -856226,6 +853835,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -856234,52 +853844,44 @@ 0, 0, 0, - 2, 0, 0 ] }, { "session": { - "id": "wen-p2p-electronic-cash-system", - "sourceId": "ZFX3ZF", - "title": "Wen p2p Electronic Cash System?", - "description": "16 years have passed since Bitcoin whitepaper came out. Bitcoin was created as cypherpunk cash replacement. Cash means easy payments. But bitcoin found its PMF as 'digital gold', not as 'digital cash'. What happened to cash? What needs to happen for mass adoption of crypto payments?\r\nWe will go through the history of failed attempts. We'll end up with a hopeful analysis of why it's different in 2024 (spoiler alert: stablecoin adoption, cheap L2s, AA).", + "id": "what-does-systemic-institutional-collapse-look-like", + "sourceId": "EJUTA3", + "title": "What Does Systemic Institutional Collapse Look Like?", + "description": "And when your governance goes wrong, how bad can it get? 
Lets look at the modern Middle East as a case study.", "track": "Real World Ethereum", "type": "Talk", - "expertise": "Intermediate", - "audience": "Product", + "expertise": "Beginner", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Conviction", - "Payment", - "Account Abstraction", - "stablecoin", - "Account Abstraction", - "Conviction", - "Payment" - ], - "keywords": [ - "payments", - "cash", - "stablecoins" + "Civil", + "Resistance" ], - "duration": 1549, + "keywords": [], + "duration": 945, "language": "en", - "sources_swarmHash": "63b3f30cc56be0d58d80b8295e68bf9bdc088a286e0e5c0e86738ee20d1e2e1c", - "sources_youtubeId": "Kw3rxKFUEKc", + "sources_swarmHash": "80178f213f2594441524b73d86c9224624118d61c2ed16dd78c3a2efac02b5fc", + "sources_youtubeId": "yoNrWn4mUlE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673321cc3a168eb53554b6fe", + "sources_streamethId": "6737016f1b0f83434dbd8d00", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731402000000, - "slot_end": 1731403800000, - "slot_roomId": "stage-6", - "resources_presentation": "https://docs.google.com/presentation/d/1JImpxFx5TF-6ESwxVVo3QOw9b3RrwbHwCF5idb0IZDY", - "resources_slides": null, + "slot_start": 1731651000000, + "slot_end": 1731652200000, + "slot_roomId": "stage-1", + "resources_presentation": "https://docs.google.com/presentation/d/1Xh5mjcx0whviYN-YFZ-Y0vVWcycLpTyfwEI-cWNKTMk", + "resources_slides": "https://drive.google.com/file/d/1FvCoVK3-Eo9XOqXy8oaYSvl48CNCOpdB/view", "speakers": [ - "konrad-urban" + "ahmed-gatnash" ] }, "vector": [ @@ -856444,8 +854046,6 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, @@ -857006,6 +854606,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -857085,7 +854686,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -857175,7 +854775,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -857456,7 +855055,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -857485,7 +855083,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -857576,6 +855173,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -857604,7 +855203,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -857613,61 +855211,55 @@ 0, 0, 0, - 0, 0 ] }, { "session": { - "id": "western-liberalism-to-world-liberalism", - "sourceId": "H8N9CP", - "title": "Western liberalism to world liberalism", - "description": "Western liberalism to world liberalism", - "track": "Real World Ethereum", - "type": "Panel", - "expertise": "Beginner", - "audience": "Community", + "id": "what-dont-we-know-understanding-security-vulnerabilities-in-snarks", + "sourceId": "NL3A7T", + "title": "What don't we know? Understanding Security Vulnerabilities in SNARKs", + "description": "Zero-knowledge proofs (ZKPs) have evolved from being a theoretical concept providing privacy and verifiability to having practical, real-world implementations, with SNARKs (Succinct Non-Interactive Argument of Knowledge) emerging as one of the most significant innovations. Prior work has mainly focused on designing more efficient SNARK systems and providing security proofs for them. 
Many think of SNARKs as \"just math,\" implying that what is proven to be correct and secure is correct in practice.",
      "track": "Security",
      "type": "Talk",
      "expertise": "Intermediate",
      "audience": "Engineering",
      "featured": false,
      "doNotRecord": false,
      "tags": [
        "Security"
      ],
      "keywords": [
        "ZKPs",
        "Security"
      ],
      "duration": 1540,
      "language": "en",
      "sources_swarmHash": "73f82e4075b6c3ec2a21aeda21e6795a208a239ac164b2168f95d285ab44d739",
      "sources_youtubeId": "njXVouCOBQY",
      "sources_ipfsHash": "",
      "sources_livepeerId": "",
      "sources_streamethId": "6736d27174749a4b8926b8b4",
      "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d27174749a4b8926b8b4.vtt",
      "transcript_text": " Hello, thanks for the introduction. So today I'm going to talk about vulnerabilities, mainly in the implementation of SNARKs or ZK-SNARKs, and also on what can go wrong when we deploy SNARKs in production. This is joint work with collaborators from TUM, the Ethereum Foundation, ZK Security, the Scroll Foundation and also Imperial College London. Okay, so let's start. What is the state of ZKP applications today? We have ZK rollups that have become very popular in the last two years. They have more than 5 billion USD in TVL in them. We have Zcash, which is a payment system. It was deployed, I think, the first version around 2015, 2016. We have many ZK applications both for infrastructure such as ZK bridges but also for private payments, for using wallets without having to use seed phrases like ZK login. We have private programmable L1s and L2s like Mina, Aleo, Aztec, and also we have some off-chain applications. And although all of those systems have been deployed, we haven't seen any major exploits like the DAO exploit we had in smart contracts. And although we haven't seen any exploit, there have been bugs in the systems deployed in production. Sorry for the pictures, they might be a bit small, but I will go through them. So Zcash had a vulnerability for, I think, more than a year in it. People from within the Zcash team found it and patched it. Then one of the most popular mixers had a vulnerability that, if someone had exploited it, could have basically drained the smart contracts of that protocol. One of the most popular ZK rollups had a major vulnerability that someone could again potentially have exploited to get everything out of that rollup. And also I would say that in audits of ZK protocols, even in top-notch protocols, if you compare it with top-notch smart contract protocols, the ratio of critical vulnerabilities is even higher. So there are many vulnerabilities. And people for many years suggested that ZKPs are very difficult and very hard, and not many people actually understand them. And also they have suggested that to exploit a ZK protocol is much more difficult than to exploit, for example, a smart contract vulnerability. I would say the first one is not true anymore, right? Because if you see the number of ZK presentations at Devcon this year compared with three years ago, we have an exponential increase. 
And also, although it might be true that some ZK vulnerabilities are difficult to exploit, I would say that some of them are pretty simple. And for example, here we have a Circom circuit. It's a very old circuit, but anyone who has written a Circom circuit could probably understand what's going on here. And still there are such vulnerabilities in ZK protocols that I think are pretty easy to exploit. So there is a huge risk, if there are still vulnerabilities in deployed protocols, of someone being able to exploit them at some point. Okay, so let's start with explaining the properties of a ZK protocol. We have knowledge soundness, which basically means that a dishonest prover cannot convince a verifier of an invalid statement except with a negligible probability. We have perfect completeness, which means that if you have a valid statement, a prover will always be able to convince an honest verifier of the correctness of that statement. And also we have zero knowledge, which means that the proof pi that we produce with the zero-knowledge proof does not reveal anything about the witness we are proving. So what is our threat model in the ZK world, right? We have three adversaries. We have the network adversary, who observes the system and its public values but cannot interact with the system. We have the adversarial user, which basically is able to submit some inputs for proof generation to an honest and non-malicious prover. And finally, we have the adversarial prover, which is the most common threat model, and it's our threat model when we actually need the ZK property, right? But I would argue that even if we don't need it, if we want to have a fully permissionless system, then that's the adversary we have. And it has the ability to produce proofs and has the ability basically to do everything to try to trick the verifier. To give you an example of what I mean with the second category, because it might be a bit confusing, consider ZK rollups at the moment, right? Where we have a single centralized trusted L2 node that is both the sequencer and the prover, so users can only submit their transactions there, and then that centralized node will produce a proof. So in that case we have an adversarial user, basically. And what can be the impact of ZK vulnerabilities? We might be able to break soundness, which means that a prover can convince a verifier of a false statement, and that could result, for example in a ZK rollup, in getting all the funds out of it. We can break completeness, which means that a verifier cannot verify valid proofs, or basically that the prover might generate invalid proofs, right? And for example, such a vulnerability could basically have a high impact on the liveness of ZK rollups. And we might also break zero knowledge, which means we have some information leakage. Okay, so what we did is we analyzed 141 bugs and vulnerabilities from audit reports, from vulnerability disclosures, and from bug trackers. And our goal was to split those vulnerabilities into layers and understand what can go wrong in each layer and also create a taxonomy of vulnerabilities. So let's start with that figure. So in the real world, the non-SNARK world, we'll have a relation, a specification, some idea that we want to actually create a ZKP about. And we might have some public and private inputs. 
So the first step is to manually encode that specification, that idea, in a circuit and get the circuit implementation. We figured out that this level is where most of the vulnerabilities happen. And the main reason, in our understanding, is that it's confusing for most developers to write circuits, because they have to think both about computation and also about constraints, and they might do very aggressive optimizations there and they might try to apply some tricks, and that typically leads to vulnerabilities. So we identified three main vulnerability classes. Under-constrained vulnerabilities, which means that you forgot some constraints or some of your variables are partially constrained. And that typically leads to soundness vulnerabilities, which is the worst vulnerability that can happen in a ZK system. Then we have over-constrained vulnerabilities, which is the exact opposite. That most typically will lead to completeness issues. And we also have computation or hint errors, which are just on the computation part. And accordingly, you might have messed up constraints, but the root cause was in the computation part. So we did a complete root cause analysis, and I will share with you a QR code for our paper to look into examples and to look at how you can fix some of those vulnerabilities, etc. But very briefly here, we have categorized them in three main root cause classes. First is that when developing circuits we have a different programming model, and that could lead to many vulnerabilities. Secondly, we observed that the root causes of vulnerabilities were optimizations and also having cryptography at the outer layer and in very low-level DSLs, which could introduce many vulnerabilities, and also common errors like in any software, like specification issues or API misuses, etc. So, the next layer is the frontend, which is basically composed of two components, a compiler and a witness generator. The compiler will take the circuit and will try to produce an intermediate representation that our proof system works on top of, for example R1CS, and then the witness generator will take the circuit, will take the public and the private variables we have, and it will produce a witness. And the next one is the backend. The backend is composed of three main functions, setup, proving, and verification, and things can go wrong in all those functions. So the vulnerabilities we identified in the frontend are incorrect constraint compilation and errors in witness generation, and in the previous presentation we saw how things can go bad there, and it's very critical to actually trust and to be able to have correct implementations of frontends. And in the backend the situation is quite similar. From our data we found out that an unsafe verifier is a very common issue and can lead to major vulnerabilities. Let's go to the next one. The next and last one is the integration layer, which, in the blockchain space, you can think of as the JavaScript that is responsible for running your prover client side and creating the proof, and also the smart contract that consumes that proof and calls the verifier you have implemented, or that was produced automatically, and tries to do some things. And we had some very interesting vulnerabilities in that layer. I want to focus on the first one, which is passing unchecked data. And what does that mean? 
Sometimes, as already said, we might try to do some optimizations in the circuits. And for example, one thing that is pretty common is for people to say, OK, in that circuit, let's have some implicit assumptions that our inputs are in a specific range. And then delegate that check to the actual code that will call the verifier. So in that example, we forgot to do such a check, and that could lead to major vulnerabilities then in our infrastructure. And in the last year or so, there has been a major change in some architectures, where instead of circuits, we have ZKVMs, right? So the developers now only care about writing some program, typically in a high-level language like Rust, and then compiling that program and giving it as an input to the ZKVM. Still, circuit bugs can happen in the ZKVM itself, and I would say a subtle new threat here is traditional compilation errors that might happen in the Rust compiler, for example. That could lead to invalid proofs. So that's something that people should take into consideration when using ZKVMs. So another way to see what we currently described is in a hierarchical way. And here I have an example of the whole stack when we use the Circom programming language and snarkjs with Groth16 as our proof system. I have two new layers here. One is field arithmetic and elliptic curves, which have nothing to do with ZK, but when we construct and implement a proof system, we have to have such a very efficient library, and things can go wrong there. And also things can go wrong in the hardware, in the operating system, in the blockchain we are using, right? So you should always think carefully about what you are going to use and apply all the traditional best security practices we know from other fields. And one last thing is that in the proof system there could be errors, there could be errors in the initial description, in the papers of proof systems. So if something goes wrong there, it doesn't matter if you have formally verified the Circom circuits or if you have the best backend or frontend, it could be exploitable. And that is basically true for any layer. So if your frontend or the backend is vulnerable, then even if you have formally verified your circuits, they could be exploitable. Okay, so we did that analysis and now I want to present some of the results. So we categorized the bugs in all those layers and also based on their impacts. And we can see that circuits were the number one threat in the whole infrastructure of using ZKPs. And also most of the vulnerabilities can result in soundness issues. So what can we do? Fortunately, there has been a lot of development and a lot of research on creating security tools for ZK circuits, specifically for zkEVMs, and also in the last months two new papers and tools were published, Circus, which was presented in the previous talk, and also MTZK, which is great, because such novel tools can detect infrastructure bugs in ZKP circuits. But I would say there is still a lot of work that needs to be done. For example, most of the circuit tools target a specific DSL, and also they typically target a specific vulnerability class. And then we have some static analysis tools like Circomspect, which might have tons of false positives. And then we have some really nice and very novel tools like Picus that try to formally verify and find any under-constrained issues in the circuits, but unfortunately those tools do not scale that well. 
So there's a lot of space for innovation and to try to build better tools, and here I have a list of security tools. You can scan that QR code; it's basically a GitHub repository. If it doesn't have any of the tools that you know of, please add them. And yeah, we need to do a better job here. And one other major issue I see in this space is that we don't have good tools for writing tests. And most of the codebases that use zero-knowledge proofs are unfortunately not that good at having complete test suites that try to cover, in the testing part, both soundness and completeness issues. Okay, so in conclusion, why do we have bugs? One of the reasons is that ZKPs are not just math. There are implementations, and many things can go wrong in those implementations. Why else? This is a quote from Ron Rivest in a completely different context, but I really like it. And I would say that in the ZK space, unfortunately, we have given to the poor developer enough rope with which to hang himself. Circuit languages are typically very low level, so they don't have good abstractions for developers to write safe code. We expose a lot of cryptography to the outer layers, and also there is a lot of complexity and a different threat model than what developers are used to. And there is a lack of specification throughout the whole infrastructure and the whole stack for using ZKPs, so we need to write more specifications. So what can we do? Basically, we have to negate everything from the previous slide. We need more learning resources, and I think we are doing a great job on that as a community. We need to write specifications and get used to writing specifications, because if you have a complete specification, then we know exactly what checks we should put in each layer and what vulnerabilities can happen in each layer. And that's how we can help developers, but also auditors, do a better job in trying to find vulnerabilities in those systems. We need easier and more secure programming languages, which I think is kind of where we are heading. For example, Noir is a great language that is much safer than writing circuits in Circom or Halo2. But in some cases, people will still need to write circuits in Halo2 or Circom, because they need to do some specific optimizations or they need to deploy to specific blockchains, for example. And then we need better testing and security tooling, from simple frameworks to write unit tests, to property-based testing, to formal verification. And not just formal verification. So that's it. I have there a link to our paper, where you can find many examples and how to try to avoid some of those pitfalls. And we also have a blog where we publish many posts about ZK security in general. So, thanks a lot. Thank you, Stefanos. This was enlightening. All right, people. As usual, you can ask your questions here. We're going to go through them in order. And let's take the first one. Several times in your slides, you referred to witnesses. What are witnesses? Are those private inputs? So a witness, I would say, is composed of both the private inputs, the public inputs, and all the intermediate steps and the outputs of our circuit. So I would say it's a trace, and then we create a proof about that trace. Thank you. 
All right, next question. I've put in a bunch of those, so do ask serious questions, please. We have a bit of time. What is your favorite bug ever? What is the most interesting bug you've ever found? That's a very good question. I think I can't pick one, but I would say typically the simple bugs, right? For example, the bug I had in one of the first slides, that could have led to basically draining one of the major mixers we have in the space. But also bugs that have to do with using cryptography in the circuits. And typically, due to some optimizations or some logic errors in those circuits, there could be like pretty interesting exploits that someone can do. Pretty cool, thank you. Alright, the next one. You're doing research, looking for bugs, you're paying your bills and buying your food by finding bugs. Can we consider you have a bug-based diet? We can consider that, yeah. I hope that in some future world there won't be that many bugs and maybe I will have a better diet. But unfortunately at the moment we have tons of bugs. Fantastic, thank you. What are your thoughts on TEEs? Okay, that's the question of Devcon, I feel. Everyone's asking that question. I would say you have different security assumptions when you use TEEs, right? I think they can work along with ZKPs, but they can't replace ZKPs. You have a much weaker threat model when you are working with TEEs. So, yeah, people should use them when they have to use them, but also don't trust them like a black box that will do everything for you and you are secure if you use a TEE. Wonderful, thank you. What can we do to make more secure languages like Noir faster compared to Circom, particularly with respect to gas cost? How do we make it more efficient? So gas cost, I would say, is kind of independent of what programming language you're using. It's more about what proof system you are using, right? And if that proof system has very efficient verification, that's the main factor. But also, more generally, in the circuit layer, I would say that indeed, if you don't really use unsafe in Noir, which then breaks the whole purpose of using Noir, you can write more optimized circuits in Circom at that point. But I would hope that we will have major advances in compilers for ZKPs, and then we can have compiler optimizations that are very strong, like in any other field, and rely on those optimizations to get pretty optimized circuits. But if we do that, then we need very, very solid testing for our compilers to detect any issues in those optimizations. Thank you. You mentioned in your slide that, you know, sometimes we give too much rope to the users to hang themselves with. I think the design of ZK circuits is difficult, but also using them is not very commonplace, right? A lot of users are not used to using these kinds of systems and what goes in, what goes out, what you can do with them, what is safe behavior. You mentioned learning resources. Do you think there's something to do with users also, to explain to them what the benefits are and what should be done? Or is it entirely on the app developer? Yeah, yeah. That's a great question. I think as researchers, it's our responsibility to create learning resources that are easy to follow by almost everyone. So I think we are doing kind of a good job there. For example, at ZK Security, we published a book on Halo2. And basically, many teams in that space develop pretty nice learning resources. 
And what I really like is that they also have a section about security vulnerabilities and what you should look at when you use a specific DSL. So, yeah, I think we are doing a great job on that. And in a few years, it will be even better. Fantastic. Stefanos, thank you. We're over time. So thank you for your talk. Thank you. Thanks a lot.", "eventId": "devcon-7", - "slot_start": 1731654000000, - "slot_end": 1731657600000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1mFj4uTFAQzEJkPvNyUIUkiMCWsX4MObr3w2Rk-bN8Qw", - "resources_slides": null, + "slot_start": 1731643200000, + "slot_end": 1731645000000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1b-4F9L2PRDflpHb2iAzeGwsuH6cvqfh3FMJsnOPZOtc", + "resources_slides": "https://drive.google.com/file/d/1A8f_scWEPOUSkiJ5gVpj0s-RXLBtPn2P/view", "speakers": [ - "afra-zhao-wang", - "ahmed-gatnash", - "bruno-macaes", - "diego-fernandez", - "vitalik-buterin" + "stefanos-chaliasos" ] }, "vector": [ + 6, 0, 0, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -857854,7 +855446,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -858097,7 +855688,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -858201,7 +855791,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -858243,7 +855832,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -858384,11 +855972,11 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -858413,6 +856001,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -858439,7 +856028,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -858459,7 +856047,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -858538,8 +856125,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -858981,10 +856566,12 @@ 0, 0, 0, + 2, + 0, + 0, 0, 0, 0, - 2, 0, 0, 0, @@ -858997,37 +856584,42 @@ }, { "session": { - "id": "what-defi-founders-can-learn-from-web2", - "sourceId": "QB8CGR", - "title": "What DeFi Founders Can Learn From Web2", - "description": "Most DeFi founders come from crypto native backgrounds, but there is much to learn from the operational mechanics and metrics of web2 companies. \r\n\r\nThis talk will be a brief tutorial about web2 business mechanics, specifically SaaS. Concepts like unit economics, CAC, LTV, ARPU and the science of building and growing scalable companies.", - "track": "Real World Ethereum", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Business", + "id": "what-is-the-status-of-epbs-and-its-future-iterations", + "sourceId": "3MUYVQ", + "title": "What is the status of ePBS and its future iterations", + "description": "We will go over the implementation and research status of ePBS (EIP-7732) and the future iterations and mechanisms it enables.We will describe in detail the main benefits to the protocol that are not directly related to any PBS system. 
We will showcase the tradeoffs that are present on each design decision and how the separation of validation between the consensus and execution layer in fact frees research with less technical debt and more independent mechanisms for future upgrades.", + "track": "Core Protocol", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], + "tags": [ + "PBS", + "fork", + "choice", + "PBS" + ], "keywords": [ - "Metrics", - "Unit economics", - "Growth" + "PBS", + "consensus", + "fork-choice" ], - "duration": 551, + "duration": 1483, "language": "en", - "sources_swarmHash": "2a6da17012439090d0f3e3a01b43f095606ddc22b273ee597298003c1d4338d5", - "sources_youtubeId": "BYAUg-nibMs", + "sources_swarmHash": "f5e5ad50e09c6e119cd1571e3ae0c3a54ebaf8460e392a4fd0abc96593c84a31", + "sources_youtubeId": "w-VwYHq1FA4", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673438b39dbb7a90e14a7f53", "eventId": "devcon-7", - "slot_start": 1731480600000, - "slot_end": 1731481200000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1Gix77PnI2mYDQXanQIb49GstVRHx_-5qwgYKGNsIxzs", - "resources_slides": null, + "slot_start": 1731472200000, + "slot_end": 1731474000000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1hihFfnTMBS1Mmp0aS3oHwzA-PX43SVRFqlRfNkbtOwU", + "resources_slides": "https://drive.google.com/file/d/1Un8pdIcw2kPvOil4YyRjg4yXkkFBWG3_/view", "speakers": [ - "mike-silagadze" + "potuz" ] }, "vector": [ @@ -859035,8 +856627,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -859755,15 +857345,9 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -859826,6 +857410,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -860214,6 +857799,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -860327,6 +857913,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -860345,14 +857932,14 @@ 0, 0, 0, + 2, 0, 0, - 2, 0, + 2, 0, 0, 0, - 2, 0, 0, 0, @@ -860367,38 +857954,40 @@ }, { "session": { - "id": "what-does-systemic-institutional-collapse-look-like", - "sourceId": "EJUTA3", - "title": "What Does Systemic Institutional Collapse Look Like?", - "description": "And when your governance goes wrong, how bad can it get? Lets look at the modern Middle East as a case study.", - "track": "Real World Ethereum", + "id": "whats-going-into-the-pectra-upgrade", + "sourceId": "9WTJRX", + "title": "What’s Going Into the Pectra Upgrade?", + "description": "A talk explaining the core EIPs going into the Pectra upgrade and the core EIPs still TBD for inclusion in Pectra. The talk will also touch on Pectra timing and fork scoping for the next hard fork after Pectra. 
Finally, the talk will share insights about the governance process of Ethereum in light of Pectra and takeaways about the priorities of Ethereum protocol developers.", + "track": "Core Protocol", "type": "Talk", "expertise": "Beginner", "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Civil", - "Resistance" + "fork", + "hard" ], - "keywords": [], - "duration": 945, + "keywords": [ + "Pectra", + "Governance", + "Hard forks" + ], + "duration": 1515, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "9c19d1c251eda5ae03524a901f817d1fb823edb289430285e2f1c606f649b80f", + "sources_youtubeId": "ufIDBCgdGwY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6737016f1b0f83434dbd8d00", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731651000000, - "slot_end": 1731652200000, + "slot_start": 1731391200000, + "slot_end": 1731393000000, "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1Xh5mjcx0whviYN-YFZ-Y0vVWcycLpTyfwEI-cWNKTMk", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1aEeDer7GTTFvo4hdDKqx3zqCVAtFdk2XqVNuiRomMTc", + "resources_slides": "https://drive.google.com/file/d/13HFL9PFs43pDYmamE0T0Pi_eMAOwDr0F/view", "speakers": [ - "ahmed-gatnash" + "christine-kim" ] }, "vector": [ @@ -860406,8 +857995,6 @@ 0, 0, 0, - 0, - 0, 6, 0, 0, @@ -861125,12 +858712,11 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -861581,6 +859167,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -861696,9 +859283,6 @@ 0, 0, 2, - 2, - 0, - 0, 0, 0, 0, @@ -861738,43 +859322,51 @@ }, { "session": { - "id": "what-dont-we-know-understanding-security-vulnerabilities-in-snarks", - "sourceId": "NL3A7T", - "title": "What don't we know? Understanding Security Vulnerabilities in SNARKs", - "description": "Zero-knowledge proofs (ZKPs) have evolved from being a theoretical concept providing privacy and verifiability to having practical, real-world implementations, with SNARKs (Succinct Non-Interactive Argument of Knowledge) emerging as one of the most significant innovations. Prior work has mainly focused on designing more efficient SNARK systems and providing security proofs for them. Many think of SNARKs as \"just math,\" implying that what is proven to be correct and secure is correct in practice.", - "track": "Security", - "type": "Talk", + "id": "whats-in-your-dose", + "sourceId": "BRUGUL", + "title": "What's In Your Dose?", + "description": "Pandemic responses require robust technical tools such as molecular diagnostic tests, novel immunization reagents, and recovery surveillance tools. Pandemic responses depend on public trust in these tools and their good faith deployment. Verification strategies to enhance public trust and cooperation will improve the performance of molecular tools in future pandemics.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Security" + "Decentralization", + "Public good" ], "keywords": [ - "ZKPs", - "Security" + "Molecular", + "Biology.", + "", + "Public", + "Health.", + "", + "Public", + "Trust." 
], - "duration": 1540, + "duration": 907, "language": "en", - "sources_swarmHash": "73f82e4075b6c3ec2a21aeda21e6795a208a239ac164b2168f95d285ab44d739", - "sources_youtubeId": "njXVouCOBQY", + "sources_swarmHash": "be8d4b2923608f8527fd4ad82d690569c33c76afabc4fce1d2968e8cd0993e26", + "sources_youtubeId": "F_SxA6W5hzQ", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6736d27174749a4b8926b8b4", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6736d27174749a4b8926b8b4.vtt", - "transcript_text": " Hello, thanks for the introduction. So today I'm going to talk about vulnerabilities, mainly in the implementation of Snarks or ZK-Snarks, and also on what can go wrong when we deploy SNARKs in production. This is joint work with collaborators from TUM, the Ethereum Foundation, ZK Security, the Scroll Foundation and also Imperial College London. Okay, so let's start. Okay, what is the state of ZK-AP applications today? We have ZK-K rollups that have become very popular in the last two years. They have more than 5 billion USD in TVL in them. We have Zcash, which is a payment system. It was deployed, I think, the first version around 2015, 2016. We have many ZK applications both for infrastructure such as ZK bridges but also for private payments, for using wallets without having to use seed phrases like ZK login. We have private programmable L1s and L2s like Mina, Aleo, Azdec, and also we have some off-chain applications. And although all of those systems have been deployed, we haven't seen any major exploits like the DAO exploit we had in smart contracts. And although we haven't seen any exploit, there have been bugs in the systems deployed in production. So Zcash had a vulnerability. Sorry for the pictures, might be a bit small, but I will go through them. So Zcash had a vulnerability for, I think, more than a year in it. People from within the Zcash, found it, and patched it. Then one of the most popular mixers had a vulnerability that if someone exploited, it could have basically drained the smart contracts of that protocol. One of the most popular ZK rollups had a major vulnerability that someone could again potentially could have exploited and get everything out of that roll-up. And also I would say that in audits in ZKE protocols, even in top-notch protocols, if you compare it with top-notch smart contract protocols, the ratio of critical vulnerabilities, it's even higher. So there are many vulnerabilities. And people for many years suggested that ZKPs are very difficult and very hard, and not many people actually understand them. And also they have suggested that to exploit a ZK protocol, it's much more difficult to exploit, for example, a smart contract vulnerability. I would say the first one is not true anymore, right? Because if you see the number of presentations in ZK in DEF CON this year and compared with three years ago, we have an exponential increase. And also, although it might be true that some ZK vulnerabilities is difficult to exploit, I would say that some of them are pretty simple. And for example, here we have a circumcircuit. It's a very old circuit, but anyone who has written a circumcircuit could probably understand what's going on here. And still there are such vulnerabilities in ZK protocols that I think are pretty easy to exploit. So there is a huge risk if there are still vulnerabilities in deployed protocols to be able to exploit them at some point. 
Okay, so let's start with explaining what are the properties of a ZKE protocol. We have knowledge tenderness, which basically means that a dishonest prover cannot convince a verifier of an invalid statement except with a negligible probability. We have perfect completeness, which means that if you have a valid statement, a prover will always be able to convince an honest verifier of the correctness of that statement. And also we have zero knowledge, which means that the proof pi that we produce with the zero knowledge proof does not reveal anything about the witness we are proving. So what is our threat model in a ZK world, right? We have three adversaries. So what is our threat model in the ZK world, right? We have three adversaries. We have the network adversary who observes the system and its public values but cannot interact with the system. We have the adversarial user, which basically is able to submit some inputs for proof generation in an honest and non-malicious prover. And finally, we have the adversarial prover, which is the most common thread model, and it's our thread model when we actually need the ZK property, right? But I would argue that even if we don't need it, if we want to have a fully permissionless system, then that's the adversary we have. And it has the ability to produce proofs and has the ability basically to do everything to try to trick the verifier. To give you an example of what I mean with the second category, because it might be a bit confusing, consider ZK rollups at the moment, right? Where we have a single centralized trusted L2 node that is both the sequencer and the prover so users can only submit the transactions there and then that centralized node will produce a proof so in that case we have an adversarial user basically and what can be the impact of ZK vulnerabilities. So, we might be able to try to break sadness, which means that a prover can convince a verifier of a false statement, and that could result in basically, for example, in ZK roll-up, to get all the funds out of it. We can break completeness, which means that a verifier cannot verify proofs, or basically that the prover might generate verify proofs, or basically that the prover might generate invalid proofs, right? And for example, such a vulnerability could basically have a high impact in the liveness of ZQ rollups. And we might also break zero knowledge, which means we have some information leakage. Okay, so what we did is we analyzed 141 bugs and vulnerabilities from audit reports, from vulnerability disclosures, and from bug trackers. And our goal was to split those vulnerabilities in layers and understand what can go wrong in each layer and also create a taxonomy of vulnerabilities. So let's start with that figure. So in the real work, non-SNARK work, we'll have a relation, a specification, some idea that we want to actually create a ZKP about. And we might have some public and private inputs. So the first step is to manually encode that specification, that idea, in a circuit and get the circuit implementation. So we figure out that in that level, it's where most of the vulnerabilities happen. And the main reason, in our understanding, is because it's confusing for most developers to write circuits because they have to think both about computation and also about constraints and they might do very aggressive optimizations there and they might try to apply some tricks and that typically leads to vulnerabilities. So we identified three main vulnerabilities. 
Other constraint vulnerabilities which means that you forgot some constraints or some of your variables are partially constrained. And that typically leads to sadness vulnerabilities, which is the worst vulnerability that can happen in a ZK system. Then we have over-constrained vulnerabilities, which is the exact opposite. That most typically will lead to completeness issues. And we also have computation or hint errors, which is just on the computation part. And accordingly, you might have messed up constraints, but the root cause was in the computation part. So we did a complete root cause analysis and I will share with you a QR code for our paper to look into examples and to look on how you can fix some of those vulnerabilities, etc. But very briefly here, we have categorized them in three main root cause classes. First is that when developing circuits circuits we have a different programming model and that could lead to many vulnerabilities. Secondly we observed that the root cause of vulnerabilities were optimizations and also having cryptography at the outer layer and in very low level DSLs that could introduce many vulnerabilities and also common errors like in any software, like specification issues or API misuses, etc. So, the next layer is the front-end, which is basically composed from two components, a compiler and a witness generator. The compiler will take the circuit and will try to produce an intermediate representation that it's on what our proof system works on top, for example R1CS, and then the witness generator will take the circuit, will take the public and the private variables we have, and it will produce a witness. And the next one is a backend. The backend is composed of three main functions, setup, proving, and verification, and things can go wrong in all those functions. So the vulnerabilities we identified here in the frontend is incorrect constraint compilation and errors in witness generation and in the previous presentation we saw how things can go bad there and it's very critical to actually trust and to be able to have correct implementations of front-ends and in the back-end the situation is quite similar. From our data we found out that unsafe verifier is a very common issue and can lead to major vulnerabilities. Let's go to the next one. The next one and the last one is the integration layer, which is basically you can think of one and the last one is the integration layer, which is basically, you can think of it in the blockchain space as the JavaScript that is responsible to run your prover client side and create the proof, and also the smart contract that consumes that proof and calls the verifier you have implemented or it was produced automatically and try to do some things. And we had some very interesting vulnerabilities in that layer. I want to focus in the first one, which is passing unchecked data. And what does that mean? Sometimes, as already said, we might try to do some optimizations in the circuits. And for example, one thing that is pretty common is for people to say, OK, in that circuit, let's have some implicit assumptions that our inputs are in a specific range. And then delegate that check to the actual code that will call the verifier. So in that example, we forgot to do such a check, and that could lead to major vulnerabilities then in our infrastructure. 
And in the last year or so, there has been a major change in some architectures, where instead of circuits, we have zkVMs, right? So the developers now only care about writing some program, typically in a high-level language like Rust, and then compiling that program and giving it as an input to the zkVM. Still, circuit bugs can happen in the zkVM itself, and I would say a subtle new threat here is traditional compilation errors that might happen in the Rust compiler, for example. That could lead to invalid proofs. So that's something that people should take into consideration when using zkVMs. So another way to see what we just described is in a hierarchical way. And here I have an example of the whole stack when we use the Circom programming language and snarkjs with Groth16 as our proof system. I have two new layers here. One is field arithmetic and elliptic curves, which have nothing to do with ZK, but when we construct and implement a proof system, we have to have such a very efficient library, and things can go wrong there. And also things can go wrong in the hardware, in the operating system, in the blockchain we are using, right? So you should always, basically, think about what you are going to use and apply all the traditional best security practices we know from other fields. And one last thing is that in the proof system, there could be errors there, there could be errors in the initial description, in the papers of proof systems. So if something goes wrong there, it doesn't matter if you have formally verified the Circom circuits, if you have the best backend or frontend, it could be exploitable. And that is basically true for any layer. So if your frontend or your backend is vulnerable, then even if you have formally verified your circuits, they could be exploitable. Okay, so we did that analysis and now I want to present some of the results. So we categorized the bugs in all those layers and also based on their impacts. And we can see that circuits were the number one threat in the whole infrastructure of using ZKPs. And also most of the vulnerabilities can result in soundness issues. So what can we do? Fortunately, there has been a lot of development and a lot of research on creating security tools for ZK circuits, specifically for zkEVMs, and also in the last month two new papers and tools were published, Circus, which was presented in the previous talk, and also MTZK, which is great, because such novel tools can detect infrastructure bugs in circuits and ZKPs, but I would say there is still a lot of work that needs to be done. For example, most of the circuit tools target a specific DSL, and also they typically target a specific vulnerability class. And then we have some static analysis tools like Circomspect, which might have tons of false positives. And then we have some really nice and very novel tools like Picus that try to formally verify and find any under-constrained issues in the circuits, but unfortunately those tools do not scale that well. So there's a lot of space for innovation and trying to build better tools, and here I have a list of security tools. You can scan that QR code, and it's basically a GitHub repository. If I'm missing any of the tools that you know of, please add them. And yeah, we need to do a better job here. And also one major issue I see in this space is that we don't have good tools for writing tests.
And most of the codebases that use zero-knowledge proofs are unfortunately not that good in having complete test suites and trying to catch, in the testing part, both soundness and completeness issues. Okay, so in conclusion, why do we have bugs? One of the reasons is that ZKPs are not just math. There are implementations, and many things can go wrong in those implementations. Why else? This is a quote from Ron Rivest in a completely different context, but I really like it. And I would say that in the ZK space, unfortunately, we have given to the poor developer enough rope with which to hang himself. Circuit languages are typically very low level, so they don't have good abstractions for developers to write safe code. We expose a lot of cryptography to the outer layers, and also there is a lot of complexity and a different threat model than what developers are used to. And there is a lack of specification throughout the whole infrastructure and the whole stack for using ZKPs, so we need to write more specifications. So what can we do? Basically, we have to negate everything from the previous slide. We need more learning resources, and I think we are doing a great job on that as a community. We need to write specifications and get used to writing specifications, because if we have complete specifications, then we know exactly what checks we should put in each layer and what vulnerabilities can happen in each layer. And that's how we can help developers, but also auditors, do a better job in trying to find vulnerabilities in those systems. We need easier and more secure programming languages, which I think is kind of where we are heading. For example, Noir is a great language that is much safer than writing circuits in Circom or Halo2. But in some cases, people will still need to write circuits in Halo2 or Circom, because they need to do some specific optimizations or they need to deploy to specific blockchains, for example. And then we need better testing and security tooling, from simple frameworks to write unit tests, to property-based testing, to formal verification. And not just formal verification. So that's it. I have there a link to our paper where you can find many examples and how to try to avoid some of those pitfalls. And we also have a blog where we publish many posts about ZK security in general. So, thanks a lot. Thank you, Stefanos. This was enlightening. All right, people. As usual, you can ask your questions here. We're going to go through them in order. And let's take the first one. Several times in your slides, you referred to witnesses. What are witnesses? Are those private inputs? So a witness, I would say, is composed of both the private inputs, the public inputs, and all the intermediate steps and the outputs of our circuit. So I would say it's a trace, and then we create a proof about that trace. Thank you. All right, next question. I've put a bunch of those, so do ask serious questions, please. We have a bit of time. What is your favorite bug ever? What is the most interesting bug you've ever found? That's a very good question. I think I can't pick one, but I would say typically the simple bugs, right?
For example, the bug I had in one of the first slides, that could have led to basically draining one of the major mixers we have in the space. But also bugs that have to do with using cryptography in the circuits. And typically, due to some optimizations or some logic errors in those circuits, there could be like pretty interesting exploits that someone can do. Pretty cool, thank you. Alright, the next one. You're doing research, looking for bugs, you're paying your bills and buying your food by finding bugs. Can we consider you have a bug-based diet? We can consider that, yeah. I hope that in some future world there won't be that many bugs and maybe I will have a better diet. But unfortunately at the moment we have tons of bugs. Fantastic, thank you. What are your thoughts on TEEs? Okay, that's the question of DevCon, I feel. Everyone is asking that question. I would say you have different security assumptions when you use TEEs, right? I think they can work along with ZKPs, but they can't replace ZKPs. You have a much weaker threat model when you are working with TEEs. So, yeah, people should use them when they have to use them, but also don't trust them like a black box that will do everything for you, and you are secure if you use a TEE. Wonderful, thank you. What can we do to make more secure languages like Noir faster compared to Circom, particularly with respect to gas cost? How do we make it more efficient? So gas cost, I would say, is kind of independent of what programming language you're using. It's more about what proof system you are using, right? And if that proof system has very efficient verification, that's the main factor. But also, more generally in the circuit layer, I would say that indeed, if you don't really use unsafe in Noir, which then breaks the whole purpose of using Noir, you can at that point write more optimized circuits in Circom. But I would hope that we will have major advances in compilers for ZKPs, and then we can have compiler optimizations that are very strong, like in any other field, and rely on those optimizations to get pretty optimized circuits. But if we do that, then we need very, very solid testing for our compilers to detect any issues in those optimizations. Thank you. You mentioned in your slide that, you know, sometimes we give too much rope to the users to hang themselves with. I think the design of ZK circuits is difficult, but also using them is not very commonplace, right? A lot of users are not used to using these kinds of systems and what goes in, what goes out, what you can do with them, what is safe behavior. You mentioned learning resources. Do you think there's something to do on the user side as well, to explain to them what the benefits are and what should be done? Or is it entirely on the app developer? Yeah, yeah. That's a great question. I think as researchers, it's our responsibility to create learning resources that are easy to follow by almost everyone. So I think we are doing kind of a good job there. For example, at ZK Security, we published a book on Halo2. And basically, many teams in that space develop pretty nice learning resources. And what I really like is that they also have a section about security vulnerabilities and what you should look at when you use a specific DSL. So, yeah, I think we are doing a great job on that. And in a few years, it will be even better. Fantastic. Stefanos, thank you. We're over time. So thank you for your talk. Thank you.
Thanks a lot.", + "sources_streamethId": "6735b9069dbb7a90e1872a41", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735b9069dbb7a90e1872a41.vtt", + "transcript_text": " So thank you all for inviting me to come and speak to you. I want to talk to you a little bit about some of the molecular biology work that's going on in my lab at the University of South Carolina, but I'm going to talk a little bit about the philosophical implications of how pandemics do more than just harm individuals. They actually harm the social trust that exists between individuals, and a proper pandemic preparedness plan has to have in place widgets that can deal with the virus, but also information management schemes so that we can prevent the social harm that happens to societies by the loss of trust that naturally occurs between people when a virus is creeping through and harming people and you don't know what is happening. So you're going to see that this is a tour through some molecular biology work, but I want you to notice ways that information management and blockchain kind of technologies can make our societies more robust to future pandemics. So early on, I and several other people noticed that the SARS-CoV-2 virus had some really weird characteristics. If you do an alignment of this virus against its most recent common ancestors, you find out that it has a lot of mutations that are what are called non-synonymous mutations that cluster in the receptor binding domain. It looks very engineered. It's a circumstantial evidence, but it looks like it had help. If it didn't have help, it's a remarkable coincidence. Nonetheless, we made a bunch of PCR primers. Regardless of where it came from, we made PCR primers and started developing a test in our laboratory just so that we could figure out who had it and who didn't and maybe keep the University of South Carolina functioning properly. We made this kind of a PCR test with our own primers and protocols and proved that the PCR really will detect the virus and it'll detect it down to single molecules in the well. This PCR assay does not have a false positive problem. You can cycle forever on a negative sample and it will never fire, but it can accurately determine how much virus is in any sample. We then kind of accidentally discovered that raw saliva works better than the nasal swabs. We discovered this for a funny reason during the pandemic lockdowns. We ran out of nasal swabs and ran out of RNA purification reagents. So we just tried it on raw saliva and it turned out that it worked fine. It worked even better than the standard procedures. And it's kind of funny given all the gatekeeping that has gone on about nasal swabs and the type of swabs that you have to use and all that sort of stuff. You don't even need any of that stuff. I tracked the virus as it went through lots of individuals, students at the University of South Carolina, and families. And I could prove to myself that the virus was real and that the PCR protocol was actually predicting illness. This is a family here where some of the people got it and some of them didn't. And the PCR signal predicted perfectly who was going to get sick and who never did. This family on the right, that's me. And the blue curve is me whenever I brought it home and gave it to my wife and two daughters, and then they got sick later. So I had access to validating, verifiable information that the PCR results were tracking with symptomology. 
Most people didn't have access to this kind of verifying information, and after a while, they kind of became suspicious of, hey, is this thing really telling me the truth or not? And they didn't know anybody like me that they could ask to see, is this real or not? Another kind of interesting phenomenon about this virus, which we'll probably see again, is the asymptomatic super spreader phenomenon. Now, you may have heard about this. I know it's real. This guy here was the brother of one of my students that was working in the lab. And we were testing everybody every day as we were developing this test. And we were using all of our friends and family members as negative controls. This guy blew a positive and he had more virus in his saliva than there are people in China. But he had zero symptoms. And on day three, he went on a seven-mile run and clocked his normal time. But this guy could infect the whole world if he tried, and he would never have known about it. So this phenomenon is actually real. There are silent super spreaders that are walking among us, and reasonable people grew suspicious about, hey, maybe this PCR is bogus. Maybe this is lying to us or something like that. Because it's not really working when we're trying to just quarantine people based on being sick. And again, future pandemics need to respond to this kind of situation in a way that preserves social trust. We need to plan on this as being a real thing. And I think that the information blockchain might be one solution to this problem. Here's a result of a whole bunch of positives that I'm showing you from just one month of testing. We did like 300,000 tests over a two-year period, and this is one month of positives. And I want you to notice the distribution of CT values. In PCR space, the CT value is kind of inversely related to the log of how much starting material there is. So these very low CT values down here at 10, they have a lot of virus. And these higher CT values around 30 or 32, they have a small amount of virus. Those people at the bottom in that circle have a billion times more, it's a log scale, they got a billion times more virus than the average. Those are our super spreaders. And that's probably what happened in the Wuhan market. Someone was infected and didn't know it because they were an asymptomatic super spreader, but they had enough virus that they could infect everybody that was shopping that day or that week. This happens. It's a very sneaky virus and it causes mistrust to develop because people are getting sick, and you don't know who they were exposed to. Are they somehow worshiping the wrong god, or are they in the wrong political party, or are they guilty of wrong think? And then the natural human brain's tendency to turn on each other takes over and it fractures social trust and society begins to deteriorate. This really, really happened during the pandemic, and we're still dealing with the consequences of it now. Okay, a few words about the mRNA vaccine. The mRNA vaccines came on the scene. They were deployed under an emergency situation. We didn't have time for public trust to develop organically. We had to go for broke because it was a dire situation. Initially, it was reported that these vaccines were 90-something percent effective at preventing infection. That only lasted for about a month, and then they became very ineffective at preventing transmission.
They do have what appears to be durable protection from death about 80% of the time. It does reduce the probability of dying. This is still hotly debated because people don't trust the information that they're getting from various sources. We need ways of having raw data being uploaded to some sort of an information blockchain so that people can see how the data is accumulating and not be suspicious of whoever is gathering the data so that they can come to their own decision about whether this is real or not. Anyway, there are a lot of side effects that are unexplained and they tend to be flippantly dismissed, and this causes more distrust. I use a lot of sequencing technology in my lab. We're gene jocks. We're cancer gene jocks. We sequence things all the time, and we can discover mutations or DNA sequences that we didn't know we were looking for. I did this on vials of a vaccine, okay? This is basically an internet dare that somebody put me up to. Somebody else had done this, and I said, ah, this is crazy. So I did it myself, and lo and behold, I discovered a bunch of DNA pieces in the mRNA vaccines. I thought, well, this might be the explanation for some of these side effects that people are seeing. We should check into this and see if these little bits of DNA are getting into the human genome of vaccinated people. The public deserves to know what they're taking. If it's a vaccine or anything else, the public deserves to know what's in it. And molecular biology tools can do this for you. This is kind of the sequencing result that came out of my lab using a modern, very cheap, deployable sequencing platform. I'll tell you about it later if you ask me about it. This technology, you can use it in the field if you want to. You could sequence something in the jungle on your laptop. It really does work. Anyway, these little pieces here, all the little pieces of DNA we discovered in the vaccine, there's a lot of them. And using this technology, you don't have to know what you're looking for and you can see what's in your saliva, what's in your vaccine, what's in your food, what's in your environment. All these things can be answered by sequencing, but public trust can only be generated by putting this data on some sort of a public ledger, collected by multiple sources, and everybody can just inspect it to see what's there. Think of this kind of data as a form of a contract, you know, or some sort of a blockchain of currency that you might want to generate public confidence in. So we are doing some studies now where we're monitoring the genome integrity of vaccinated people just to see if this stuff is ever getting in there. I made some PCR primers against the DNA sequence that I found. This is what real-time PCR looks like. In case you want to know, the cycle number here at the bottom is how many cycles of the machine it takes before you start seeing product. And the more target was in the well to begin with, the fewer cycles it takes before you start seeing stuff. And the less target is there, the more cycles it takes. So that's why it's backwards and it's a log scale. But we can quantify down to the molecule number. We can literally count molecules in any sample with this kind of technology. And it doesn't suffer from a false positive from too many cycles. It's reliable. So we did an experiment. Can the little pieces of DNA that are in the vaccine ever get into the human genome and modify it? I know this was theoretically possible, and I said this publicly about a year ago.
Well, we did an experiment to prove it. We took some normal human colon organoids. These are normal cells, and we vaccinated them in the lab by just putting the vaccine in the media and let it incubate for a little while and then washed them and then grew them and washed them over a month and kept washing them and passaging them. And then we did the PCR on the genomic DNA. And lo and behold, there's stuff there. And it's the exact same frequency that I predicted about a year ago. About 1 in 1,000 to 1 in 10,000 cells have taken up different pieces of this vaccine and it's a permanent fixture of their genomes now. This is not surprising based on molecular biology first principles. We've been doing this for years in the laboratory. This proves that it happens with this contaminating DNA, which is why I was so weirded out about this stuff when I first discovered it. We've been looking at a bunch of tumors from cancers that have shown up since the vaccine rolled out, and we just looked at like 50 tumors so far that were not really selected for anything other than the fact that they developed in the last three years, and we've got a couple of them that appear to be positive for bits of this DNA. Now we don't know if this is a driver event or if it's a passenger event, and we're doing more investigations to try to figure out where it landed and if it has anything to do with the tumor or not, but it does happen. So this is what we need: the public needs a careful accounting of this so that their confidence, or lack thereof, can be appropriate and based on data, not based on marketing or anything like that. So a final couple of words about another kind of genome modification. It's called DNA methylation. So your genome has methyl groups added to cytosine residues at CpG loci in your genome, and these modifications are part of a developmental program that's been going on since you were first conceived and continues until you die, where you slowly change the methylation landscape of stem cell genes and eventually those stem cell genes begin to be silenced, and you lose stem cells as you grow older. Aging is just a natural extension of the developmental process that goes on in utero, and you eventually just run out of stem cells, and you die. We can tell how old you are by looking at the methyl groups on lots of loci in your genome. We've known for a little while now that when your DNA methylation age is disconnected from your physical age, especially if your DNA age is too old, you're at high risk for all-cause mortality. This is really a thing. And there's a lot of interest in finding out what kind of things or interventions can I do that will make me younger, right? That will delay aging. And we can have surrogate markers for this with this DNA methylation aging. The same sequencing platform that I was telling you about earlier will measure methylation on the fly at the same time we're figuring out sequence alterations. So, you know, sequence analysis of normal blood cells can agnostically monitor for sequence changes and methylation changes and figure out if some sort of an intervention has altered the methylation age. All right, so I did this on a sample that was just available to me. This is me pre- and post-vaccination, and there are spots in the genome that the methylation age changed. This is a real thing.
This is one sample, so it's scientifically meaningless right now, but it's to prove that this is something that is worth doing, in my view, to monitor what happens to people pre- and post-COVID and pre- and post-methylation to see are these interventions helping our methyl age? Are they harming our methyl age? Once you know what you're looking for, it's very cheap and easy to do it. Final thoughts. Pandemics stress test the trust that people have in each other. Low-trust societies fare worse than high trust societies. Molecular biology tools can measure these things, but molecular biology tools can best increase public trust and future pandemic preparedness if we make the information available to the public so that anyone can see it. We should do this now. And I'll stop there. Thank you for your attention. Thank you so much, Philip, for one.", "eventId": "devcon-7", - "slot_start": 1731643200000, - "slot_end": 1731645000000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1b-4F9L2PRDflpHb2iAzeGwsuH6cvqfh3FMJsnOPZOtc", - "resources_slides": null, + "slot_start": 1731571500000, + "slot_end": 1731572400000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1RgW3g8Dx3KqmsQIkx6vtDH-Q1Sykokl4An1TOH01ltI", + "resources_slides": "https://drive.google.com/file/d/1slAuBmp5HLngSMVxEP9X-arUQ8NbW_6e/view", "speakers": [ - "stefanos-chaliasos" + "phillip-j-buckhaults" ] }, "vector": [ + 0, 6, 0, 0, @@ -862526,14 +860118,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -862632,12 +860216,14 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -863093,8 +860679,8 @@ 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -863111,42 +860697,40 @@ }, { "session": { - "id": "what-is-the-status-of-epbs-and-its-future-iterations", - "sourceId": "3MUYVQ", - "title": "What is the status of ePBS and its future iterations", - "description": "We will go over the implementation and research status of ePBS (EIP-7732) and the future iterations and mechanisms it enables.We will describe in detail the main benefits to the protocol that are not directly related to any PBS system. We will showcase the tradeoffs that are present on each design decision and how the separation of validation between the consensus and execution layer in fact frees research with less technical debt and more independent mechanisms for future upgrades.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", - "featured": false, + "id": "white-rabbit-world-premiere", + "sourceId": "7CFGTS", + "title": "White Rabbit World Premiere", + "description": "White Rabbit is the first crowdfunded anime on Ethereum. It is about the metaphorical journey of going down the crypto rabbit hole. White Rabbit follows Mirai, who embarks on a path to discover the meaning of free will and self-sovereignty. 
There will be a seed phrase scavenger hunt in the final act of the film.\r\n\r\nDirected by pplpleasr and Maciej Kuciara, run time 30 minutes", + "track": "Entertainment", + "type": "Music", + "expertise": "Beginner", + "audience": "Design", + "featured": true, "doNotRecord": false, "tags": [ - "PBS", - "fork", - "choice", - "PBS" + "Account", + "Abstraction" ], "keywords": [ - "PBS", - "consensus", - "fork-choice" + "animation", + "film", + "nft" ], - "duration": 1483, + "duration": 2331, "language": "en", - "sources_swarmHash": "f5e5ad50e09c6e119cd1571e3ae0c3a54ebaf8460e392a4fd0abc96593c84a31", - "sources_youtubeId": "w-VwYHq1FA4", + "sources_swarmHash": "537d2d5f2354801bf1e3b64510b42e28f61350e8051c9eb4e09dca64a9516975", + "sources_youtubeId": "kuQMm0J1SK8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673438b39dbb7a90e14a7f53", + "sources_streamethId": "673499879dbb7a90e1ea2b83", "eventId": "devcon-7", - "slot_start": 1731472200000, - "slot_end": 1731474000000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1hihFfnTMBS1Mmp0aS3oHwzA-PX43SVRFqlRfNkbtOwU", - "resources_slides": null, + "slot_start": 1731497400000, + "slot_end": 1731500100000, + "slot_roomId": "main-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1IhRTtp7JRxxcgFhG5DluJWQD1KNt28d8UsxmQ7icfhc", + "resources_slides": "", "speakers": [ - "potuz" + "pplpleasr" ] }, "vector": [ @@ -863154,13 +860738,12 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -863876,9 +861459,9 @@ 0, 0, 0, - 6, 0, 0, + 6, 0, 0, 0, @@ -863940,7 +861523,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -864330,7 +861912,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -864446,7 +862027,7 @@ 0, 0, 2, - 0, + 2, 0, 0, 0, @@ -864468,9 +862049,6 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, 0, @@ -864480,76 +862058,59 @@ 0, 0, 0, + 2, 0, 0 ] }, { "session": { - "id": "whats-going-into-the-pectra-upgrade", - "sourceId": "9WTJRX", - "title": "What’s Going Into the Pectra Upgrade?", - "description": "A talk explaining the core EIPs going into the Pectra upgrade and the core EIPs still TBD for inclusion in Pectra. The talk will also touch on Pectra timing and fork scoping for the next hard fork after Pectra. Finally, the talk will share insights about the governance process of Ethereum in light of Pectra and takeaways about the priorities of Ethereum protocol developers.", - "track": "Core Protocol", - "type": "Talk", + "id": "who-needs-a-wallet-anyway", + "sourceId": "ZZKKRZ", + "title": "Who needs a wallet anyway?", + "description": "This talk confronts the community’s obsession with decentralization purity at the cost of usability. This session explores how to hide the complexities of crypto, enabling seamless integration for users who may not even realize they are using a wallet. We’ll cover simplifying user interactions, making wallets function invisibly, maintaining benefits like permissionless innovation, managing thousands of wallets, and real-world applications. 
It’s time to push for real, user-friendly innovation.", + "track": "Usability", + "type": "Lightning Talk", "expertise": "Beginner", - "audience": "Community", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "fork", - "hard" + "Permissionless", + "Developer Infrastructure", + "Decentralization", + "Environment", + "User Experience", + "trusted", + "wallet", + "execution", + "Developer Infrastructure", + "Permissionless", + "User Experience" ], "keywords": [ - "Pectra", - "Governance", - "Hard forks" + "Trusted", + "Execution", + "Environments" ], - "duration": 1515, + "duration": 555, "language": "en", - "sources_swarmHash": "9c19d1c251eda5ae03524a901f817d1fb823edb289430285e2f1c606f649b80f", - "sources_youtubeId": "ufIDBCgdGwY", + "sources_swarmHash": "dcba0214c791f887977ae84378b09be85862162e256dc4fea0db787f53e98d83", + "sources_youtubeId": "iNLHWc5toYo", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731391200000, - "slot_end": 1731393000000, - "slot_roomId": "stage-1", - "resources_presentation": "https://docs.google.com/presentation/d/1aEeDer7GTTFvo4hdDKqx3zqCVAtFdk2XqVNuiRomMTc", - "resources_slides": null, + "slot_start": 1731393600000, + "slot_end": 1731394200000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1pVk3HgI3jY_eVj3C7F4jVkcdwrwbVFi9NzWDCgBBUFg", + "resources_slides": "https://drive.google.com/file/d/1G8Xm4-jRcL7DkEuY_Vffwk-nLnNBELVd/view", "speakers": [ - "christine-kim" + "itai-turbahn" ] }, "vector": [ - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -864558,6 +862119,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -865250,7 +862812,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -865278,6 +862839,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -865313,6 +862875,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -865350,6 +862913,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -865397,6 +862961,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -865466,6 +863031,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -865488,6 +863054,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -865559,6 +863126,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -865703,8 +863271,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -865804,6 +863370,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -865819,7 +863386,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -865846,8 +863412,25 @@ 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 2, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -865859,50 +863442,47 @@ }, { "session": { - "id": "whats-in-your-dose", - "sourceId": "BRUGUL", - "title": "What's In Your Dose?", - "description": "Pandemic responses require robust technical tools such as molecular diagnostic tests, novel immunization reagents, and recovery surveillance tools. Pandemic responses depend on public trust in these tools and their good faith deployment. Verification strategies to enhance public trust and cooperation will improve the performance of molecular tools in future pandemics.", - "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", - "type": "Lightning Talk", + "id": "who-wins-ethereum-block-building-auctions-and-why", + "sourceId": "VKQ8NC", + "title": "Who Wins Ethereum Block Building Auctions and Why?", + "description": "Today, top 3 block builders produce over 90% of blocks on Ethereum via MEV-boost auction. The block builder market's dynamics evolve rapidly and has significant impact on the development of private mempools, wallets/apps orderflow auctions, and censorship resistance topic. 
In this talk, we share an overview of why the top builders win the most market share, using orderflow composition and bidding behavioral data. We hope to highlight the centralizing risks and failures of current market design.", + "track": "Cryptoeconomics", + "type": "Talk", "expertise": "Intermediate", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Decentralization", - "Public good" + "blocks", + "auction" ], "keywords": [ - "Molecular", - "Biology.", - "", - "Public", - "Health.", - "", - "Public", - "Trust." + "MEV", + "PBS", + "Block Auction" ], - "duration": 907, + "duration": 1465, "language": "en", - "sources_swarmHash": "be8d4b2923608f8527fd4ad82d690569c33c76afabc4fce1d2968e8cd0993e26", - "sources_youtubeId": "F_SxA6W5hzQ", + "sources_swarmHash": "9b726abfb28b4a0b5f846390cde2e1cf0c5dea9ef98862d60dc5e6d80f4b8c49", + "sources_youtubeId": "fP9PFx1ooQE", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735b9069dbb7a90e1872a41", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735b9069dbb7a90e1872a41.vtt", - "transcript_text": " So thank you all for inviting me to come and speak to you. I want to talk to you a little bit about some of the molecular biology work that's going on in my lab at the University of South Carolina, but I'm going to talk a little bit about the philosophical implications of how pandemics do more than just harm individuals. They actually harm the social trust that exists between individuals, and a proper pandemic preparedness plan has to have in place widgets that can deal with the virus, but also information management schemes so that we can prevent the social harm that happens to societies by the loss of trust that naturally occurs between people when a virus is creeping through and harming people and you don't know what is happening. So you're going to see that this is a tour through some molecular biology work, but I want you to notice ways that information management and blockchain kind of technologies can make our societies more robust to future pandemics. So early on, I and several other people noticed that the SARS-CoV-2 virus had some really weird characteristics. If you do an alignment of this virus against its most recent common ancestors, you find out that it has a lot of mutations that are what are called non-synonymous mutations that cluster in the receptor binding domain. It looks very engineered. It's a circumstantial evidence, but it looks like it had help. If it didn't have help, it's a remarkable coincidence. Nonetheless, we made a bunch of PCR primers. Regardless of where it came from, we made PCR primers and started developing a test in our laboratory just so that we could figure out who had it and who didn't and maybe keep the University of South Carolina functioning properly. We made this kind of a PCR test with our own primers and protocols and proved that the PCR really will detect the virus and it'll detect it down to single molecules in the well. This PCR assay does not have a false positive problem. You can cycle forever on a negative sample and it will never fire, but it can accurately determine how much virus is in any sample. We then kind of accidentally discovered that raw saliva works better than the nasal swabs. We discovered this for a funny reason during the pandemic lockdowns. We ran out of nasal swabs and ran out of RNA purification reagents. So we just tried it on raw saliva and it turned out that it worked fine. 
It worked even better than the standard procedures. And it's kind of funny given all the gatekeeping that has gone on about nasal swabs and the type of swabs that you have to use and all that sort of stuff. You don't even need any of that stuff. I tracked the virus as it went through lots of individuals, students at the University of South Carolina, and families. And I could prove to myself that the virus was real and that the PCR protocol was actually predicting illness. This is a family here where some of the people got it and some of them didn't. And the PCR signal predicted perfectly who was going to get sick and who never did. This family on the right, that's me. And the blue curve is me whenever I brought it home and gave it to my wife and two daughters, and then they got sick later. So I had access to validating, verifiable information that the PCR results were tracking with symptomology. Most people didn't have access to this kind of verifying information, and after a while, they kind of became suspicious of, hey, is this thing really telling me the truth or not? And they didn't know anybody like me that they could ask to see, is this real or not? Another kind of interesting phenomenon about this virus, which probably we'll see this again, is the asymptomatic super spreader phenomenon. Now, you may have heard about this. I know it's real. This guy here was the brother of one of my students that was working in the lab. And we were testing everybody every day as we were developing this test. And we were using all of our friends and family members as negative controls. This guy blew a positive and he had more virus in his saliva than there are people in China. But he had zero symptoms. And on day three, he went on a seven-mile run and clocked his normal time. But this guy could infect the whole world if he tried, and he would never have known about it. So this phenomenon is actually real. There are silent super spreaders that are walking among us, and reasonable people grew suspicious about, hey, maybe this PCR is bogus. Maybe this is lying to us or something like that. Because it's not really working when we're trying to just quarantine people based on being sick. And again, future pandemics need to respond to this kind of situation in a way that preserves social trust. We need to plan on this as being a real thing. And I think that the information blockchain might be one solution to this problem. Here's a result of a whole bunch of positives that I'm showing you from just one month of testing. We did like 300,000 tests over a two-year period, and this is one month of positives. And I want you to notice the distribution of CT values. In PCR space, it's minus the log of the CT value is kind of related to the log of how much starting material there is. So these very low CT values down here at 10, they have a lot of virus. And these higher CT values around 30 or 32, they have a small amount of virus. Those people at the bottom in that circle have a billion times more, it's a log scale, they got a billion times more virus than the average. Those are our super spreaders. And that's probably what happened in the Wuhan market. Someone was infected and didn't know it because they were an asymptomatic super spreader, but they had enough virus that they could infect everybody that was shopping that day or that week. This happens. It's a very sneaky virus and it causes mistrust to develop because people are getting sick, and you don't know who they were exposed to. 
Are they somehow worshiping the wrong god, or are they in the wrong political party, or are they guilty of wrong think? And then the natural human brain's tendency to turn on each other takes over and it fractures social trust and society begins to deteriorate. This really, really happened during the pandemic, and we're still dealing with the consequences of it now. Okay, a few words about the mRNA vaccine. The mRNA vaccines came on the scene. They were deployed under an emergency situation. We didn't have time for public trust to develop organically. We had to go for broke because it was a dire situation. Initially, it was reported that these vaccines were 90-something percent effective at preventing infection. That only lasted for about a month, and then they became very ineffective at preventing transmission. They do have what appears to be durable protection from death about 80% of the time. It does reduce the probability of dying. This is still hotly debated because people don't trust the information that they're getting from various sources. We need ways of having raw data being uploaded to some sort of an information blockchain so that people can see how the data is accumulating and not be suspicious of whoever is gathering the data so that they can come to their own decision about whether this is real or not. Anyway, there are a lot of side effects that are unexplained and they tend to be flippantly dismissed, and this causes more distrust. I use a lot of sequencing technology in my lab. We're gene jocks. We're cancer gene jocks. We sequence things all the time, and we can discover mutations or DNA sequences that we didn't know we were looking for. I did this on vials of a vaccine, okay? This is basically an internet dare that somebody put me up to. Somebody else had done this, and I said, ah, this is crazy. So I did it myself, and lo and behold, I discovered a bunch of DNA pieces in the mRNA vaccines. I thought, well, this might be the explanation for some of these side effects that people are seeing. We should check into this and see if these little bits of DNA are getting into the human genome of vaccinated people. The public deserves to know what they're taking. If it's a vaccine or anything else, the public deserves to know what's in it. And molecular biology tools can do this for you. This is kind of the sequencing result that came out of my lab using a modern, very cheap, deployable sequencing platform. I'll tell you about it later if you ask me about it. This technology, you can use it in the field if you want to. You could sequence something in the jungle on your laptop. It really does work. Anyway, these little pieces here, all the little pieces of DNA we discovered in the vaccine, there's a lot of them. And using this technology, you don't have to know what you're looking for and you can see what's in your saliva, what's in your vaccine, what's in your food, what's in your environment. All these things can be answered by sequencing, but public trust can only be generated by putting this data on some sort of a public ledger, collected by multiple sources, and everybody can just inspect it to see what's there. Think of this kind of data as a form of a contract, you know, or some sort of a blockchain of currency that you might want to generate public confidence in.
So we are doing some studies now where we're monitoring the genome integrity of vaccinated people just to see if this stuff is ever getting in there. I made some PCR primers against the DNA sequence that I found. This is what real-time PCR looks like. In case you want to know, the cycle number here at the bottom is how many cycles of the machine it takes before you start seeing product. And the more target was in the well to begin with, the fewer cycles it takes before you start seeing stuff. And the less target is there, the more cycles it takes. So that's why it's backwards and it's a log scale. But we can quantify down to the molecule number. We can literally count molecules in any sample with this kind of technology. And it doesn't suffer from a false positive from too many cycles. It's reliable. So we did an experiment. Can the little pieces of DNA that are in the vaccine ever get into the human genome and modify it? I know this was theoretically possible, and I said this publicly about a year ago. Well, we did an experiment to prove it. We took some normal human colon organoids. These are normal cells, and we vaccinated them in the lab by just putting the vaccine in the media and let it incubate for a little while and then washed them and then grew them and washed them over a month and kept washing them and passaging them. And then we did the PCR on the genomic DNA. And lo and behold, there's stuff there. And it's the exact same frequency that I predicted about a year ago. About 1 in 1,000 to 1 in 10,000 cells have taken up different pieces of this vaccine and it's a permanent fixture of their genomes now. This is not surprising based on molecular biology first principles. We've been doing this for years in the laboratory. This proves that it happens with this contaminating DNA, which is why I was so weirded out about this stuff when I first discovered it. We've been looking at a bunch of tumors from cancers that have shown up since the vaccine rolled out, and we just looked at like 50 tumors so far that were not really selected for anything other than the fact that they developed in the last three years, and we've got a couple of them that appear to be positive for bits of this DNA. Now we don't know if this is a driver event or if it's a passenger event, and we're doing more investigations to try to figure out where it landed and if it has anything to do with the tumor or not, but it does happen. So this is what we need: the public needs a careful accounting of this so that their confidence, or lack thereof, can be appropriate and based on data, not based on marketing or anything like that. So a final couple of words about another kind of genome modification. It's called DNA methylation. So your genome has methyl groups added to cytosine residues at CpG loci in your genome, and these modifications are part of a developmental program that's been going on since you were first conceived and continues until you die, where you slowly change the methylation landscape of stem cell genes and eventually those stem cell genes begin to be silenced, and you lose stem cells as you grow older. Aging is just a natural extension of the developmental process that goes on in utero, and you eventually just run out of stem cells, and you die. We can tell how old you are by looking at the methyl groups on lots of loci in your genome.
We've known for a little while now that when your DNA methylation age is disconnected from your physical age, especially if your DNA age is too old, you're at high risk for all-cause mortality. This is really a thing. And there's a lot of interest in finding out what kind of things or interventions can I do that will make me younger, right? That will delay aging. And we can have surrogate markers for this with this DNA methylation aging. The same sequencing platform that I was telling you about earlier will measure methylation on the fly at the same time we're figuring out sequence alterations. So, you know, sequence analysis of normal blood cells can agnostically monitor for sequence changes and methylation changes and figure out if some sort of an intervention has altered the methylation age. All right, so I did this on a sample that was just available to me. This is me pre- and post-vaccination, and there are spots in the genome that the methylation age changed. This is a real thing. This is one sample, so it's scientifically meaningless right now, but it's to prove that this is something that is worth doing, in my view, to monitor what happens to people pre- and post-COVID and pre- and post-methylation to see are these interventions helping our methyl age? Are they harming our methyl age? Once you know what you're looking for, it's very cheap and easy to do it. Final thoughts. Pandemics stress test the trust that people have in each other. Low-trust societies fare worse than high trust societies. Molecular biology tools can measure these things, but molecular biology tools can best increase public trust and future pandemic preparedness if we make the information available to the public so that anyone can see it. We should do this now. And I'll stop there. Thank you for your attention. Thank you so much, Philip, for one.", + "sources_streamethId": "6735848a9dbb7a90e1e9de85", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731571500000, - "slot_end": 1731572400000, - "slot_roomId": "breakout-3", - "resources_presentation": "https://docs.google.com/presentation/d/1RgW3g8Dx3KqmsQIkx6vtDH-Q1Sykokl4An1TOH01ltI", - "resources_slides": null, + "slot_start": 1731558600000, + "slot_end": 1731560400000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1sCbCcL_kcX8oEU3I_BJLpuFgt1wzgpYDENnympxQ7iI", + "resources_slides": "https://drive.google.com/file/d/1pNpXn9zpIEXiWMd7WM8K9oAkPovJ39kc/view", "speakers": [ - "phillip-j-buckhaults" + "burak-oz", + "danning-sui" ] }, "vector": [ + 0, 0, 6, 0, @@ -866632,6 +864212,7 @@ 0, 0, 6, + 6, 0, 0, 0, @@ -866756,15 +864337,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, 0, 0, 0, @@ -866805,6 +864377,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -866986,6 +864559,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -867239,40 +864813,36 @@ }, { "session": { - "id": "white-rabbit-world-premiere", - "sourceId": "7CFGTS", - "title": "White Rabbit World Premiere", - "description": "White Rabbit is the first crowdfunded anime on Ethereum. It is about the metaphorical journey of going down the crypto rabbit hole. White Rabbit follows Mirai, who embarks on a path to discover the meaning of free will and self-sovereignty. 
There will be a seed phrase scavenger hunt in the final act of the film.\r\n\r\nDirected by pplpleasr and Maciej Kuciara, run time 30 minutes", - "track": "Entertainment", - "type": "Music", - "expertise": "Beginner", - "audience": "Design", - "featured": true, + "id": "why-defi-matters-on-ethereum", + "sourceId": "E7GFJC", + "title": "Why DeFi matters on Ethereum", + "description": "Why DeFi matters on Ethereum, and why Ethereum is the best place for DeFi.", + "track": "Real World Ethereum", + "type": "Panel", + "expertise": "", + "audience": "Engineering", + "featured": false, "doNotRecord": false, - "tags": [ - "Account", - "Abstraction" - ], - "keywords": [ - "animation", - "film", - "nft" - ], - "duration": 2331, + "tags": [], + "keywords": [], + "duration": 3320, "language": "en", - "sources_swarmHash": "537d2d5f2354801bf1e3b64510b42e28f61350e8051c9eb4e09dca64a9516975", - "sources_youtubeId": "kuQMm0J1SK8", + "sources_swarmHash": "df5c65e42388ca8d8c9582a368d558bba237216fb411806b3238eb8a8168a461", + "sources_youtubeId": "C4MIV9oQUYk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673499879dbb7a90e1ea2b83", + "sources_streamethId": "6735dfaf9dbb7a90e1bf8303", "eventId": "devcon-7", - "slot_start": 1731497400000, - "slot_end": 1731500100000, + "slot_start": 1731578400000, + "slot_end": 1731582000000, "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1IhRTtp7JRxxcgFhG5DluJWQD1KNt28d8UsxmQ7icfhc", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/14OuUArkp-1DdYuHEylurELQO49RZZh5IHebMv6N4LAU", + "resources_slides": "", "speakers": [ - "pplpleasr" + "tascha", + "loi-luu", + "kain-warwick", + "namik-muduroglu" ] }, "vector": [ @@ -867282,9 +864852,6 @@ 0, 0, 0, - 0, - 0, - 0, 6, 0, 0, @@ -867509,6 +865076,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -868005,13 +865573,13 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, 0, + 6, + 6, + 6, 0, 0, 0, @@ -868573,8 +866141,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -868590,6 +866156,7 @@ 0, 0, 0, + 2, 0, 0, 2, @@ -868605,56 +866172,54 @@ 0, 0, 0, - 2, - 0, 0 ] }, { "session": { - "id": "who-needs-a-wallet-anyway", - "sourceId": "ZZKKRZ", - "title": "Who needs a wallet anyway?", - "description": "This talk confronts the community’s obsession with decentralization purity at the cost of usability. This session explores how to hide the complexities of crypto, enabling seamless integration for users who may not even realize they are using a wallet. We’ll cover simplifying user interactions, making wallets function invisibly, maintaining benefits like permissionless innovation, managing thousands of wallets, and real-world applications. 
It’s time to push for real, user-friendly innovation.", - "track": "Usability", - "type": "Lightning Talk", - "expertise": "Beginner", + "id": "why-erc-7683-is-broken-and-how-to-fix-it", + "sourceId": "YT3SSN", + "title": "Why ERC 7683 is broken and how to fix it", + "description": "While I appreciate the authors spending time on this problem statement and thinking about standardising flows, ERC 7683 is deeply flawed it still forces offchain agents to understand the order they are trying to fulfill and it doesnt give users any guarantees of execution or understanding of whats happening under the hood, I think its because its standardising things on the \"intent\" layer where instead we need to standardise more downstream so information like security can be better presented", + "track": "Layer 2", + "type": "Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Permissionless", - "Developer Infrastructure", - "Decentralization", - "Environment", - "User Experience", - "trusted", - "wallet", - "execution", - "Developer Infrastructure", - "Permissionless", - "User Experience" + "Appchains", + "Cross-L2", + "Token bridging", + "Accessibility", + "erc-7683", + "intent", + "Accessibility", + "Appchains", + "Cross-L2", + "Token bridging" ], "keywords": [ - "Trusted", - "Execution", - "Environments" + "chain-abstraction", + "intents" ], - "duration": 555, + "duration": 1275, "language": "en", - "sources_swarmHash": "dcba0214c791f887977ae84378b09be85862162e256dc4fea0db787f53e98d83", - "sources_youtubeId": "iNLHWc5toYo", + "sources_swarmHash": "5fc0bc4aab0210e4bbcfda418f6debbf3e708920939b5efb8bc1b105399cf8f4", + "sources_youtubeId": "TOkUi0asAd0", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "67344f0d9dbb7a90e1accefa", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67344f0d9dbb7a90e1accefa.vtt", + "transcript_text": " Thank you so much for sticking through. I know everyone's hungry for lunch, so am I. Hopefully, this will be a solid one, after which you think you will need a break. Super glad that you are still up and alive. Sweet guys, I think to get into it, I sort of wanted to talk about this upcoming thing called Intents, which is supposed to change how we interact with chains going forward. And a bunch of projects are starting to look at it in a pretty deep level, including people at Uniswap, Cowswap, and a bunch of other protocols. So I just wanted to chat about an upcoming proposal there and talk through some of the design choices that have been made and why those might not be right and how could we potentially fix it. This is not a diss track. I'm using the EIP or ERC as sort of like a channel to talk about this stuff. Not really talking about this specific implementation. I work at Socket. I'm a web host. Also go by VC. You can find me after the talk. But if you want to check out the exact proposal I'm talking about here, there's a quick QR code. I think this is by Mark Tura from Uniswap. And this is what Uniswap is going to be using for the cross-chain swap stuff, generally speaking. I think we always begin this topic with what really are intents, right? Unfortunately, that is something I don't want to get into for this particular doc, right? It always sort of goes into something you don't want to get into. But I think broadly speaking, you want to do something on-chain, you specify it, someone else takes care of it. 
That's the rough mental model you can have in mind. And this is different than transactions, because in transactions you sort of like send the transaction on-chain saying exactly what you want to do, but here you just talk about the end goal you want to achieve, and someone else takes care of the execution. That's roughly how to think about it. I'm going to talk about a particular design pattern here. Goals of this particular ERC, 7683, are about how do we make it such that fillers aren't fragmented. This probably doesn't make sense right away, so let me give you guys a quick walkthrough about what really is happening here. So you see a nice farmer emoji there? That is our normal blockchain user today who is sort of like signing something that looks like a struct where he's sort of like talking about what chains he wants to go between, what's the input between, what's the input asset, what's the output asset, things like that. And he signs this stuff and sends it on chain to a particular settlement contract on some source network where a bunch of these robots, these bots sort of look up to this source chain contract for what are pending intents that need to be filled. Then they sort of like fill it on the destination. And then there's a oracle that sort of lets the settlement contract on source chain know that hey, the request that you had has been fulfilled by this particular bot on destination. So really simple. User sends funds on source. The bots fill it on the destination. And then the oracle sort of conveys the message that things are done, really. I think filler fragmentation starts here. So now we have two structs, which basically means now we have two intent protocols. The only difference between the first one and the second one is that the second one has an extra parameter called call data. Let's assume everything else is the same. This is the only difference, right? Due to which now we have these two separate contracts, and earlier while we had had eight fillers solving for this particular intent protocol, as soon as we have two intent protocols, these are now split in two. As more and more intent protocols go live, fillers get fragmented because they need to do more work. They need to do more integrations and stuff like that, super annoying stuff. This is harmful for everyone. Not exactly harmful, but really undesirable, specifically because there aren't too many fillers anyway in the space. There are probably 15 to 20 entities that are operating in this particular role. And if you start fragmenting those, we don't know where we'll end up. So we want competition over here. So we want as many fillers as possible. role and if you start fragmenting those, we don't know where we'll end up. So we want competition over here. So we want as many fillers as possible. So fragmentation is something we want to avoid here. Fragmentation is bad for all three of these parties. It's bad for the user because if people aren't competing, you're going to get the worst possible price. 
If you don't have an intent standard, then you don't actually understand what you are", "eventId": "devcon-7", - "slot_start": 1731393600000, - "slot_end": 1731394200000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1pVk3HgI3jY_eVj3C7F4jVkcdwrwbVFi9NzWDCgBBUFg", - "resources_slides": null, + "slot_start": 1731479400000, + "slot_end": 1731481200000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1MNzcD3lH260PkgaznRJQQW41lkxoYMoKXT73MHMNPfg", + "resources_slides": "https://drive.google.com/file/d/1hSjtyhdmrJ0ifk7fMlYIdCBcGzLX5F2e/view", "speakers": [ - "itai-turbahn" + "vaibhav-chellani" ] }, "vector": [ @@ -868665,7 +866230,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -869388,38 +866952,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -869463,18 +866995,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -869581,14 +867101,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -869604,8 +867116,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -869634,6 +867144,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -869660,8 +867171,10 @@ 0, 0, 0, + 2, 0, 0, + 2, 0, 0, 0, @@ -869677,7 +867190,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -869789,6 +867301,37 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -869922,7 +867465,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -869957,7 +867499,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -869974,8 +867515,27 @@ 0, 0, 0, + 0, + 0, + 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 2, 0, + 0, + 0, 2, 0, 0, @@ -869994,57 +867554,53 @@ }, { "session": { - "id": "who-wins-ethereum-block-building-auctions-and-why", - "sourceId": "VKQ8NC", - "title": "Who Wins Ethereum Block Building Auctions and Why?", - "description": "Today, top 3 block builders produce over 90% of blocks on Ethereum via MEV-boost auction. The block builder market's dynamics evolve rapidly and has significant impact on the development of private mempools, wallets/apps orderflow auctions, and censorship resistance topic. In this talk, we share an overview of why the top builders win the most market share, using orderflow composition and bidding behavioral data. 
We hope to highlight the centralizing risks and failures of current market design.", - "track": "Cryptoeconomics", + "id": "why-ethereums-issuance-policy-is-redacted", + "sourceId": "39HYEG", + "title": "Why Ethereum's Issuance Policy is [redacted]?", + "description": "This talk explores the status quo of staking economics, its drawbacks as we see them and what the future of staking economics could look like.", + "track": "Core Protocol", "type": "Talk", "expertise": "Intermediate", "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "blocks", - "auction" + "ACD", + "Staking", + "Economics", + "ACD", + "Economics", + "Staking" ], "keywords": [ - "MEV", - "PBS", - "Block Auction" + "none" ], - "duration": 1465, + "duration": 1694, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "0118431db811d58100ca9b2f46e6661abd7baab68e55a3055f821c149862bfa0", + "sources_youtubeId": "cUgKXBq017g", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735848a9dbb7a90e1e9de85", + "sources_streamethId": "6736ecef1b0f83434d629108", "transcript_vtt": "No VTT link provided", "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731558600000, - "slot_end": 1731560400000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1sCbCcL_kcX8oEU3I_BJLpuFgt1wzgpYDENnympxQ7iI", - "resources_slides": null, + "slot_start": 1731552300000, + "slot_end": 1731554100000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1H2muDBPNRQn-IIusKik3f5fD_tsi9lmseX7GwmbUAh8", + "resources_slides": "https://drive.google.com/file/d/1VXeY7G-WzJNouMEPJ447HTB7-aTCvfmR/view", "speakers": [ - "burak-oz", - "danning-sui" + "caspar-schwarz-schilling", + "ansgar-dietrichs" ] }, "vector": [ - 0, - 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -870098,6 +867654,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -870765,8 +868322,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -870776,6 +868331,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -870823,6 +868379,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -870916,6 +868473,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -870932,7 +868490,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -871076,6 +868633,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -871115,7 +868673,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -871370,36 +868927,43 @@ }, { "session": { - "id": "why-defi-matters-on-ethereum", - "sourceId": "E7GFJC", - "title": "Why DeFi matters on Ethereum", - "description": "Why DeFi matters on Ethereum, and why Ethereum is the best place for DeFi.", - "track": "Real World Ethereum", - "type": "Panel", - "expertise": "", + "id": "why-vpns-are-scams-and-what-to-do-about-it", + "sourceId": "TRMC3L", + "title": "Why VPNs are scams and what to do about it", + "description": "Existing VPNs are essentially scams. Free VPNs and most centralized VPNs (such as ExpressVPN, owned by Kape) are effectively data harvesting companies. Decentralized VPNs usually have a few large servers and offer barely any more privacy than centralized VPNs. What is missing is 1) onion-routing packets like Tor 2) adding noise (fake traffic) 3) censorship-resistance and 4) mixing packets from different users together. 
We'll explore how technologies work to defeat even AI adversaries.", + "track": "Cypherpunk & Privacy", + "type": "Lightning Talk", + "expertise": "Intermediate", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [], - "keywords": [], - "duration": 3320, + "tags": [ + "censorship", + "resistance", + "Decentralization", + "Privacy", + "Use Cases" + ], + "keywords": [ + "VPNs", + "mixnets", + "censorship-resistance" + ], + "duration": 538, "language": "en", - "sources_swarmHash": "df5c65e42388ca8d8c9582a368d558bba237216fb411806b3238eb8a8168a461", - "sources_youtubeId": "HeVcrMp_erg", + "sources_swarmHash": "a1d36033bf5ebaf4e1f8ed35812948388d4ba3cb56f13648118d2e9ba837ede6", + "sources_youtubeId": "4Ir-fptXPr8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735dfaf9dbb7a90e1bf8303", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731578400000, - "slot_end": 1731582000000, - "slot_roomId": "main-stage", - "resources_presentation": "https://docs.google.com/presentation/d/14OuUArkp-1DdYuHEylurELQO49RZZh5IHebMv6N4LAU", - "resources_slides": null, + "slot_start": 1731389400000, + "slot_end": 1731390000000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1X40WVD7E27evrL1uMb90tNX_OrjLhOmaw9pd-qrbFB4", + "resources_slides": "https://drive.google.com/file/d/1YHaGta0yxyKPo2DImTbCg7zUcFeEhQZG/view", "speakers": [ - "tascha", - "loi-luu", - "kain-warwick", - "namik-muduroglu" + "harry-halpin" ] }, "vector": [ @@ -871408,7 +868972,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -871631,14 +869194,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -872052,6 +869607,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -872136,9 +869692,6 @@ 0, 0, 0, - 6, - 6, - 6, 0, 0, 0, @@ -872241,6 +869794,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -872263,6 +869817,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -872276,6 +869831,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -872707,6 +870263,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -872721,6 +870279,7 @@ 2, 0, 0, + 0, 2, 0, 0, @@ -872739,49 +870298,45 @@ }, { "session": { - "id": "why-erc-7683-is-broken-and-how-to-fix-it", - "sourceId": "YT3SSN", - "title": "Why ERC 7683 is broken and how to fix it", - "description": "While I appreciate the authors spending time on this problem statement and thinking about standardising flows, ERC 7683 is deeply flawed it still forces offchain agents to understand the order they are trying to fulfill and it doesnt give users any guarantees of execution or understanding of whats happening under the hood, I think its because its standardising things on the \"intent\" layer where instead we need to standardise more downstream so information like security can be better presented", - "track": "Layer 2", + "id": "wizard-build-your-own-p-iop-protocol-in-15-min", + "sourceId": "W78CYD", + "title": "Wizard: build your own P-IOP protocol in 15 min!", + "description": "Wizard is a new open-source framework allowing you to write your own ZK proving scheme. Wizard is one of the backbones of Linea zkEVM's prover and it can be used to implement advanced protocols easily. 
In this session I will guide you through an implementation of Plonk using just a few lines of code.", + "track": "Applied Cryptography", "type": "Talk", "expertise": "Intermediate", - "audience": "Engineering", + "audience": "Research", "featured": false, "doNotRecord": false, "tags": [ - "Appchains", - "Cross-L2", - "Token bridging", - "Accessibility", - "erc-7683", - "intent", - "Accessibility", - "Appchains", - "Cross-L2", - "Token bridging" + "Protocol Design", + "Frameworks", + "SNARK", + "polynomial-iop", + "Frameworks", + "Protocol Design", + "SNARK" ], "keywords": [ - "chain-abstraction", - "intents" + "Polynomial-IOP" ], - "duration": 1275, + "duration": 1471, "language": "en", - "sources_swarmHash": "5fc0bc4aab0210e4bbcfda418f6debbf3e708920939b5efb8bc1b105399cf8f4", - "sources_youtubeId": "TOkUi0asAd0", + "sources_swarmHash": "d38438171620ecd34967ddd26ca2f7cf37da87509735a26d94d1c7bfff1a4873", + "sources_youtubeId": "4N43UH5hb14", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67344f0d9dbb7a90e1accefa", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67344f0d9dbb7a90e1accefa.vtt", - "transcript_text": " Thank you so much for sticking through. I know everyone's hungry for lunch, so am I. Hopefully, this will be a solid one, after which you think you will need a break. Super glad that you are still up and alive. Sweet guys, I think to get into it, I sort of wanted to talk about this upcoming thing called Intents, which is supposed to change how we interact with chains going forward. And a bunch of projects are starting to look at it in a pretty deep level, including people at Uniswap, Cowswap, and a bunch of other protocols. So I just wanted to chat about an upcoming proposal there and talk through some of the design choices that have been made and why those might not be right and how could we potentially fix it. This is not a diss track. I'm using the EIP or ERC as sort of like a channel to talk about this stuff. Not really talking about this specific implementation. I work at Socket. I'm a web host. Also go by VC. You can find me after the talk. But if you want to check out the exact proposal I'm talking about here, there's a quick QR code. I think this is by Mark Tura from Uniswap. And this is what Uniswap is going to be using for the cross-chain swap stuff, generally speaking. I think we always begin this topic with what really are intents, right? Unfortunately, that is something I don't want to get into for this particular doc, right? It always sort of goes into something you don't want to get into. But I think broadly speaking, you want to do something on-chain, you specify it, someone else takes care of it. That's the rough mental model you can have in mind. And this is different than transactions, because in transactions you sort of like send the transaction on-chain saying exactly what you want to do, but here you just talk about the end goal you want to achieve, and someone else takes care of the execution. That's roughly how to think about it. I'm going to talk about a particular design pattern here. Goals of this particular ERC, 7683, are about how do we make it such that fillers aren't fragmented. This probably doesn't make sense right away, so let me give you guys a quick walkthrough about what really is happening here. So you see a nice farmer emoji there? 
That is our normal blockchain user today who is sort of like signing something that looks like a struct where he's sort of like talking about what chains he wants to go between, what's the input between, what's the input asset, what's the output asset, things like that. And he signs this stuff and sends it on chain to a particular settlement contract on some source network where a bunch of these robots, these bots sort of look up to this source chain contract for what are pending intents that need to be filled. Then they sort of like fill it on the destination. And then there's a oracle that sort of lets the settlement contract on source chain know that hey, the request that you had has been fulfilled by this particular bot on destination. So really simple. User sends funds on source. The bots fill it on the destination. And then the oracle sort of conveys the message that things are done, really. I think filler fragmentation starts here. So now we have two structs, which basically means now we have two intent protocols. The only difference between the first one and the second one is that the second one has an extra parameter called call data. Let's assume everything else is the same. This is the only difference, right? Due to which now we have these two separate contracts, and earlier while we had had eight fillers solving for this particular intent protocol, as soon as we have two intent protocols, these are now split in two. As more and more intent protocols go live, fillers get fragmented because they need to do more work. They need to do more integrations and stuff like that, super annoying stuff. This is harmful for everyone. Not exactly harmful, but really undesirable, specifically because there aren't too many fillers anyway in the space. There are probably 15 to 20 entities that are operating in this particular role. And if you start fragmenting those, we don't know where we'll end up. So we want competition over here. So we want as many fillers as possible. role and if you start fragmenting those, we don't know where we'll end up. So we want competition over here. So we want as many fillers as possible. So fragmentation is something we want to avoid here. Fragmentation is bad for all three of these parties. It's bad for the user because if people aren't competing, you're going to get the worst possible price. If you don't have an intent standard, then you don't actually understand what you are", + "sources_streamethId": "67346da49dbb7a90e1d16a65", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67346da49dbb7a90e1d16a65.vtt", + "transcript_text": " Transcription by ESO. Translation by — to you the tech we developed to develop the proof system of Linear. Linear actually uses many different proof systems for many different use cases. It is a complex system. What I'm going to present to you is the main one that we use for proving the execution, so concretely proving EVM execution. So, I shall present myself. I'm Alexandre. I've been working in this space for seven years now, and been focused on cryptography for five years now. I've been working on INEA since the beginning, even doing research on roll-up even before that. So, yeah, just to talk a little bit about myself. Okay, so I'd like to involve the audience a little bit. Who knows what a ZK-EVM is? All right, so that's a good half, a good third. So a ZK-EVM layer is a layer two solution which is whose purpose is to help Ethereum scalability. 
The idea is, instead of sending transactions on the Ethereum mainnet, you send them to a third party, Linear. Linear runs an Ethereum virtual machine, will process the transaction. It will be processed separately from the mainnet. So the contracts that are deployed on mainnet are not the same, necessarily the same as the ones that are deployed on linear. The states are separated. But we can bridge between the two. I'm not going to describe much more how we bridge between Ethereum and linear. But what is important to remember here is that we basically bundle the execution of many transactions and submit one proof at the end. We give you one number, every finalization finalizes on average 65,000 transactions at once. And since we are using snark proof systems, meaning they have a very short verifier time which is mostly independent from how many transactions are executed, that's how we get scalability. Verifying the final proof which is a Planck proof takes a few milliseconds but it's for 65,000 transactions. So that's why it's much cheaper and that's how ZK EVMs can achieve much lower gas price. All right. So maybe a few words on ZKP and SNARK. ZKP is, I mean, usually we say ZKP but most of the time what we use in production are ZKPs, I mean, usually we say ZKP, but most of the time what we use in production are ZK argument of knowledge. And in order for a protocol to be a ZK argument of knowledge, it needs three properties. You need zero knowledge. Zero knowledge means that the proof does not reveal more than what it should reveal. You need to have completeness, meaning that if you want to prove something that is true, you should always be capable of generating the proof. Meaning, for instance, if you want to know that you proved the square root of some number, it should work for every number and not just one number. And the most important one is argument is knowledge soundness, or essentially computational knowledge soundness. It is how you say that you know, that you prove that you know what you're proving you know. And so, as I mentioned earlier, we are building a ZKVM, but really the ZK is not about zero knowledge or hiding things it's really about proving that we know valid execution traces. We'll explain more on that later but especially we need more properties. We want the verification to be succinct, to be small and we want also the protocol to be non-interactive. It should be a single proof, a single message, and from that we should be capable of verifying a proof. Okay, so let's apply all that to the EVM. The Ethereum virtual machine is a state machine on which we can execute and a transaction in GVM is like an instruction from the user of that virtual machine. It is specialized for running smart contracts. It has a lot and a lot of features that interact in a complex way between each other. And not every computation that are easy to do on the EVM are necessarily easy to prove. So for instance, I'm thinking about the Ketchak hash function that every smart contract uses all the time, like it's free. It's really not free to prove. And so if we want to solve that problem, so of course we need to deal with the inherent complexity of the EVM, but we also need a proof system that is flexible enough to solve all the problems that we need to solve to execute the EVM. Concretely, in order to prove the EVM, we have traces of execution that are instantiated by polynomials. We call them colon in our framework. 
And it could be summed up as a collection of errors that communicate with each other by sub-argument. And there are many different sub-arguments that are possible to use. We can have lookups. We can have projection queries. We also support variants of lookups that we call conditional lookups or fractional lookups, we can have projection queries, we also support variants of lookups that we call conditional lookups or fractional lookups. So in order to make this work, we need a proof system that is really flexible and can deal with all the polymorphism that is inherent to proving linear zarythmetization. Okay, so in order to do that, we designed the wizard framework, which is the main gateway between describing the constraints to represent EVM and actually proving things. It has a very neat particularity in that in the wizard framework, you write protocols in an ideal model and you don't have to worry about how you're going to commit to things or which arguments you're going to use. You just say what you want and then you have a list of techniques like a menu which you apply to that protocol statement and it will create for you a proof system, as complicated as you need it to be. And so that's perfect for us. It turns out that this is how, in academia, they describe complex protocols. Like if you take, for example, the GROSS-16 proof system, if you had a look at the paper before, you would see that they say, okay, so this is the constraint system at the beginning, then we do something that is called QAP, and then we do something that is called NILP, and then it continues, and at the end you have a concrete proof system, which is a GROSS-16 proof system, but that does not translate into the implementation. People just take the final protocol at the end that you apply after every step of compilation of growth 16, and there is a growth 16 implementation. So what we do is that we actually implement every possible step so that this step can be reused for other proof systems, and we don't have to really mentally work out the whole protocol. In the case of linear, it would just be impossible. And on top of that, we made it in such a way that if anybody wants to add their own compilers or do their own tweak or add their own type of constraints, the framework will allow that without changing the core of it. All right. allow that without changing the core of it. Alright, so as I mentioned earlier, the proof system, the protocol that you are going to construct, you have to describe it in an ideal model. And this ideal model involves what we call the Wizard Oracle. The Wizard Oracle is, from the point of view of the protocol designer, a trusted third party. It knows everything. It remembers everything. It does computation for free. It is always honest. It's like something you really... If it existed in reality, we would not need cryptography. So that does not mean that what we build will not be secure. It just means that the Oracle will concretely be instantiated by something else in the future as we compile the protocol. So the protocol can also be described in a multi-round fashion. I mentioned at the beginning that we need non-interactivity, but the protocol sort this out using the Fiat-Shamir trick. It puts some limitation. It means that the verifier can only send random challenges to the prover, but that's a common limitation that every protocol has nowadays. It's very uncommon to be in a contrary situation. 
And so essentially, the prover can use the oracle by sending a big, large message to it, and the oracle will just remember and notify the verifier that, hey, the prover did this part of the work. You can ask questions. So the verifier can ask questions, and the oracle responds to the question without needing to do any computation. It's like a godlike entity, and it is always honest. So you don't have to worry about him lying. So as I mentioned the prover, the verifier and the oracle can send messages to each other and here comes the first primitive of the framework which is what we call colons. So colons can be of any sort. We have what we call committed column. Committed column means it is sent to the oracle. And basically being sent to the oracle means that the prover cannot change its mind about what was sent to the oracle. You can only send something once to the oracle. Otherwise, you're cheating. And the protocol will always ensure that. But on top of that we have what we call pre-computed columns. So pre-computed columns, they can be of two types. They can be sent to the verifier or sent to the oracle and they are known beforehand. So that's something that is part of the protocol description, actually. They always have the same values. You can think, for instance, the Planck circuit description, which is instantiated by several polynomials. We are going to see how we can implement Planck in 10 minutes. So I'm just putting myself forward a little bit. You need those columns as part of the proving keys, and they describe the plumb circuits. And on top of that, you can send proofs, and proofs means a message that is sent directly to the verifier. And there are other types. Actually, we have eight types of different variants of column type. The columns also have a predefined size. It can be one. It can be one, it has a power of two, it's due to a limitation, a current limitation in the framework, and they have a round assignment. And the round number is essentially describing at which round of interaction the column is associated by the prover. That's for the main part. And then, as I said, for some columns that are sent to the oracle, or for some groups of columns that are sent to the oracle, the verifier can ask questions about these columns to the oracle. So that's what we call queries. It's a common term used in academia. If you know about FRI, they do random position opening queries, so that's what they mean when they mean query. In polynomial IOP protocol, there would be univariate openings. In our framework, query stands for at the same time constraints. It would be questions that have a yes or no answer, like is this value the square of this other value? The answer can always be yes or no. Most of the time, it is served as a constraint. And here we describe it as a query. And we also have open questions that are like polynomial opening, position opening, and so these expect a response from the oracle that is other than yes or no. So we support many, many different types of queries. It can be lookups, it can be univariate evaluation, it can be inner product between several columns. So essentially, most of the folklore is there. And we implemented it because we needed it for the concrete implementation of vortex and linear arithmetization. All right. 
So as I said, once you have a protocol description, the only thing you need to do is to describe how you want to go from this description in an ideal world with ideal oracle into a concrete protocol that is secure in the standard model. So here is the base description that allows us to go from without IOP to polynomial IOP at the end. But in practice, this would not be sufficient. We would also need a polynomial commitment to turn this into a concrete protocol. So this part of the code does not describe how we do the polynomial commitment, but how we go to this point. Okay, so now let's get onto a practical example. So here is the Planck constraints description. So we have a set of columns. Qs are describing a Planck circuit. Xa, Xb, XC are describing the witness. And so usually we add another column on the right that is for the public inputs and that we are going to use. On top of that, Planck has some copy constraints which can be instantiated by a permutation argument, which I'm going to show you how to do. Okay, so let's implement Plonk. So as I mentioned, we need to define our protocol, then we can compile it, and after we can run it, so running the prover and verifying it. We can also automatically recurse it, but we are not going to cover that today. All right. Okay, so first of all, defining the protocol. This is done by specifying a function. So the whole framework is in Go. Most of the prover stack of linear is using Gnark, and the linear prover is also implemented in Go, as it is also relying on Gnark's implementation. So the defined function has this simple signature, and the builder is an object that is going to store everything we said to declare an entity in the protocol. So either queries, columns, or so on. And we can also specify checks to be done by the verifier. So let's go into that. OK. So here are the verifier. So let's go into there. Okay, so here are the columns description. So you can recognize the column that we saw at the beginning. So the queues column are for the circuit description. They should be the same no matter what we try to prove. So they go into pre-computed. The XXBXC are commitment, they have to be sent to the oracle and PI for the public input is inserted as a proof object because it has to be revealed to the prover. It's a bit counterintuitive that we call that proof but proof means a message sent to the pro community. It's part of the proof. Even if it's a nonsense from an academia perspective. And also a number of public inputs. Because the PI is, colon is larger than the actual number of public inputs, because every colon should have the same size. And also the value of the queue should be known beforehand, of course, because that's the circuit description. Okay. So now we can declare the queries. So on your right you have a global constraint, which is an arithmetic expression that has to vanish on all the rows of every column that it's touching. We can recognize the equation of the Planck gate constraints at the beginning. And we have a fixed permutation, which is instantiated by some forced permutation that has the concatenation of XA, XB, XC invariant. And that's how Planck proves the copy constraints. Then finally, we need to add a verifier check. This is to ensure that the PI that is sent to the verifier is well formed and that it should be padded on the right with zeros. Okay. So now once we have that, we can compile that into an actual protocol. 
So here I added the part that converts the PIOP into a concrete protocol because I added the vortex.compile, vortex being the polynomial commitment that we use. And now we just have to run it. So the only thing we need to specify is how concretely we are going to assign our columns. Because this is the only thing that is unknown at this stage, after reading the protocol description. So, yeah. We just provide it and we assign it. It's four lines of code. And so, yeah. Now, so we have some things that allow us to write Plunk constraints manually. But I don't know if you have tried writing Plunk circuit by hand, but this is really difficult. And it turns out that Gnark offers a very nice front-end to write circuits. So let's just write a wrapper of what we just wrote using Gnark so that we can use a Gnark circuit description. So I did the implementation. It was a bit longer than 100 lines, but it was essentially a few automated stuff. Okay, so let's do a circuit. So let's use Fibonacci as a use case. So my circuit, you have two values, U0, U1 as input, and you want to have the 50th number of the Fibonacci sequence generated by U0 and U1. U0 and U1 being public parameters. So on your right, you have the circuit writing in NARC, so you can see that it's fairly easy and much simpler than writing a circuit by hand. And then we just have to run it. And that's it. You just create your proof function that is explaining how to assign the colon. You run wizard.prove, and it's going to generate a proof for you using vertex-threaded polynomial commitment, and you can verify that in one line. All right. I have six seconds for the polynomial commitment, and you can verify that in one line. All right. I have six seconds for the future improvement, so we want to add more queries, and we think we can also remove the necessity to specify runs in the protocol as it should be inferred automatically. All right. That's it. You can check out the code here. Amazing, Alexandre. You can check out the code here. Yeah. Amazing. Thanks so much for the great introduction of Wizard. So a reminder that if you scan the QR code, you will attend the session, and you can ask questions, and you can also claim an NFT. And you can also vote. So if you have a question that you really want it to be answered, vote for them. So let's start with the top one. Does WSIR support lookup tables? And can it be used to implement lookup tables based on CKVMs? Absolutely. So the way you would do it is, for instance, say, so what you can do, first of all, if you want to do a range check, so that's a big use case for lookup table, you already have a range query. So you just take one column and you say, I have this query that just enforces the whole column to be within bound, and that's all you do. A second way, if you want to do more complicated range checks, like XOR, for instance, then you would have to specify three columns for your XOR. One column for the left side, the right side, and one for the result. And in this column, you put all the possibilities. So maybe say for 8 bits to 8 bits, you would have 2 to the 16th possibility. So you write down all of that in your table. And then you create a lookup constraint between this table and a triplet of columns for which you want to enforce XOR constraint. And you can also add a conditional lookups. You can have a fourth column that contains zeros or one and that activates the XOR constraints or not. Great, thanks for the answer. 
The next question is can you create different custom gates and at which instance do you decide which row corresponds to each kind of gate? So when you generate a global constraint this is essentially what is your custom gate then it's going to apply over everything but the wizard framework is more abstract than this. Essentially, there is a general technique to do it, which is to say that you add a selectors column that says which constraint is going to apply for each, and you have some product of your constraints custom gate expression multiplied by an indicative that asks whether this constraint is active here or not, and you would merge everything into a single global constraint in the end. So yes, you could implement custom gate. Actually, that's what they do all the time when they specify the EVM. Great. The next question is about recursion. So is recursion something that would be implemented in Wizard, or would it be separately like a commitment? So there is a separate way you can do recursion. So we do it inside of the Wizard at the same time and outside. The first way, we have a compilation step that is called self-recursion that usually goes just after vertex. The text of vertex proof and re-arithmetize it. And we can do proofs of that again, and we repeat, and we can shrink the proof. That's because vertex, as a single polynomial commitment, has a square root, very fair time. But applying log-login application of self-recursion, you get constant size proof. Great. And the last question that we have in the queue, at least for now, is that it's great that you can define an ideal protocol programmatically, and it seems that that does make it easy. Does that make it easy or possible to support automated formal verification or UC proofs of security? I'm not too sure what it would entail exactly to formally verify. So we could formally verify the standard set of compilers that we have. I think this is at least a necessity. But then there is a protocol description. It should be formally verified. This, I don't know how to do it. 
I don't know and I can't tell you how to make it easy but it would be a great use case I agree it would be great maybe after that conversation", "eventId": "devcon-7", - "slot_start": 1731479400000, - "slot_end": 1731481200000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1MNzcD3lH260PkgaznRJQQW41lkxoYMoKXT73MHMNPfg", - "resources_slides": null, + "slot_start": 1731486600000, + "slot_end": 1731488400000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1FkV9X3aQwU20vdTZXHXBpHGRAISg06VrxYifChRhnIo", + "resources_slides": "https://drive.google.com/file/d/18BC5R9QvxaV67ChCL7bKhOa1dXXomtfA/view", "speakers": [ - "vaibhav-chellani" + "alexandre-belling" ] }, "vector": [ @@ -872792,10 +870347,10 @@ 0, 0, 0, - 6, 0, 0, 0, + 6, 0, 0, 0, @@ -873579,6 +871134,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -873596,7 +871152,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -873638,6 +871193,13 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -873709,7 +871271,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -873736,10 +871297,8 @@ 0, 0, 0, - 2, 0, 0, - 2, 0, 0, 0, @@ -873813,6 +871372,22 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -873867,28 +871442,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -874096,15 +871649,12 @@ 0, 0, 0, - 0, - 0, - 0, 2, 0, 0, 0, - 2, 0, + 2, 0, 0, 0, @@ -874121,59 +871671,38 @@ }, { "session": { - "id": "why-ethereums-issuance-policy-is-redacted", - "sourceId": "39HYEG", - "title": "Why Ethereum's Issuance Policy is [redacted]?", - "description": "This talk explores the status quo of staking economics, its drawbacks as we see them and what the future of staking economics could look like.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Research", + "id": "wmb-81321", + "sourceId": "S8MPDK", + "title": "WMB 81321", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "ACD", - "Staking", - "Economics", - "ACD", - "Economics", - "Staking" - ], - "keywords": [ - "none" - ], - "duration": 1694, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "0118431db811d58100ca9b2f46e6661abd7baab68e55a3055f821c149862bfa0", - "sources_youtubeId": "cUgKXBq017g", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "6736ecef1b0f83434d629108", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731552300000, - "slot_end": 1731554100000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1H2muDBPNRQn-IIusKik3f5fD_tsi9lmseX7GwmbUAh8", - "resources_slides": null, - "speakers": [ - "ansgar-dietrichs", - "caspar-schwarz-schilling" - ] + "slot_start": 1731661200000, + "slot_end": 1731664800000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1IuOY3B48xD6oQfkmw66ZED8btQYpPlx-woDEIDkmuwQ", + "resources_slides": "" }, "vector": [ 0, 0, 0, 0, - 6, - 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -874221,7 +871750,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -874900,7 +872428,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -874949,7 +872476,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -875043,7 +872569,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -875204,7 +872729,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -875477,9 +873001,9 @@ 0, 0, 0, - 2, 0, 0, + 2, 0, 0, 2, @@ -875494,39 +873018,49 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "why-many-deployed-snarks-are-extremely-risky", - "sourceId": "BVSHEA", - "title": "Why many deployed SNARKs are extremely risky", - "description": "We analyze the real-world security of FRI, a key component in many SNARKs securing billions in blockchain transactions. We discover alarming gaps between conjectured and provable security in deployed FRI parameters. Most cases show 21-63 bits weaker provable security than conjectured. This leaves systems vulnerable if better attacks emerge. We propose guidelines for achieving 100 bits of provable security and a method for parameter tuning, aiming to enhance SNARK security in L2s+blockchains.", - "track": "Applied Cryptography", + "id": "working-together-with-unity-blazor-nethereum-and-mud", + "sourceId": "SDUYDQ", + "title": "Working together with Unity, Blazor, Nethereum and MUD", + "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and explores how Unity, Blazor, Nethereum, and MUD integrate to build blockchain-based games and applications. It covers the overall architecture and structure of .NET projects, including smart contract integration and core logic. 
Key topics include Nethereum's integration with MUD systems and tables, extended code generation to support MUD, deployment strategies, bulk saving, data synchronization, and testing.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", "type": "Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, "keywords": [ - "Concrete", - "security" + "Nethereum", + "MUD", + "Unity" ], "tags": [ - "Cryptography", - "Security", - "SNARK" + "Architecture", + "Frameworks", + "Gaming" ], "language": "en", + "sources_swarmHash": "3f3e9761b23a20f24c5e1a858e77d43e2e026297532cb8b79bdc4d1007b4598a", + "sources_youtubeId": "27FnZdCxvos", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "speakers": [ - "pratyush-ranjan-tiwari" + "juan-blanco" ], "eventId": "devcon-7", - "slot_start": 1731645000000, - "slot_end": 1731646800000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1p5nM9CjRl-N6-aj7yjsMvrsos4m3GrpVgekyXpMOGfM" + "slot_start": 1731568500000, + "slot_end": 1731570000000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1cgSTfVg9G2fBhaLSYwdokUa1BNjwvijZP4qAjaifH3Q", + "resources_slides": "https://drive.google.com/file/d/1lmAcG8PiEAzL6F0XryXgv0Rp8IYgcfay/view" }, "vector": [ 0, @@ -875539,10 +873073,9 @@ 0, 0, 0, - 6, - 0, 0, 0, + 6, 0, 0, 0, @@ -876280,11 +873813,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -876293,7 +873821,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -876346,6 +873873,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -876388,11 +873916,13 @@ 0, 0, 0, + 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -876565,7 +874095,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -876847,7 +874376,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -876860,48 +874388,55 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "why-vpns-are-scams-and-what-to-do-about-it", - "sourceId": "TRMC3L", - "title": "Why VPNs are scams and what to do about it", - "description": "Existing VPNs are essentially scams. Free VPNs and most centralized VPNs (such as ExpressVPN, owned by Kape) are effectively data harvesting companies. Decentralized VPNs usually have a few large servers and offer barely any more privacy than centralized VPNs. What is missing is 1) onion-routing packets like Tor 2) adding noise (fake traffic) 3) censorship-resistance and 4) mixing packets from different users together. We'll explore how technologies work to defeat even AI adversaries.", - "track": "Cypherpunk & Privacy", + "id": "wtf-are-based-rollups-and-preconfs", + "sourceId": "UG79AE", + "title": "Wtf are based rollups and preconfs?", + "description": "The rollup-centric roadmap is critical for scaling Ethereum but has introduced fragmentation of users, developers, and liquidity. But don't worry, based rollups are here to save the day! But wtf is a “based rollup”? 
And wtf are these “pre-confs” that usually get talked about together?\r\n\r\nThe focus of this talk is to demystify these concepts and try and get more people engaged in the based rollup ecosystem, which has the potential to heal Ethereum’s fragmentation problem.", + "track": "Layer 2", "type": "Lightning Talk", - "expertise": "Intermediate", - "audience": "Engineering", + "expertise": "Beginner", + "audience": "Developer", "featured": false, "doNotRecord": false, "tags": [ - "censorship", - "resistance", - "Decentralization", - "Privacy", - "Use Cases" + "Validator Experience", + "Layer 2s", + "Rollups", + "sequencer", + "preconfs", + "pre-confirmations", + "Layer 2s", + "Rollups", + "Validator Experience" ], "keywords": [ - "VPNs", - "mixnets", - "censorship-resistance" + "Based Rollup", + "Preconfirmations", + "Sequencing" ], - "duration": 538, + "duration": 462, "language": "en", - "sources_swarmHash": "a1d36033bf5ebaf4e1f8ed35812948388d4ba3cb56f13648118d2e9ba837ede6", - "sources_youtubeId": "4Ir-fptXPr8", + "sources_swarmHash": "a0f742f4e79679bf245603fb9d0b0337c6bb2cbbe6a4e47ea5a1b04b9dc8bed6", + "sources_youtubeId": "j4wLhmXaZn8", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673838b41b0f83434df58cff", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673838b41b0f83434df58cff.vtt", + "transcript_text": " All right. Thanks for coming, everyone. I know it's kind of early. So this is a lightning talk on based roll-ups and pre-comps. And as you know, these lightning talks are extremely quick. So this isn't going to answer all of the questions. It's really going to try and lay the foundation and motivation for why different teams are working on this. So I would want to start with like, what is the big motivation here? And the big goal of these base rollups really is to help solve this fragmentation issue we're starting to see in the L2 space and to restore some value capture back to the base layer. So how did we get here? Well, Ethereum is always in this tricky position. The goalposts always move. Gas was too expensive. We created this roll-up centric roadmap. We succeeded in offloading all of this execution. Things got cheaper. TPS increased. But ETH is dead. So we really want to try and address some of these problems head-on. And the big focus of this talk is around fragmentation. So currently these L2s aren't interoperable with each other. They fragment liquidity, they fragment users, they also fragment developers. You have to pick a winning ecosystem to deploy on or deploy across many, which starts to spread your resources. And what we're really seeing is this kind of convergence on what I'm calling intra-op. You have interoperability within your ecosystem but not across these ecosystems. So how do we fix this fragmentation problem? Well, one easy solution is that we just agree on one entity to sequence all of these rollups. And that sounds pretty centralizing. So can we do this in a way that preserves a lot of the values that we care about? So enter based rollups. The idea here, this is a quote from Justin's paper. The TLDR here is it's a based rollup when it's sequenced by Ethereum validators. So in this picture, on the left-hand side here, we have centralized sequencing. The idea is you have these unordered transactions. A centralized sequencer's job is to order them for the rollup. These little squiggly things are the rollups at the bottom here. Okay. 
As we move to the right, we're increasing in decentralization, and we're unlocking interoperability. So with shared sequencing, you have multiple parties that are all agreeing, according to some leader election mechanism, on who has the ability to sequence all of the roll-ups. And as we move all the way to the right, we enter this based sequencing mode. The idea is that the transactions for these L2s will be sequenced directly by Ethereum validators. And how does this help? How does this unlock interoperability? The idea is that we have these write locks over L2 state. When an Ethereum validator is going to propose a block, they have a write lock over the entire L1 block and all of the L2 blocks that are going to be included. And when we have a bunch of rollups that are all agreeing to be sequenced by this validator, it unlocks this ability for you to start passing messages across these rollups that are all agreeing to be sequenced by this validator, it unlocks this ability for you to start passing messages across these rollups. We don't need these bridges. We're able to do these more seamlessly. So this has limitations. One of the big issues with based rollups is that they have really 12 second block times. A lot of users wanna come to L2s because they care about that snappy UX, those instant transactions. We can always reduce the L1 block times, but that's a very long, arduous process that has a lot of unknowns and centralization vectors. So, pre-confs, this is another one of these new terms, that stands for pre-confirmations. A pre-conf is a commitment made by these validators to users about doing something related to block proposals. So this could mean I'm giving a guarantee to a user that I'll include their transaction when it's my turn to propose a block, or I can even give a stronger guarantee, like this will be the state after executing your transaction. And if I break my promise as the pre-confer, then I can get slashed on various means. So to kind of wrap this up, like how does this all come together? So the user over here would be able to send their roll-up transactions to be sequenced by an Ethereum validator. They, in response, give back this pre-confirmation signature, which is like this receipt for the users, guaranteeing that their transaction will be included or it'll be executed inside of the rollup. And if the validator does break this promise, they can be slashed by submitting evidence to the slashing contract. And what does this enable? Well, it solves a lot of these UX problems. And when we start to enter this execution pre-comps, we really make it to a place where we can actually outperform these Alt-L1s by giving these very instant transactions back to users. And this all comes without modifications to the base layer. So hopefully this maybe piqued people's interest on this topic, but of course, in a five-minute lightning talk, there's still many, many things to be explored. So thank you all for joining. Okay, yeah, we have a few questions. Right, yeah, so the first question here, how does this notion scale if they need to validate all of the L2 transactions? So this is a great question. So I think there's kind of two worlds here. Like one is sequencing itself doesn't imply execution, so it doesn't have to take on all of the load. But realistically, there's been a lot of work to get the data out there. And, you know, I think that's a big part of the reason why we're doing this. And, you know, I think that's a big part of the reason why we're doing this. 
So, I think that's a big part of the reason why we're doing this. And, you know, I think that's a big part of the reason why we're doing this. for me, these pre-conf networks will likely require consensus. Is this the biggest drawback? So definitely over the past year, it started from this very dark forest, unknown, and over time, we've started to untangle it. And some of the bigger questions are now just around pricing. But really, you don't need an actual consensus protocol to build this. You're able to just broadcast actual consensus protocol to build this. You're able to just broadcast these messages directly to the users. And if the user doesn't get their pre-confirmation, they're able to go and slash. And maybe one last question. Why is Spire better than Puffer? Why is Spire better than Puffer? Well, we're all here building based rollups. So, yeah. I understand. So everyone has their own vision for the best approach. Thank you very much. So thank you. Please give a round of applause to our speaker. Thank you. Thank you. Thank you.", "eventId": "devcon-7", - "slot_start": 1731389400000, - "slot_end": 1731390000000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1X40WVD7E27evrL1uMb90tNX_OrjLhOmaw9pd-qrbFB4", - "resources_slides": null, + "slot_start": 1731642000000, + "slot_end": 1731642600000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1XBmbnq_59WsG85OTcNpUu6A8prP6pC2w2YjOs_3x7-Y", + "resources_slides": "https://drive.google.com/file/d/16ia-BPY-_Aijs5eSGwaF9obnGQmhp4iH/view", "speakers": [ - "harry-halpin" + "jason-vranek" ] }, "vector": [ @@ -876910,13 +874445,9 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, 0, 0, + 6, 0, 0, 0, @@ -877547,7 +875078,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -877648,6 +875178,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -877695,8 +875226,11 @@ 0, 0, 0, + 2, 0, 0, + 2, + 2, 0, 0, 0, @@ -877720,6 +875254,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -877735,7 +875270,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -877758,7 +875292,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -877767,12 +875300,13 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -878206,8 +875740,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -878234,52 +875766,50 @@ 0, 0, 0, - 0, - 0, 0 ] }, { "session": { - "id": "wizard-build-your-own-p-iop-protocol-in-15-min", - "sourceId": "W78CYD", - "title": "Wizard: build your own P-IOP protocol in 15 min!", - "description": "Wizard is a new open-source framework allowing you to write your own ZK proving scheme. Wizard is one of the backbones of Linea zkEVM's prover and it can be used to implement advanced protocols easily. In this session I will guide you through an implementation of Plonk using just a few lines of code.", - "track": "Applied Cryptography", - "type": "Talk", + "id": "wtf-is-the-pessimistic-proof", + "sourceId": "DAZLVG", + "title": "WTF is the pessimistic proof", + "description": "Cryptographic safety for the AggLayer requires a novel solution. It’s called the pessimistic proof and it treats all chains suspiciously. The AggLayer will be a decentralized protocol that scales blockchains by unifying liquidity, users, and state. 
The Pessimistic proof is a proof generated to securely grant this shared liquidity, and it will be technically explained in this flash talk by one of the developers.", + "track": "Layer 2", + "type": "Lightning Talk", "expertise": "Intermediate", - "audience": "Research", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Protocol Design", - "Frameworks", - "SNARK", - "polynomial-iop", - "Frameworks", - "Protocol Design", - "SNARK" + "ZKP", + "liquidity", + "shared", + "agglayer", + "ZKP" ], "keywords": [ - "Polynomial-IOP" + "aggLayer", + "shared liquidity" ], - "duration": 1471, + "duration": 470, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "87252c19c72c00aedc54a7899e5869478571c3e1307c1201d88c95a260eb9e1d", + "sources_youtubeId": "2Nf4VCylYtk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "67346da49dbb7a90e1d16a65", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67346da49dbb7a90e1d16a65.vtt", - "transcript_text": " Transcription by ESO. Translation by — to you the tech we developed to develop the proof system of Linear. Linear actually uses many different proof systems for many different use cases. It is a complex system. What I'm going to present to you is the main one that we use for proving the execution, so concretely proving EVM execution. So, I shall present myself. I'm Alexandre. I've been working in this space for seven years now, and been focused on cryptography for five years now. I've been working on INEA since the beginning, even doing research on roll-up even before that. So, yeah, just to talk a little bit about myself. Okay, so I'd like to involve the audience a little bit. Who knows what a ZK-EVM is? All right, so that's a good half, a good third. So a ZK-EVM layer is a layer two solution which is whose purpose is to help Ethereum scalability. The idea is, instead of sending transactions on the Ethereum mainnet, you send them to a third party, Linear. Linear runs an Ethereum virtual machine, will process the transaction. It will be processed separately from the mainnet. So the contracts that are deployed on mainnet are not the same, necessarily the same as the ones that are deployed on linear. The states are separated. But we can bridge between the two. I'm not going to describe much more how we bridge between Ethereum and linear. But what is important to remember here is that we basically bundle the execution of many transactions and submit one proof at the end. We give you one number, every finalization finalizes on average 65,000 transactions at once. And since we are using snark proof systems, meaning they have a very short verifier time which is mostly independent from how many transactions are executed, that's how we get scalability. Verifying the final proof which is a Planck proof takes a few milliseconds but it's for 65,000 transactions. So that's why it's much cheaper and that's how ZK EVMs can achieve much lower gas price. All right. So maybe a few words on ZKP and SNARK. ZKP is, I mean, usually we say ZKP but most of the time what we use in production are ZKPs, I mean, usually we say ZKP, but most of the time what we use in production are ZK argument of knowledge. And in order for a protocol to be a ZK argument of knowledge, it needs three properties. You need zero knowledge. Zero knowledge means that the proof does not reveal more than what it should reveal. 
You need to have completeness, meaning that if you want to prove something that is true, you should always be capable of generating the proof. Meaning, for instance, if you want to prove that you know the square root of some number, it should work for every number and not just one number. And the most important one is argument of knowledge soundness, or essentially computational knowledge soundness. It is how you say that you know, that you prove that you know what you're proving you know. And so, as I mentioned earlier, we are building a zkEVM, but really the ZK is not about zero knowledge or hiding things, it's really about proving that we know valid execution traces. We'll explain more on that later, but especially we need more properties. We want the verification to be succinct, to be small, and we want also the protocol to be non-interactive. It should be a single proof, a single message, and from that we should be capable of verifying a proof. Okay, so let's apply all that to the EVM. The Ethereum virtual machine is a state machine on which we can execute, and a transaction in the EVM is like an instruction from the user of that virtual machine. It is specialized for running smart contracts. It has a lot and a lot of features that interact in a complex way between each other. And not every computation that is easy to do on the EVM is necessarily easy to prove. So for instance, I'm thinking about the Keccak hash function that every smart contract uses all the time, like it's free. It's really not free to prove. And so if we want to solve that problem, so of course we need to deal with the inherent complexity of the EVM, but we also need a proof system that is flexible enough to solve all the problems that we need to solve to execute the EVM. Concretely, in order to prove the EVM, we have traces of execution that are instantiated by polynomials. We call them columns in our framework. And it could be summed up as a collection of arrays that communicate with each other by sub-arguments. And there are many different sub-arguments that are possible to use. We can have lookups. We can have projection queries. We also support variants of lookups that we call conditional lookups or fractional lookups. So in order to make this work, we need a proof system that is really flexible and can deal with all the polymorphism that is inherent to proving Linea's arithmetization. Okay, so in order to do that, we designed the Wizard framework, which is the main gateway between describing the constraints to represent the EVM and actually proving things. It has a very neat particularity in that in the Wizard framework, you write protocols in an ideal model and you don't have to worry about how you're going to commit to things or which arguments you're going to use. You just say what you want and then you have a list of techniques, like a menu, which you apply to that protocol statement, and it will create for you a proof system, as complicated as you need it to be. And so that's perfect for us. It turns out that this is how, in academia, they describe complex protocols.
Like if you take, for example, the Groth16 proof system, if you had a look at the paper before, you would see that they say, okay, so this is the constraint system at the beginning, then we do something that is called QAP, and then we do something that is called NILP, and then it continues, and at the end you have a concrete proof system, which is the Groth16 proof system, but that does not translate into the implementation. People just take the final protocol at the end that you apply after every step of compilation of Groth16, and there is a Groth16 implementation. So what we do is that we actually implement every possible step so that this step can be reused for other proof systems, and we don't have to really mentally work out the whole protocol. In the case of Linea, it would just be impossible. And on top of that, we made it in such a way that if anybody wants to add their own compilers or do their own tweak or add their own type of constraints, the framework will allow that without changing the core of it. All right, so as I mentioned earlier, the proof system, the protocol that you are going to construct, you have to describe it in an ideal model. And this ideal model involves what we call the Wizard Oracle. The Wizard Oracle is, from the point of view of the protocol designer, a trusted third party. It knows everything. It remembers everything. It does computation for free. It is always honest. It's like something you really... If it existed in reality, we would not need cryptography. So that does not mean that what we build will not be secure. It just means that the Oracle will concretely be instantiated by something else in the future as we compile the protocol. So the protocol can also be described in a multi-round fashion. I mentioned at the beginning that we need non-interactivity, but the protocol sorts this out using the Fiat-Shamir trick. It puts some limitation. It means that the verifier can only send random challenges to the prover, but that's a common limitation that every protocol has nowadays. It's very uncommon to be in a contrary situation. And so essentially, the prover can use the oracle by sending a big, large message to it, and the oracle will just remember and notify the verifier that, hey, the prover did this part of the work. You can ask questions. So the verifier can ask questions, and the oracle responds to the question without needing to do any computation. It's like a godlike entity, and it is always honest. So you don't have to worry about him lying. So as I mentioned, the prover, the verifier and the oracle can send messages to each other, and here comes the first primitive of the framework, which is what we call columns. So columns can be of any sort. We have what we call committed columns. Committed column means it is sent to the oracle. And basically being sent to the oracle means that the prover cannot change its mind about what was sent to the oracle. You can only send something once to the oracle. Otherwise, you're cheating. And the protocol will always ensure that. But on top of that we have what we call pre-computed columns. So pre-computed columns, they can be of two types. They can be sent to the verifier or sent to the oracle, and they are known beforehand. So that's something that is part of the protocol description, actually. They always have the same values. You can think, for instance, of the Plonk circuit description, which is instantiated by several polynomials.
We are going to see how we can implement Plonk in 10 minutes. So I'm just putting myself forward a little bit. You need those columns as part of the proving keys, and they describe the Plonk circuits. And on top of that, you can send proofs, and proofs means a message that is sent directly to the verifier. And there are other types. Actually, we have eight different variants of column type. The columns also have a predefined size. It has to be a power of two, due to a current limitation in the framework, and they have a round assignment. And the round number is essentially describing at which round of interaction the column is assigned by the prover. That's for the main part. And then, as I said, for some columns that are sent to the oracle, or for some groups of columns that are sent to the oracle, the verifier can ask questions about these columns to the oracle. So that's what we call queries. It's a common term used in academia. If you know about FRI, they do random position opening queries, so that's what they mean when they say query. In a polynomial IOP protocol, there would be univariate openings. In our framework, query stands at the same time for constraints. It would be questions that have a yes or no answer, like is this value the square of this other value? The answer can always be yes or no. Most of the time, it is served as a constraint. And here we describe it as a query. And we also have open questions that are like polynomial opening, position opening, and so these expect a response from the oracle that is other than yes or no. So we support many, many different types of queries. It can be lookups, it can be univariate evaluation, it can be inner product between several columns. So essentially, most of the folklore is there. And we implemented it because we needed it for the concrete implementation of Vortex and Linea's arithmetization. All right. So as I said, once you have a protocol description, the only thing you need to do is to describe how you want to go from this description in an ideal world with an ideal oracle into a concrete protocol that is secure in the standard model. So here is the base description that allows us to go from Wizard IOP to polynomial IOP at the end. But in practice, this would not be sufficient. We would also need a polynomial commitment to turn this into a concrete protocol. So this part of the code does not describe how we do the polynomial commitment, but how we go to this point. Okay, so now let's get onto a practical example. So here is the Plonk constraints description. So we have a set of columns. The Qs are describing a Plonk circuit. XA, XB, XC are describing the witness. And so usually we add another column on the right that is for the public inputs and that we are going to use. On top of that, Plonk has some copy constraints which can be instantiated by a permutation argument, which I'm going to show you how to do. Okay, so let's implement Plonk. So as I mentioned, we need to define our protocol, then we can compile it, and after we can run it, so running the prover and verifying it. We can also automatically recurse it, but we are not going to cover that today. All right. Okay, so first of all, defining the protocol. This is done by specifying a function. So the whole framework is in Go. Most of the prover stack of Linea is using Gnark, and the Linea prover is also implemented in Go, as it is also relying on Gnark's implementation.
So the define function has this simple signature, and the builder is an object that is going to store everything we say to declare an entity in the protocol. So either queries, columns, or so on. And we can also specify checks to be done by the verifier. So let's go into that. Okay, so here is the columns description. So you can recognize the columns that we saw at the beginning. So the Q columns are for the circuit description. They should be the same no matter what we try to prove. So they go into pre-computed. The XA, XB, XC are committed; they have to be sent to the oracle. And PI, for the public input, is inserted as a proof object because it has to be revealed to the verifier. It's a bit counterintuitive that we call that proof, but proof means a message sent to the verifier. It's part of the proof. Even if it's a nonsense from an academia perspective. And also a number of public inputs. Because the PI column is larger than the actual number of public inputs, because every column should have the same size. And also the values of the Qs should be known beforehand, of course, because that's the circuit description. Okay. So now we can declare the queries. So on your right you have a global constraint, which is an arithmetic expression that has to vanish on all the rows of every column that it's touching. We can recognize the equation of the Plonk gate constraints at the beginning. And we have a fixed permutation, which is instantiated by some fixed permutation that leaves the concatenation of XA, XB, XC invariant. And that's how Plonk proves the copy constraints. Then finally, we need to add a verifier check. This is to ensure that the PI that is sent to the verifier is well formed and that it should be padded on the right with zeros. Okay. So now once we have that, we can compile that into an actual protocol. So here I added the part that converts the PIOP into a concrete protocol, because I added the vortex.compile, Vortex being the polynomial commitment that we use. And now we just have to run it. So the only thing we need to specify is how concretely we are going to assign our columns. Because this is the only thing that is unknown at this stage, after reading the protocol description. So, yeah. We just provide it and we assign it. It's four lines of code. And so, yeah. Now, so we have some things that allow us to write Plonk constraints manually. But I don't know if you have tried writing a Plonk circuit by hand, but this is really difficult. And it turns out that Gnark offers a very nice front-end to write circuits. So let's just write a wrapper of what we just wrote using Gnark so that we can use a Gnark circuit description. So I did the implementation. It was a bit longer than 100 lines, but it was essentially a few automated stuff. Okay, so let's do a circuit. So let's use Fibonacci as a use case. So my circuit, you have two values, U0, U1 as input, and you want to have the 50th number of the Fibonacci sequence generated by U0 and U1. U0 and U1 being public parameters. So on your right, you have the circuit written in Gnark, so you can see that it's fairly easy and much simpler than writing a circuit by hand. And then we just have to run it. And that's it. You just create your prove function that is explaining how to assign the columns. You run wizard.prove, and it's going to generate a proof for you using Vortex as the polynomial commitment, and you can verify that in one line. All right.
I have six seconds for the future improvements, so we want to add more queries, and we think we can also remove the necessity to specify rounds in the protocol, as it should be inferred automatically. All right. That's it. You can check out the code here. Amazing, Alexandre. Thanks so much for the great introduction of Wizard. So a reminder that if you scan the QR code, you will attend the session, and you can ask questions, and you can also claim an NFT. And you can also vote. So if you have a question that you really want answered, vote for it. So let's start with the top one. Does Wizard support lookup tables? And can it be used to implement lookup-table-based zkVMs? Absolutely. So the way you would do it is, for instance, say, so what you can do, first of all, if you want to do a range check, so that's a big use case for lookup tables, you already have a range query. So you just take one column and you say, I have this query that just enforces the whole column to be within bound, and that's all you do. A second way, if you want to do more complicated range checks, like XOR, for instance, then you would have to specify three columns for your XOR. One column for the left side, the right side, and one for the result. And in these columns, you put all the possibilities. So maybe say for 8 bits to 8 bits, you would have 2 to the 16 possibilities. So you write down all of that in your table. And then you create a lookup constraint between this table and a triplet of columns for which you want to enforce the XOR constraint. And you can also add conditional lookups. You can have a fourth column that contains zeros or ones and that activates the XOR constraint or not. Great, thanks for the answer. The next question is, can you create different custom gates, and at which instance do you decide which row corresponds to each kind of gate? So when you generate a global constraint, this is essentially what your custom gate is; then it's going to apply over everything, but the Wizard framework is more abstract than this. Essentially, there is a general technique to do it, which is to say that you add a selector column that says which constraint is going to apply for each row, and you have some product of your custom gate expression multiplied by an indicator that says whether this constraint is active here or not, and you would merge everything into a single global constraint in the end. So yes, you could implement custom gates. Actually, that's what they do all the time when they specify the EVM. Great. The next question is about recursion. So is recursion something that would be implemented in Wizard, or would it be separately, like a commitment? So there is a separate way you can do recursion. So we do it inside of the Wizard at the same time and outside. The first way, we have a compilation step that is called self-recursion that usually goes just after Vortex. It takes the Vortex proof and re-arithmetizes it. And we can do proofs of that again, and we repeat, and we can shrink the proof. That's because Vortex, as a single polynomial commitment, has a square-root verifier time. But applying a logarithmic number of applications of self-recursion, you get a constant-size proof. Great. And the last question that we have in the queue, at least for now, is that it's great that you can define an ideal protocol programmatically, and it seems that that does make it easy.
Does that make it easy or possible to support automated formal verification or UC proofs of security? I'm not too sure what it would entail exactly to formally verify. So we could formally verify the standard set of compilers that we have. I think this is at least a necessity. But then there is a protocol description. It should be formally verified. This, I don't know how to do. I don't know, and I can't tell you how to make it easy, but it would be a great use case. I agree, it would be great. Maybe after that conversation.", "sources_streamethId": "67370c6574749a4b898bd059", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67370c6574749a4b898bd059.vtt", "transcript_text": " Hi, thank you, Miros, for the presentation, and thank you, everybody, for coming. My name is Ignasi Ramos. I work at Polygon, at the protocol team, and today I will be presenting what the frog is the pessimistic proof. But before going straight to the pessimistic proof, let me give you a bit of context. Nowadays, one of the problems of blockchain not being mainstream, in my opinion, is the fragmentation. There are a lot of good projects, and they have to deploy on all the chains. But the worst thing is that for the user experience, this is very, very bad, because they may want to use, for one reason or another, more than one chain, two chains, whatever number of chains. So if they want to move assets from one chain to another one, sometimes there are delays. Sometimes it's difficult to track high fees. They have to wait a lot. This is why, from Polygon, we propose the aggregation layer. The aggregation layer connects sovereign chains together, unifying liquidity, users and state, but with the feel of a single chain, a multi-chain web that is better for the user experience, better for network effects, and better for security. We think, for example, like the Internet, where everything is built on top of the same protocol, TCP/IP. In the aggregation layer, we will be aggregating chains in a unified bridge, and they do not have to be only EVM chain compatible. They can also be what we call sovereign chains, for example, a Solana, whatever chain, as long as they maintain a state of their balances. But with this system, there is a problem. What happens if one of these chains is compromised? What happens if one of these chains has a bug such that the bridge gets drained? What will happen is that the other chains will be affected because of the insecurity of one of the other chains. This is where we introduce the pessimistic proof. The pessimistic proof has this name because it pessimistically thinks that any of the chains that are added in the aggregation layer can be vulnerable. So it tries to secure this. How does it do it? The solution is to architect the aggregation layer in a way that assumes that every prover can be unsound. The pessimistic proof guarantees that even if a prover for a particular chain is unsound, that prover cannot drain more funds than are currently deposited on that chain. In this way, a soundness issue cannot infect the rest of the ecosystem. So this means that every time a chain wants to submit their bridges to be consolidated, it will have to attach a proof proving that it has enough balance to execute those bridges.
The proving system that will handle all of this, the proof generation and the verification, can be any of these advanced general VMs, like for example JISC, RISC0, or SP1. So, let's go a bit deeper on the pessimistic proof. Now we will get a bit more tech, but it will be anyway very high level. Yes, very high level. Trees: we have the local balance tree, the local exit tree and the nullifier tree. The local balance tree is the one that handles the balance of each one of the tokens in the network. Each one of the leaves has the following information: the origin network of the token, the token address and the balance. The origin network and the token address completely define a token. So this balance tree is the one that handles all the balances of the tokens in each one of the chains. Another one is the local exit tree. The local exit tree grows sequentially every time a bridge is created. So in each one of the leaves, it has information of a bridge event. And the third one is the nullifier tree. The nullifier tree basically is used for the aggregation layer to avoid the double spending. I will explain it later. And finally, the steps for computing this proof are these four. Well, here it's a very summarized interface. First of all, we apply the bridge exits to our local exit tree, generating the new local exit tree. After that, we apply the bridge exits and imported bridges. The bridge exits are the ones that are generated from my chain that want to transfer assets to other chains. The imported bridges are the ones that are generated in other chains and want to transfer assets to my chain. So we compute them and we add them to the local balance tree, generating a new local balance tree. Finally, we check the nullifiers. We check that none of these bridges we're importing has already been computed or has already been consolidated or spent. And finally, we check that none of the balances in the local balance tree is lower than zero, meaning that we can compute all these bridges with the balances that we have in our chain. So that will be everything from my side. Thank you very much, and if you have any questions. Okay, let's give a big round of applause for Ignasi. Thank you, Ignasi, for this presentation. So these proofs, apart from their name, they are actually very fun. Apart from being pessimistic, right? We have time for a few questions. I will hit you with the red box, which you talk at. This is a microphone. So please raise your hand if you have a question for Ignasi. It's not easy, right? Yes, please. Okay, great presentation. So my question is, is this only for Polygon network, or is it supposed to... No, no, it will support any chain. Like, in the example there were the CDKs and the zkEVM, which are Polygon network, but it can support any chain that handles a balance of the state. As I said, it can even handle Solana. It's a good idea too, because it's one of the chains responsible for the state transition. More? Yes, please. How does this help with interoperability? Sorry, can you repeat that last part? Like pessimistic proof sounds from the... Yes, it's 100% security. Does it help, does it simplify interoperability in any way? Like you say, Solana could use pessimistic proof. Like does this help sending assets from one chain to the other?
Yes, it helps in the way that you are sure that this chain has enough funds to be transferred. So the only thing you have to do is verify that proof. You don't have to, like, if in case now on the bridges wait that the transaction is finalized or in case of an optimistic roll-up, wait these days for the withdrawal. This will not be happening. That makes the interoperability very fast. Okay. Thank you all. That was it. Yes, we have, let's go, like, very fast. I'll be here around anyway. Thank you. Okay. Well. No, that's it. Thank you, Ign anyway. Thank you. Okay. Well, that's it. Thank you, Ignacy. Thanks again.", "eventId": "devcon-7", - "slot_start": 1731486600000, - "slot_end": 1731488400000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1FkV9X3aQwU20vdTZXHXBpHGRAISg06VrxYifChRhnIo", - "resources_slides": null, + "slot_start": 1731654000000, + "slot_end": 1731654600000, + "slot_roomId": "stage-4", + "resources_presentation": "https://docs.google.com/presentation/d/1BLkd5LgVpoznDQEyKsIo9P94GZyyUdEhmVBoZTS692Q", + "resources_slides": "https://drive.google.com/file/d/1qcPtx3imkQnZFRVUOzD9z1GOGiKkFPdU/view", "speakers": [ - "alexandre-belling" + "ignasi-ramos", + "jesus" ] }, "vector": [ @@ -878290,11 +875820,7 @@ 0, 0, 0, - 0, - 0, - 0, - 6, - 0, + 6, 0, 0, 0, @@ -878790,6 +876316,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -878959,6 +876486,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -879022,7 +876550,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -879080,7 +876607,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -879110,6 +876636,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -879118,6 +876645,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -879139,7 +876667,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -879319,7 +876846,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -879587,8 +877113,7 @@ 0, 0, 2, - 0, - 0, + 2, 0, 0, 0, @@ -879601,7 +877126,6 @@ 0, 0, 0, - 0, 2, 0, 0, @@ -879614,42 +877138,54 @@ 0, 0, 0, + 0, 0 ] }, { "session": { - "id": "wmb-81321", - "sourceId": "S8MPDK", - "title": "WMB 81321", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", - "audience": "Engineering", + "id": "yeomenai-elevate-your-game", + "sourceId": "WLKTYW", + "title": "Yeomen.ai - Elevate your game!", + "description": "Web3 games bring about possibilities for autonomous worlds that traditional games are not able to offer. Yeomen.ai makes the on-chain data available to the masses in simple dashboards. Yeomen.ai also offers on-chain extension of autonomous worlds to automate and transform game play. 
Command has an Intents based services layer to offer services on Autonomous Worlds that can expand to all Web3.\r\n\r\nYeomen.ai can work with MUD or Dojo powered onchain games and financial applications in future.", + "track": "[CLS] MUD Community-Led Session, by 0xPARC", + "type": "Talk", + "expertise": "Beginner", + "audience": "Developer", "featured": false, "doNotRecord": false, - "keywords": [], - "tags": [], + "keywords": [ + "Analytics", + "Modding", + "AI", + "Ownership", + "Marketplace" + ], + "tags": [ + "Autonomous World", + "Collective Intelligence", + "Intents" + ], "language": "en", - "speakers": [], + "sources_swarmHash": "20e03f55574e957b69dafacbe86318c6de92e40b6d84f08bb5db7f0ebc5514f8", + "sources_youtubeId": "C7DaJS79ocI", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", + "speakers": [ + "rohan-abraham", + "roshan-abraham" + ], "eventId": "devcon-7", - "slot_start": 1731661200000, - "slot_end": 1731664800000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1IuOY3B48xD6oQfkmw66ZED8btQYpPlx-woDEIDkmuwQ" + "slot_start": 1731580800000, + "slot_end": 1731582300000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1-3KWguxf1wrbuaxgSi8ewkempDUuj4_SzXw0fz2dbbU", + "resources_slides": "" }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, 0, 0, 0, @@ -879662,6 +877198,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -880388,6 +877925,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -880429,6 +877968,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -880449,6 +877989,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -880503,6 +878044,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -880958,9 +878500,8 @@ 2, 0, 0, - 2, - 0, 0, + 2, 0, 0, 0, @@ -880976,35 +878517,44 @@ }, { "session": { - "id": "working-together-with-unity-blazor-nethereum-and-mud", - "sourceId": "SDUYDQ", - "title": "Working together with Unity, Blazor, Nethereum and MUD", - "description": "This is a project demo as part of the MUD Day CLS: autonomous worlds, onchain games, and explores how Unity, Blazor, Nethereum, and MUD integrate to build blockchain-based games and applications. It covers the overall architecture and structure of .NET projects, including smart contract integration and core logic. Key topics include Nethereum's integration with MUD systems and tables, extended code generation to support MUD, deployment strategies, bulk saving, data synchronization, and testing.", + "id": "yeomenai-mud-day-demo", + "sourceId": "7DGLCG", + "title": "Yeomen.ai - MUD Day Demo", + "description": "This is a project demo for MUD Day CLS: onchain games and non-financial applications. \r\n\r\nYeomen.ai is building dashboards, automation tools, marketplaces, and platforms for autonomous worlds and onchain games built with MUD. 
Rohan will showcase some of these tools in this demo session.", "track": "[CLS] MUD Community-Led Session, by 0xPARC", "type": "Lightning Talk", "expertise": "Beginner", "audience": "Product", "featured": false, "doNotRecord": false, "tags": [ "Tooling", "Gaming", "Autonomous World", "analytics", "Autonomous World", "Gaming", "Tooling" ], "keywords": [], "duration": 289, "language": "en", "sources_swarmHash": "9609159b76f8b938655bcfa12a870a0cd47ee7b167a065e54b727047328c4b6e", "sources_youtubeId": "qWTamrBBH2E", "sources_ipfsHash": "", "sources_livepeerId": "", "sources_streamethId": "673585b79dbb7a90e106bdd7", "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673585b79dbb7a90e106bdd7.vtt", "transcript_text": " Hey, hello. My name is Rohan. I'm from Yeoman. How many people here are game developers? You build games. Okay, and how many play games? Great. I have something here for every one of you, gamers and developers. So what I want to show this morning is Yeoman. Yes, so we have dashboards. We built dashboards. So all of the data is on-chain. All of our game data, everything you play, is on chain. We wanted to visualize it, and this is useful for both developers and gamers. So let me jump into one of the games, Biomes. So what we do at Yeoman is we index all this data, we then create charts and graphs that's helpful for both the developers and the gamers. So here we have Biomes. We have like a million transactions indexed, and we create these charts. For example, here what we show is like most of the actions in Biomes are mining actions. You have move, quite a few things. As you scroll down, you get to see activity over the lifetime. It surged in the beginning of the launch. It was quiet for some time, and we see a huge spike now. And, yeah, that's in Biomes. Pretty cool these days. And as you scroll down, you get to see a lot of data. Again, all of this data is on-chain, but then we make it presentable. We make it visualized. When you visualize it, you quickly get an idea of how all this data looks. It's pretty cool. And as you scroll down, you get to see lots of charts, graphs. We have all this data indexed on chain. So you just can create all these charts. If you are a gamer on Biomes, if you're looking at mining, for example, it's pretty early days. We have about 6% of the Neptunium mined at the moment. And as you scroll down, you get to see lots of charts. This is interesting. Biomes is a 3D world. We have a 2D representation of what is mined and what's not. So the red represents all the ores, and the white is all the ones that are mined. So this is useful for gamers and developers. And let me scroll down to the last chart. So this is all the movement within Biomes. Everybody that's moved on, it's all plotted on a single 2D graph. You can see the high-traffic areas in the middle and then it's quieter at the sides. As a developer, if you were to visualize your game, you get to see things like, hey, there's a hot spot here. You can probably design, you can probably build a bazaar somewhere farther, or if you want to build, be a gamer, and if you want to see, hey, where to position your wonder that you're building, you could be like, go on to a high-traffic area or something like that.
So all of this data is on-chain, and this helps you quite a bit. So that's that. Okay, let me switch on to another game. So, okay, I've got two minutes. So this is a game called Drawtech, which was built by Smallbrain a while ago. What I showed you now is all the data that we've indexed. We then were able to query it, plot it into charts and graphs and all of that. I want to show you something else interesting. So what we did was we built a time travel feature for mud indexer. So basically what you see here is the canvas on DrawTech when it was launched. So this was the first block. This is probably small brain. Somebody went in and tested this. They put a tree in there. If you click on next, what it'll do is it'll fetch the very next block. This is all using a mud indexer. It goes to what it was at the point in time, or rather point in block. It gets all that. Click on next. You can see how the whole game developed. As you click on next, it goes to every subsequent block. It fetches the canvas, the colors. You can see who it was and all of that. So you can see the whole development of the game. You can go to the very last one. This is how it looks at the moment. And then if you go back in time, you can rewind from where it is. You can go back and all of that. So all of this data is on-chain. We're making it presentable, pretty, accessible, and all of that for developers and gamers. And if you... If a project's not listed here, please reach out. You just fill out this form. Send us your ABI. Send us all the information. We can get it indexed. This afternoon, we are talking about our cool new project called Command. It's pretty interesting, but I need more time to talk about it. And we'll be presenting it then", "eventId": "devcon-7", - "slot_start": 1731568500000, - "slot_end": 1731570000000, + "slot_start": 1731558600000, + "slot_end": 1731558900000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1cgSTfVg9G2fBhaLSYwdokUa1BNjwvijZP4qAjaifH3Q" + "resources_presentation": "https://docs.google.com/presentation/d/1D2DHsWzGk1OOmOYP0VkdpHHHgEYGIOx9nMKOiTdQw-Y", + "resources_slides": "", + "speakers": [ + "rohan-abraham" + ] }, "vector": [ 0, @@ -881746,7 +879296,6 @@ 0, 0, 0, - 0, 6, 0, 0, @@ -881777,6 +879326,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -881820,10 +879370,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -881832,6 +879378,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -881863,13 +879410,13 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 2, + 2, 0, 0, 0, @@ -882325,11 +879872,9 @@ 0, 0, 0, - 2, - 0, - 0, 0, 0, + 2, 0, 0, 0, @@ -882343,49 +879888,39 @@ }, { "session": { - "id": "wtf-are-based-rollups-and-preconfs", - "sourceId": "UG79AE", - "title": "Wtf are based rollups and preconfs?", - "description": "The rollup-centric roadmap is critical for scaling Ethereum but has introduced fragmentation of users, developers, and liquidity. But don't worry, based rollups are here to save the day! But wtf is a “based rollup”? And wtf are these “pre-confs” that usually get talked about together?\r\n\r\nThe focus of this talk is to demystify these concepts and try and get more people engaged in the based rollup ecosystem, which has the potential to heal Ethereum’s fragmentation problem.", - "track": "Layer 2", + "id": "you-know-whats-going-to-get-us-from-web2-to-web3-therapy", + "sourceId": "LUKWAM", + "title": "You know what’s going to get us from web2 to web3? Therapy", + "description": "2024 has been about thinking how we avoid recreating the same systems just \"over here\". 
And it has to start with our intentions and our ability to make decisions from a better place vs continuing to be influenced by scarcity mindsets, disregulated nervous systems and a burntout collective. \r\n\r\nI delve deeper into this here https://pop.mirror.xyz/JoTHH4cSRw967mphJqur6hWS6vQx0q89ee0WnO1o63g", + "track": "Coordination", "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Developer", + "expertise": "Intermediate", + "audience": "Community", "featured": false, "doNotRecord": false, "tags": [ - "Validator Experience", - "Layer 2s", - "Rollups", - "sequencer", - "preconfs", - "pre-confirmations", - "Layer 2s", - "Rollups", - "Validator Experience" + "future" ], "keywords": [ - "Based Rollup", - "Preconfirmations", - "Sequencing" + "thriving", + "mental health", + "future" ], - "duration": 462, + "duration": 531, "language": "en", - "sources_swarmHash": "a0f742f4e79679bf245603fb9d0b0337c6bb2cbbe6a4e47ea5a1b04b9dc8bed6", - "sources_youtubeId": "j4wLhmXaZn8", + "sources_swarmHash": "b56e50859f10264bce39a4458b8d038188b99b991e4359c0f173ef425205fdfe", + "sources_youtubeId": "mKDf6mBemhg", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673838b41b0f83434df58cff", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673838b41b0f83434df58cff.vtt", - "transcript_text": " All right. Thanks for coming, everyone. I know it's kind of early. So this is a lightning talk on based roll-ups and pre-comps. And as you know, these lightning talks are extremely quick. So this isn't going to answer all of the questions. It's really going to try and lay the foundation and motivation for why different teams are working on this. So I would want to start with like, what is the big motivation here? And the big goal of these base rollups really is to help solve this fragmentation issue we're starting to see in the L2 space and to restore some value capture back to the base layer. So how did we get here? Well, Ethereum is always in this tricky position. The goalposts always move. Gas was too expensive. We created this roll-up centric roadmap. We succeeded in offloading all of this execution. Things got cheaper. TPS increased. But ETH is dead. So we really want to try and address some of these problems head-on. And the big focus of this talk is around fragmentation. So currently these L2s aren't interoperable with each other. They fragment liquidity, they fragment users, they also fragment developers. You have to pick a winning ecosystem to deploy on or deploy across many, which starts to spread your resources. And what we're really seeing is this kind of convergence on what I'm calling intra-op. You have interoperability within your ecosystem but not across these ecosystems. So how do we fix this fragmentation problem? Well, one easy solution is that we just agree on one entity to sequence all of these rollups. And that sounds pretty centralizing. So can we do this in a way that preserves a lot of the values that we care about? So enter based rollups. The idea here, this is a quote from Justin's paper. The TLDR here is it's a based rollup when it's sequenced by Ethereum validators. So in this picture, on the left-hand side here, we have centralized sequencing. The idea is you have these unordered transactions. A centralized sequencer's job is to order them for the rollup. These little squiggly things are the rollups at the bottom here. Okay. 
As we move to the right, we're increasing in decentralization, and we're unlocking interoperability. So with shared sequencing, you have multiple parties that are all agreeing, according to some leader election mechanism, on who has the ability to sequence all of the roll-ups. And as we move all the way to the right, we enter this based sequencing mode. The idea is that the transactions for these L2s will be sequenced directly by Ethereum validators. And how does this help? How does this unlock interoperability? The idea is that we have these write locks over L2 state. When an Ethereum validator is going to propose a block, they have a write lock over the entire L1 block and all of the L2 blocks that are going to be included. And when we have a bunch of rollups that are all agreeing to be sequenced by this validator, it unlocks this ability for you to start passing messages across these rollups. We don't need these bridges. We're able to do these more seamlessly. So this has limitations. One of the big issues with based rollups is that they have really 12 second block times. A lot of users wanna come to L2s because they care about that snappy UX, those instant transactions. We can always reduce the L1 block times, but that's a very long, arduous process that has a lot of unknowns and centralization vectors. So, pre-confs, this is another one of these new terms, that stands for pre-confirmations. A pre-conf is a commitment made by these validators to users about doing something related to block proposals. So this could mean I'm giving a guarantee to a user that I'll include their transaction when it's my turn to propose a block, or I can even give a stronger guarantee, like this will be the state after executing your transaction. And if I break my promise as the pre-confer, then I can get slashed on various means. So to kind of wrap this up, like how does this all come together? So the user over here would be able to send their roll-up transactions to be sequenced by an Ethereum validator. They, in response, give back this pre-confirmation signature, which is like this receipt for the users, guaranteeing that their transaction will be included or it'll be executed inside of the rollup. And if the validator does break this promise, they can be slashed by submitting evidence to the slashing contract. And what does this enable? Well, it solves a lot of these UX problems. And when we start to enter these execution pre-confs, we really make it to a place where we can actually outperform these Alt-L1s by giving these very instant transactions back to users. And this all comes without modifications to the base layer. So hopefully this maybe piqued people's interest on this topic, but of course, in a five-minute lightning talk, there's still many, many things to be explored. So thank you all for joining. Okay, yeah, we have a few questions. Right, yeah, so the first question here, how does this notion scale if they need to validate all of the L2 transactions? So this is a great question. So I think there's kind of two worlds here. Like one is sequencing itself doesn't imply execution, so it doesn't have to take on all of the load. But realistically, there's been a lot of work to get the data out there.
So, I think that's a big part of the reason why we're doing this. And, you know, I think that's a big part of the reason why we're doing this. for me, these pre-conf networks will likely require consensus. Is this the biggest drawback? So definitely over the past year, it started from this very dark forest, unknown, and over time, we've started to untangle it. And some of the bigger questions are now just around pricing. But really, you don't need an actual consensus protocol to build this. You're able to just broadcast actual consensus protocol to build this. You're able to just broadcast these messages directly to the users. And if the user doesn't get their pre-confirmation, they're able to go and slash. And maybe one last question. Why is Spire better than Puffer? Why is Spire better than Puffer? Well, we're all here building based rollups. So, yeah. I understand. So everyone has their own vision for the best approach. Thank you very much. So thank you. Please give a round of applause to our speaker. Thank you. Thank you. Thank you.", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731642000000, - "slot_end": 1731642600000, + "slot_start": 1731487800000, + "slot_end": 1731488400000, "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1XBmbnq_59WsG85OTcNpUu6A8prP6pC2w2YjOs_3x7-Y", - "resources_slides": null, + "resources_presentation": "https://docs.google.com/presentation/d/1gUdSnWcxJdTYFT1JrkVP_VWgSxrlBCcEuwRk8pzgBSA", + "resources_slides": "https://drive.google.com/file/d/1zFPvbGgF5CPfM28jf1rsGDOIroDLyADW/view", "speakers": [ - "jason-vranek" + "simona-pop" ] }, "vector": [ @@ -882396,12 +879931,11 @@ 0, 0, 0, - 6, - 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -882598,6 +880132,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -883130,7 +880665,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -883178,11 +880712,8 @@ 0, 0, 0, - 2, 0, 0, - 2, - 2, 0, 0, 0, @@ -883206,7 +880737,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -883252,8 +880782,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -883709,12 +881237,14 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, + 2, + 0, + 0, 0, 0, 0, @@ -883725,46 +881255,26 @@ }, { "session": { - "id": "wtf-is-the-pessimistic-proof", - "sourceId": "DAZLVG", - "title": "WTF is the pessimistic proof", - "description": "Cryptographic safety for the AggLayer requires a novel solution. It’s called the pessimistic proof and it treats all chains suspiciously. The AggLayer will be a decentralized protocol that scales blockchains by unifying liquidity, users, and state. The Pessimistic proof is a proof generated to securely grant this shared liquidity, and it will be technically explained in this flash talk by one of the developers.", - "track": "Layer 2", - "type": "Lightning Talk", - "expertise": "Intermediate", + "id": "your-intuition-antoine-flute-and-didgeridoo", + "sourceId": "B8SMVZ", + "title": "Your intuition Antoine flute and didgeridoo", + "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. 
Let’s groove and connect through the universal language of music!", + "track": "Entertainment", + "type": "Music", + "expertise": "", "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "ZKP", - "liquidity", - "shared", - "agglayer", - "ZKP" - ], - "keywords": [ - "aggLayer", - "shared liquidity" - ], - "duration": 470, + "keywords": [], + "tags": [], "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "67370c6574749a4b898bd059", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/67370c6574749a4b898bd059.vtt", - "transcript_text": " Hi, thank you, Miros, for the presentation, and thank you, everybody, for coming. My name is Ignasi Ramos. I work at Polygon, at the protocol team, and today I will be presenting what the frog is the pessimistic proof. But before going straight to the pessimistic proof let me give you a bit of context and nowadays one of the problems of blockchain not being mainstream in my opinion is the fragmentation there are a lot of good projects and they have to deploy in all the chains but the worst thing is that for the user experience, this is very, very bad, because they may want to use for one reason or another, more than one chain, two chains, whatever number of chains. So if they want to move assets from one chain to another one, sometimes there are delays. Sometimes it's difficult to track high fees. They have to wait a lot. This is why, from Polygongon we propose the aggregation layer. The aggregation layer connects sovereign chains together, unifying liquidity, users and state, but with the feel of a single chain, a multi-chain web that is better for the user experience, better for network effects and better for the user experience, better for network effects, and better for security. We think, for example, like Internet, where everything is built at the top of the same protocol, the TCP IP. In the aggregation layer, we will be aggregating chains in a unified bridge, and they do not have to be only EBM chain compatible. They can only be like what we call sovereign chains, for example, a Solana, whatever chain that they maintain, like an estate of their balances. But with this system, there is a problem. What happens if one of these chains is compromised? What happens if one of these chains has a bug that the bridge gets drained? What will happen is that the other chains will be affected because of the insecurity of one of the other chains. This is where we introduce the pessimistic proof. The pessimistic proof has this name because it pessimistically thinks that any of the chains that are added in the aggregation layer can be vulnerable. So it tries to secure this. How does it do it? The solution is to architect the aggregation layer in a way that assumes that every prover can be unsound. The pessimistic proof guarantees that even if a prover for a particular chain is unsound, that prover cannot drain more funds than are currently deposited on that chain. In this way, a soundness issue cannot infect the rest of the ecosystem. So this means that every time a chain wants to submit their bridges to be consolidated, he will have to attach a proof proving that he has enough balance to execute those bridges. The proving system that will handle all of this, the proof generation and the verification, can be any of these advanced set, like general VMs, like for example JISC, for example RISC0, or SP1. 
So, let's go a bit deeper on the pessimistic proof. Now we will get a bit more tech, but it will be anyway very high level. Yes, very high level. Trees: we have the local balance tree, the local exit tree and the nullifier tree. The local balance tree is the one that handles the balance of each one of the tokens in the network. Each one of the leaves has the following information: the origin network of the token, the token address and the balance. The origin network and the token address completely define a token. So this balance tree is the one that handles all the balances of the tokens in each one of the chains. Another one is the local exit tree. The local exit tree grows sequentially every time a bridge is created. So in each one of the leaves, it has information of a bridge event. And the third one is the nullifier tree. The nullifier tree basically is used for the aggregation layer to avoid the double spending. I will explain it later. And finally, the steps for computing this proof are these four. Well, here it's a very summarized interface. First of all, we apply the bridge exits to our local exit tree, generating the new local exit tree. After that, we apply the bridge exits and imported bridges. The bridge exits are the ones that are generated from my chain that want to transfer assets to other chains. The imported bridges are the ones that are generated in other chains and want to transfer assets to my chain. So we compute them and we add them to the local balance tree, generating a new local balance tree. Finally, we check the nullifiers. We check that none of these bridges we're importing has already been computed or has already been consolidated or spent. And finally, we check that none of the balances in the local balance tree is lower than zero, meaning that we can compute all these bridges with the balances that we have in our chain. So that will be everything from my side. Thank you very much, and if you have any questions. Okay, let's give a big round of applause for Ignasi. Thank you, Ignasi, for this presentation. So these proofs, apart from their name, they are actually very fun. Apart from being pessimistic, right? We have time for a few questions. I will hit you with the red box, which you talk at. This is a microphone. So please raise your hand if you have a question for Ignasi. It's not easy, right? Yes, please. Okay, great presentation. So my question is, is this only for Polygon network, or is it supposed to... No, no, it will support any chain. Like, in the example there were the CDKs and the zkEVM, which are Polygon network, but it can support any chain that handles a balance of the state. As I said, it can even handle Solana. It's a good idea too, because it's one of the chains responsible for the state transition. More? Yes, please. How does this help with interoperability? Sorry, can you repeat that last part? Like pessimistic proof sounds from the... Yes, it's 100% security. Does it help, does it simplify interoperability in any way? Like you say, Solana could use pessimistic proof. Like does this help sending assets from one chain to the other? Yes, it helps in the way that you are sure that this chain has enough funds to be transferred. So the only thing you have to do is verify that proof.
You don't have to, like, if in case now on the bridges wait that the transaction is finalized or in case of an optimistic roll-up, wait these days for the withdrawal. This will not be happening. That makes the interoperability very fast. Okay. Thank you all. That was it. Yes, we have, let's go, like, very fast. I'll be here around anyway. Thank you. Okay. Well. No, that's it. Thank you, Ign anyway. Thank you. Okay. Well, that's it. Thank you, Ignacy. Thanks again.", + "speakers": [], "eventId": "devcon-7", - "slot_start": 1731654000000, - "slot_end": 1731654600000, - "slot_roomId": "stage-4", - "resources_presentation": "https://docs.google.com/presentation/d/1BLkd5LgVpoznDQEyKsIo9P94GZyyUdEhmVBoZTS692Q", - "resources_slides": null, - "speakers": [ - "ignasi-ramos", - "jesus" - ] + "slot_start": 1731389400000, + "slot_end": 1731391200000, + "slot_roomId": "music-stage", + "resources_presentation": "https://docs.google.com/presentation/d/1y6uMrtpD3uRb_lrG6TXEsSK_UJ8-x7X4UM7zvdFJaIY", + "resources_slides": "" }, "vector": [ 0, @@ -883774,6 +881284,8 @@ 0, 0, 0, + 0, + 0, 6, 0, 0, @@ -884272,7 +881784,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -884442,7 +881953,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -884593,7 +882103,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -884602,7 +882111,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -885071,8 +882579,6 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, @@ -885084,7 +882590,6 @@ 2, 0, 0, - 0, 2, 0, 0, @@ -885103,43 +882608,58 @@ }, { "session": { - "id": "yeomenai-elevate-your-game", - "sourceId": "WLKTYW", - "title": "Yeomen.ai - Elevate your game!", - "description": "Web3 games bring about possibilities for autonomous worlds that traditional games are not able to offer. Yeomen.ai makes the on-chain data available to the masses in simple dashboards. Yeomen.ai also offers on-chain extension of autonomous worlds to automate and transform game play. Command has an Intents based services layer to offer services on Autonomous Worlds that can expand to all Web3.\r\n\r\nYeomen.ai can work with MUD or Dojo powered onchain games and financial applications in future.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Talk", + "id": "zero-to-dapp", + "sourceId": "LUW7G9", + "title": "Zero To Dapp", + "description": "Learning Web3 programming. There are so many different tools and protocols to learn. Zero to Dapp is a workshop series that builds upon collaboration between different projects to guide the students from zero to their first Dapp. In this workshop, we review our learning from previous editions to encourage others give their own Zero to Dapp. Then we'll give a shortened version - usually, this workshop takes between a half day up to two full days. But we are fast learners at DevCon, aren’t we? 
;)", + "track": "Developer Experience", + "type": "Workshop", "expertise": "Beginner", "audience": "Developer", "featured": false, "doNotRecord": false, - "keywords": [ - "Analytics", - "Modding", - "AI", - "Ownership", - "Marketplace" - ], "tags": [ - "Autonomous World", - "Collective Intelligence", - "Intents" + "Layer 1", + "Layer 2s", + "Tooling", + "DevRel", + "Live Coding", + "onboarding", + "DevRel", + "Layer 1", + "Layer 2s", + "Live Coding", + "Tooling" ], - "language": "en", - "speakers": [ - "rohan-abraham", - "roshan-abraham" + "keywords": [ + "Onboarding" ], + "duration": 4607, + "language": "en", + "sources_swarmHash": "bde6952b4b8c11c866e8ce4a9d794f08480304cabca0b1c268e7489f5bca9451", + "sources_youtubeId": "lRo-TBLTgzs", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": "6734297d9dbb7a90e1ae69fe", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731580800000, - "slot_end": 1731582300000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1-3KWguxf1wrbuaxgSi8ewkempDUuj4_SzXw0fz2dbbU" + "slot_start": 1731465900000, + "slot_end": 1731471300000, + "slot_roomId": "classroom-e", + "resources_presentation": "https://docs.google.com/presentation/d/1obE94TKOOHTvht_bjpYs85KpbFc9Qw-AagmzvQTXrYk", + "resources_slides": "https://drive.google.com/file/d/1kclBwzP6Bh24AR18Vqmq1Fy3fQEhgDjB/view", + "speakers": [ + "simon-emanuel-schmid", + "rob-stupay", + "abena" + ] }, "vector": [ 0, 0, 0, + 6, 0, 0, 0, @@ -885149,9 +882669,6 @@ 0, 0, 0, - 6, - 0, - 0, 0, 0, 0, @@ -885817,6 +883334,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -885901,11 +883419,13 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -885922,14 +883442,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -885943,7 +883455,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -885959,6 +883470,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -885998,7 +883510,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -886008,6 +883519,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -886066,6 +883578,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -886444,6 +883957,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -886473,43 +883987,52 @@ }, { "session": { - "id": "yeomenai-mud-day-demo", - "sourceId": "7DGLCG", - "title": "Yeomen.ai - MUD Day Demo", - "description": "This is a project demo for MUD Day CLS: onchain games and non-financial applications. \r\n\r\nYeomen.ai is building dashboards, automation tools, marketplaces, and platforms for autonomous worlds and onchain games built with MUD. Rohan will showcase some of these tools in this demo session.", - "track": "[CLS] MUD Community-Led Session, by 0xPARC", - "type": "Lightning Talk", - "expertise": "Beginner", - "audience": "Product", + "id": "zk-email-fast-proofs-and-production-ready-account-recovery", + "sourceId": "WNQBQH", + "title": "ZK Email: Fast Proofs and Production-Ready Account Recovery", + "description": "We discuss progress that ZK Email has made in making new proofs really easily, as well as interesting new on-chain directions for email-triggered transactions. 
We'll go over proof registries, email-based multisig signers, and email guardians for account recovery in production.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Tooling", - "Gaming", - "Autonomous World", - "analytics", - "Autonomous World", - "Gaming", - "Tooling" + "Privacy", + "ZKP", + "Use cases of cryptography", + "client-side", + "2FA", + "Account Abstraction", + "Cryptography", + "Identity", + "Privacy", + "Recovery", + "Security", + "Use cases of cryptography", + "Zero-Knowledge", + "ZKP" ], - "keywords": [], - "duration": 289, + "keywords": [ + "ZK", + "Email" + ], + "duration": 1518, "language": "en", - "sources_swarmHash": "9609159b76f8b938655bcfa12a870a0cd47ee7b167a065e54b727047328c4b6e", - "sources_youtubeId": "qWTamrBBH2E", + "sources_swarmHash": "a94b9dc27784f47de11b6a11d62b5643a1cf29f711ee569154584f599c98f857", + "sources_youtubeId": "YvzdNMpynZM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "673585b79dbb7a90e106bdd7", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/673585b79dbb7a90e106bdd7.vtt", - "transcript_text": " Hey, hello. My name is Rohan. I'm from Yeoman. How many people here are game developers? You build games. Okay, and how many play games? Great. I have something here for every one of you, gamers and developers. So what I want to show this morning is my email. Yes, so we have dashboards. We built dashboards. So all of the data is on-chain. All of our game data data everything you play is on chain we wanted to visualize it and this is useful for both developers and gamers so let me jump into one of the games biomes so we what we do yeoman is we index all this data we then create charts and graphs that's helpful for both the developers and the gamers. So here we have biomes. We have like a million transactions indexed, and we create these charts. For example, here what we show is like most of the actions in biomes is mining actions. You have move, quite a few things. As you scroll down, you get to see activity over the lifetime. It surged in the beginning of the launch. It was quiet for some time, and we see a huge spike now. And, yeah, that's in biomes. Pretty cool these days. And as you scroll down, you get to see a lot of data. Again, all of this data is on-chain, but then we make it presentable. We make it visualized. When you visualize it, you quickly get an idea of how all this data looks. It's pretty cool. And as you scroll down, you get to see lots of charts, graphs. We have all this data indexed on chains. So you just can create all these chains. So you can chess. If you are a gamer on biomes, if you're looking at mining, for example, it's pretty early days. We have about 6% of the Neptunium mined at the moment. And as you scroll down, you get to see lots of charts. This is interesting. Biomes is a 3D world. We have a 2D representation of what is mind and what's not. So the red represents all the ores, and the white is all the ones that are mind. So this is useful for gamers and developers. And let me scroll down to the last chart. So this is all the movement within biomes. Everybody that's moved on, it's all plotted on a single 2D graph. You can see the high traffic areas in the middle and then it's quieter at the sides. As a developer, if you were to visualize your game, you get to see things like, hey, there's a hot spot here. 
You can probably design, you can probably build a bazaar somewhere farther, or if you want to build, be a gamer, and if you want to see, hey, where to position your wonder that you're building, you could be like, go on to a high-traffic area or something like that. So all of this data is on-chain, and this helps you quite a bit. So that's that. Okay, let me switch on to another game. So, okay, I've got two minutes. So this is a game called Drawtech, which was built by Smallbrain a while ago. What I showed you now is all the data that we've indexed. We then were able to query it, plot it into charts and graphs and all of that. I want to show you something else interesting. So what we did was we built a time travel feature for mud indexer. So basically what you see here is the canvas on DrawTech when it was launched. So this was the first block. This is probably small brain. Somebody went in and tested this. They put a tree in there. If you click on next, what it'll do is it'll fetch the very next block. This is all using a mud indexer. It goes to what it was at the point in time, or rather point in block. It gets all that. Click on next. You can see how the whole game developed. As you click on next, it goes to every subsequent block. It fetches the canvas, the colors. You can see who it was and all of that. So you can see the whole development of the game. You can go to the very last one. This is how it looks at the moment. And then if you go back in time, you can rewind from where it is. You can go back and all of that. So all of this data is on-chain. We're making it presentable, pretty, accessible, and all of that for developers and gamers. And if you... If a project's not listed here, please reach out. You just fill out this form. Send us your ABI. Send us all the information. We can get it indexed. This afternoon, we are talking about our cool new project called Command. It's pretty interesting, but I need more time to talk about it. And we'll be presenting it then", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731558600000, - "slot_end": 1731558900000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1D2DHsWzGk1OOmOYP0VkdpHHHgEYGIOx9nMKOiTdQw-Y", - "resources_slides": null, + "slot_start": 1731468600000, + "slot_end": 1731470400000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1G6_OH46sVVpOgDR1P1ZWqOpTtRzjcESBO1p9aHuVisY", + "resources_slides": "https://drive.google.com/file/d/1xo7q6y2u3wR9Z7JcCzgdY094rNnKaFh5/view", "speakers": [ - "rohan-abraham" + "aayush-gupta", + "sora-suegami" ] }, "vector": [ @@ -886523,9 +884046,9 @@ 0, 0, 0, + 6, 0, 0, - 6, 0, 0, 0, @@ -886628,6 +884151,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -887260,6 +884784,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -887271,6 +884796,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -887306,14 +884833,9 @@ 0, 0, 0, + 2, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, + 2, 0, 0, 0, @@ -887374,17 +884896,11 @@ 0, 0, 0, - 2, - 2, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -887435,6 +884951,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -887821,11 +885338,14 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -887835,8 +885355,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -887849,39 +885367,39 @@ }, { "session": { - "id": "you-know-whats-going-to-get-us-from-web2-to-web3-therapy", - "sourceId": "LUKWAM", - "title": "You know what’s going to get us from web2 to web3? 
Therapy", - "description": "2024 has been about thinking how we avoid recreating the same systems just \"over here\". And it has to start with our intentions and our ability to make decisions from a better place vs continuing to be influenced by scarcity mindsets, disregulated nervous systems and a burntout collective. \r\n\r\nI delve deeper into this here https://pop.mirror.xyz/JoTHH4cSRw967mphJqur6hWS6vQx0q89ee0WnO1o63g", - "track": "Coordination", - "type": "Lightning Talk", + "id": "zk-in-rollups-full-validity-proving-on-the-op-stack", + "sourceId": "8J8Z7Q", + "title": "ZK in Rollups: Full Validity Proving on the OP Stack", + "description": "Historically, zkEVM rollups have been difficult to build, requiring deep cryptography expertise that makes customization and maintainability complicated and time-consuming. With advancements in zk, zkVMs make it easy for any developer to write ZK applications with Rust. With a zkVM, we've created seamless way to upgrade ANY existing OP Stack chain to use ZKPs in just 1 hour. These rollups get fast finality, cost-effective (<0.1 cent / tx), and full EVM equivalence.", + "track": "Layer 2", + "type": "Talk", "expertise": "Intermediate", - "audience": "Community", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "future" - ], - "keywords": [ - "thriving", - "mental health", - "future" + "Layer 2s", + "Rollups", + "ZKP" ], - "duration": 531, + "keywords": [], + "duration": 901, "language": "en", - "sources_swarmHash": "b56e50859f10264bce39a4458b8d038188b99b991e4359c0f173ef425205fdfe", - "sources_youtubeId": "mKDf6mBemhg", + "sources_swarmHash": "03b821200ebbe047eefcc6138ddffd5d683a7fb0bf2466243e8039984f8ba53e", + "sources_youtubeId": "11b9vvKiBrY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6735e5c39dbb7a90e1bb8d44", + "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735e5c39dbb7a90e1bb8d44.vtt", + "transcript_text": " Hello, my name is Uma, I'm one of the co-founders of Sysynkt, and today I'm going to be telling you guys about introducing full ZK validity proving into the OP stack. So let's start off with kind of motivating the problem, which is why are ZK rollups interesting and important to the Ethereum endgame? So today, the Ethereum rollup-centric roadmap is how Ethereum is going to scale. And within that, I think it's commonly acknowledged that ZK rollups are the only way that a lot of the problems we're facing today as an ecosystem will get solved. For example, we'll get fast finality, we'll get interoperability across all the rollups, unified liquidity for users, and overall, it'll help improve UX greatly. Inherently, we can kind of think of why ZK helps solve all these problems, because decentralization, with all the different Ethereum nodes re-running and re-executing transactions inherently has some overhead and ZK by giving us verifiability fixes this. So, ZK rollups have been around for a long time. Historically we've known that ZK has been very important. But until recently, ZK rollups have been really challenging. You'd have to write in specialized languages, SDKs and DSLs, to actually encode an Ethereum EVM state transition function in a ZK circuit to be able to prove it. And then, in general, there were a lot of compromises. You had type 1, type 2, type 3, where you can't use the native Ethereum storage format or sometimes even be bytecode compatible due to the limitations of ZK. 
So how are we solving this? Well, at Sysynkt we're building a ZK VM, not a ZK EVM, a ZK virtual machine where you can just write Rust and then use ZK. So what does that mean? As a developer, you can just take arbitrary Rust code.", "eventId": "devcon-7", - "slot_start": 1731487800000, - "slot_end": 1731488400000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1gUdSnWcxJdTYFT1JrkVP_VWgSxrlBCcEuwRk8pzgBSA", - "resources_slides": null, + "slot_start": 1731582600000, + "slot_end": 1731583800000, + "slot_roomId": "stage-5", + "resources_presentation": "https://docs.google.com/presentation/d/1Dw9W_WUh2DLUhcVkatH257BHYs8yWdxlfLhoJXs8jnY", + "resources_slides": "https://drive.google.com/file/d/105-JDBcWAEeDhpLVMWJLQdpvGf6kvcrh/view", "speakers": [ - "simona-pop" + "uma-roy" ] }, "vector": [ @@ -887892,10 +885410,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, 6, 0, 0, @@ -888093,39 +885607,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -888666,6 +886147,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -888711,6 +886193,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -888734,6 +886217,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -888742,6 +886226,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -889199,7 +886684,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -889208,7 +886692,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -889216,32 +886699,6 @@ 0, 0, 0, - 0 - ] - }, - { - "session": { - "id": "your-intuition-antoine-flute-and-didgeridoo", - "sourceId": "B8SMVZ", - "title": "Your intuition Antoine flute and didgeridoo", - "description": "Join us at the Music Stage in the social area on Floor G for an unforgettable experience with the Open Source Orchestra! Dive into the beats and vibes curated by talented musicians from the Ethereum ecosystem, bringing together community, creativity, and rhythm. Let’s groove and connect through the universal language of music!", - "track": "Entertainment", - "type": "Music", - "expertise": "", - "audience": "Engineering", - "featured": false, - "doNotRecord": false, - "keywords": [], - "tags": [], - "language": "en", - "speakers": [], - "eventId": "devcon-7", - "slot_start": 1731389400000, - "slot_end": 1731391200000, - "slot_roomId": "music-stage", - "resources_presentation": "https://docs.google.com/presentation/d/1y6uMrtpD3uRb_lrG6TXEsSK_UJ8-x7X4UM7zvdFJaIY" - }, - "vector": [ 0, 0, 0, @@ -889251,13 +886708,73 @@ 0, 0, 0, - 6, 0, 0, 0, 0, + 2, + 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + { + "session": { + "id": "zkmpc-bring-public-auditability-into-mpc", + "sourceId": "XNN3XR", + "title": "ZKMPC: Bring public auditability into MPC", + "description": "In multi-party computation (MPC), participants collaboratively compute without revealing private inputs. To secure MPC on a blockchain, preventing collusion is essential. We developed a \"publicly auditable\" version of SPDZ, a widely-used MPC protocol, that enables third-party verification through zero-knowledge proofs (ZKP) collaboratively generated by multiple parties. 
We will also demonstrate application examples, such as a Game Master-free werewolf game.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Research", + "featured": false, + "doNotRecord": false, + "tags": [ + "ZKP", + "MPC", + "collaboration", + "zk-snark", + "MPC", + "ZKP" + ], + "keywords": [ + "Collaborative", + "zk-SNARKs" + ], + "duration": 1399, + "language": "en", + "sources_swarmHash": "84b05559d4df707a8f29bbb79e18bb1bdb1fff62ae2738288c7d4be463f3b188", + "sources_youtubeId": "aWQ8zzi1EAQ", + "sources_ipfsHash": "", + "sources_livepeerId": "", + "sources_streamethId": null, + "eventId": "devcon-7", + "slot_start": 1731407400000, + "slot_end": 1731409200000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/10GOrQfQ0ldlyvKU05TvdHfQd4G2zNNTzfEe2i2bfgMQ", + "resources_slides": "https://drive.google.com/file/d/14MIJiVuYvN9GsgXtB9LkgWl_8Bwt1J5s/view", + "speakers": [ + "task-ohmori", + "yusuke-nakae" + ] + }, + "vector": [ 0, 0, 0, @@ -889268,6 +886785,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -890002,6 +887520,8 @@ 0, 0, 0, + 6, + 6, 0, 0, 0, @@ -890078,6 +887598,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -890091,6 +887612,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -890558,6 +888080,8 @@ 0, 0, 2, + 2, + 0, 0, 0, 2, @@ -890565,6 +888089,10 @@ 0, 0, 0, + 2, + 0, + 0, + 0, 0, 0, 0, @@ -890578,58 +888106,53 @@ }, { "session": { - "id": "zero-to-dapp", - "sourceId": "LUW7G9", - "title": "Zero To Dapp", - "description": "Learning Web3 programming. There are so many different tools and protocols to learn. Zero to Dapp is a workshop series that builds upon collaboration between different projects to guide the students from zero to their first Dapp. In this workshop, we review our learning from previous editions to encourage others give their own Zero to Dapp. Then we'll give a shortened version - usually, this workshop takes between a half day up to two full days. But we are fast learners at DevCon, aren’t we? ;)", - "track": "Developer Experience", - "type": "Workshop", - "expertise": "Beginner", - "audience": "Developer", + "id": "zkpassport-private-unforgeable-identity", + "sourceId": "K3GWST", + "title": "ZKpassport: Private Unforgeable Identity", + "description": "This talk presents ZKpassport, an identity verification solution integrating zero-knowledge proofs with ePassports to achieve privacy-preserving and unforgeable government-attested digital identities. We will delve into the technical architecture, implementation challenges, and practical applications. 
Attendees will gain insights into the development process, benefits, and potential uses of this technology in enhancing digital identity privacy and security.", + "track": "Applied Cryptography", + "type": "Talk", + "expertise": "Intermediate", + "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Layer 1", - "Layer 2s", - "Tooling", - "DevRel", - "Live Coding", - "onboarding", - "DevRel", - "Layer 1", - "Layer 2s", - "Live Coding", - "Tooling" + "Privacy", + "Identity", + "Zero-Knowledge", + "noir", + "Identity", + "Privacy", + "Zero-Knowledge" ], "keywords": [ - "Onboarding" + "ZK", + "NFC", + "Noir", + "PLONK" ], - "duration": 4607, + "duration": 1189, "language": "en", - "sources_swarmHash": "bde6952b4b8c11c866e8ce4a9d794f08480304cabca0b1c268e7489f5bca9451", - "sources_youtubeId": "lRo-TBLTgzs", + "sources_swarmHash": "8ff112bba1682d13788042e5ab586ab285935e607ec2031734b6ed5acbad29ea", + "sources_youtubeId": "W6C-duDEiOU", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6734297d9dbb7a90e1ae69fe", - "transcript_vtt": "No VTT link provided", - "transcript_text": "No transcript text provided", + "sources_streamethId": null, "eventId": "devcon-7", - "slot_start": 1731465900000, - "slot_end": 1731471300000, - "slot_roomId": "classroom-e", - "resources_presentation": "https://docs.google.com/presentation/d/1obE94TKOOHTvht_bjpYs85KpbFc9Qw-AagmzvQTXrYk", - "resources_slides": null, + "slot_start": 1731484200000, + "slot_end": 1731486000000, + "slot_roomId": "classroom-a", + "resources_presentation": "https://docs.google.com/presentation/d/1oOW6cu6Z74Nvx5lSpva4kFP8hggWPnZdL6MvVt9Hc9U", + "resources_slides": "", "speakers": [ - "abena", - "rob-stupay", - "simon-emanuel-schmid" + "michael-elliot", + "theo-madzou" ] }, "vector": [ 0, 0, 0, - 6, 0, 0, 0, @@ -890637,6 +888160,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -890796,8 +888320,10 @@ 0, 0, 0, + 6, 0, 0, + 6, 0, 0, 0, @@ -891306,7 +888832,6 @@ 0, 0, 0, - 6, 0, 0, 0, @@ -891370,8 +888895,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -891387,18 +888910,17 @@ 0, 0, 0, + 6, 0, 0, 0, 0, 0, - 6, 0, 0, 0, 0, 0, - 2, 0, 0, 0, @@ -891425,6 +888947,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -891443,8 +888966,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -891476,6 +888997,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -891551,7 +889073,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -891932,13 +889453,13 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, @@ -891946,8 +889467,6 @@ 0, 0, 0, - 2, - 0, 0, 0, 0, @@ -891962,61 +889481,50 @@ }, { "session": { - "id": "zk-email-fast-proofs-and-production-ready-account-recovery", - "sourceId": "WNQBQH", - "title": "ZK Email: Fast Proofs and Production-Ready Account Recovery", - "description": "We discuss progress that ZK Email has made in making new proofs really easily, as well as interesting new on-chain directions for email-triggered transactions. We'll go over proof registries, email-based multisig signers, and email guardians for account recovery in production.", - "track": "Applied Cryptography", + "id": "zkproving-the-history-of-ethereum-in-real-time", + "sourceId": "TVNJ99", + "title": "zkProving the history of Ethereum in real time.", + "description": "I'll explain the current work that we are doing in the Polygon zk teams to improve the performance of the provers and the quality of the tooling.\r\nI'll will explain how we can parallelise the generation of the proof and how we can integrate with different hardware and software so that it should allow to build a zk proof of a block in real time. 
\r\nI'll explain also how this proofs can be recursively linked to build a zkProof that can proof the whole Ethereum history from the genesis.", + "track": "Core Protocol", "type": "Talk", - "expertise": "Intermediate", + "expertise": "Expert", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Privacy", + "ZK-EVMs", "ZKP", - "Use cases of cryptography", - "client-side", - "2FA", - "Account Abstraction", - "Cryptography", - "Identity", - "Privacy", - "Recovery", - "Security", - "Use cases of cryptography", "Zero-Knowledge", + "lightclient", + "type1", + "starks", + "Zero-Knowledge", + "ZK-EVMs", "ZKP" ], "keywords": [ - "ZK", - "Email" + "Lightclient", + "type1", + "STARK" ], - "duration": 1518, + "duration": 1604, "language": "en", - "sources_swarmHash": "a94b9dc27784f47de11b6a11d62b5643a1cf29f711ee569154584f599c98f857", - "sources_youtubeId": "YvzdNMpynZM", + "sources_swarmHash": "848d3f552c5ce88efe748988407546a906a410c1d533f47a363d8c0dcf4463fe", + "sources_youtubeId": "boSCLHs30tk", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "673441219dbb7a90e10a4706", "eventId": "devcon-7", - "slot_start": 1731468600000, - "slot_end": 1731470400000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/1G6_OH46sVVpOgDR1P1ZWqOpTtRzjcESBO1p9aHuVisY", - "resources_slides": null, + "slot_start": 1731474000000, + "slot_end": 1731475800000, + "slot_roomId": "stage-2", + "resources_presentation": "https://docs.google.com/presentation/d/1p0VlUcR1aOi--jA4hFb8aBF8mAWBuf-2vwun38CXBtI", + "resources_slides": "https://drive.google.com/file/d/1mdKsMi32bTXwb1XsJEfcfEWL-A0omDoP/view", "speakers": [ - "aayush-gupta", - "sora-suegami" + "jordi-baylina" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -892126,22 +889634,6 @@ 0, 0, 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 0, 0, @@ -892339,6 +889831,7 @@ 0, 0, 0, + 6, 0, 0, 0, @@ -892757,12 +890250,10 @@ 0, 0, 0, - 6, 0, 0, 0, 0, - 6, 0, 0, 0, @@ -892774,8 +890265,6 @@ 0, 0, 0, - 6, - 6, 0, 0, 0, @@ -892790,13 +890279,13 @@ 0, 0, 0, - 2, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, @@ -892811,9 +890300,7 @@ 0, 0, 0, - 2, 0, - 2, 0, 0, 0, @@ -892837,7 +890324,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -892862,6 +890348,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -892878,7 +890365,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -892929,7 +890415,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -893136,6 +890621,22 @@ 0, 0, 0, + 2, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, 0, 0, 0, @@ -893318,18 +890819,26 @@ 0, 0, 0, - 2, - 2, 0, 0, 0, 0, 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 2, + 2, 2, 0, 0, 0, 2, + 2, 0, 0, 0, @@ -893347,39 +890856,43 @@ }, { "session": { - "id": "zk-in-rollups-full-validity-proving-on-the-op-stack", - "sourceId": "8J8Z7Q", - "title": "ZK in Rollups: Full Validity Proving on the OP Stack", - "description": "Historically, zkEVM rollups have been difficult to build, requiring deep cryptography expertise that makes customization and maintainability complicated and time-consuming. With advancements in zk, zkVMs make it easy for any developer to write ZK applications with Rust. With a zkVM, we've created seamless way to upgrade ANY existing OP Stack chain to use ZKPs in just 1 hour. 
These rollups get fast finality, cost-effective (<0.1 cent / tx), and full EVM equivalence.", - "track": "Layer 2", + "id": "zoom-in-on-eof-stack-validation", + "sourceId": "YYGYGF", + "title": "Zoom in on EOF stack validation", + "description": "Deep dive into EIP-5450: EOF stack validation spec and explaining some of the rationale behind it.", + "track": "Core Protocol", "type": "Talk", - "expertise": "Intermediate", + "expertise": "Expert", "audience": "Engineering", "featured": false, "doNotRecord": false, "tags": [ - "Layer 2s", - "Rollups", - "ZKP" + "Core Protocol", + "eof", + "Core", + "Protocol" ], - "keywords": [], - "duration": 901, + "keywords": [ + "EVM", + "EOF" + ], + "duration": 1485, "language": "en", - "sources_swarmHash": "", - "sources_youtubeId": "", + "sources_swarmHash": "d5182e8c0b90b2ac33f823220b4300a06bc0c0713de0715bda2313ea4d8fe5eb", + "sources_youtubeId": "80szRrNW0MM", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": "6735e5c39dbb7a90e1bb8d44", - "transcript_vtt": "https://streameth-develop.ams3.digitaloceanspaces.com/transcriptions/6735e5c39dbb7a90e1bb8d44.vtt", - "transcript_text": " Hello, my name is Uma, I'm one of the co-founders of Sysynkt, and today I'm going to be telling you guys about introducing full ZK validity proving into the OP stack. So let's start off with kind of motivating the problem, which is why are ZK rollups interesting and important to the Ethereum endgame? So today, the Ethereum rollup-centric roadmap is how Ethereum is going to scale. And within that, I think it's commonly acknowledged that ZK rollups are the only way that a lot of the problems we're facing today as an ecosystem will get solved. For example, we'll get fast finality, we'll get interoperability across all the rollups, unified liquidity for users, and overall, it'll help improve UX greatly. Inherently, we can kind of think of why ZK helps solve all these problems, because decentralization, with all the different Ethereum nodes re-running and re-executing transactions inherently has some overhead and ZK by giving us verifiability fixes this. So, ZK rollups have been around for a long time. Historically we've known that ZK has been very important. But until recently, ZK rollups have been really challenging. You'd have to write in specialized languages, SDKs and DSLs, to actually encode an Ethereum EVM state transition function in a ZK circuit to be able to prove it. And then, in general, there were a lot of compromises. You had type 1, type 2, type 3, where you can't use the native Ethereum storage format or sometimes even be bytecode compatible due to the limitations of ZK. So how are we solving this? Well, at Sysynkt we're building a ZK VM, not a ZK EVM, a ZK virtual machine where you can just write Rust and then use ZK. So what does that mean? 
As a developer, you can just take arbitrary Rust code.", + "sources_streamethId": "67357ab39dbb7a90e1d98e37", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731582600000, - "slot_end": 1731583800000, - "slot_roomId": "stage-5", - "resources_presentation": "https://docs.google.com/presentation/d/1Dw9W_WUh2DLUhcVkatH257BHYs8yWdxlfLhoJXs8jnY", - "resources_slides": null, + "slot_start": 1731555000000, + "slot_end": 1731556800000, + "slot_roomId": "stage-3", + "resources_presentation": "https://docs.google.com/presentation/d/1d8txUWtGhcQzZvxbPw_N_fi_3997eaZr5RJ2nDVrHkg", + "resources_slides": "https://drive.google.com/file/d/19U0kZKB4CH4sSd1WW9TZbzRf156Ey42t/view", "speakers": [ - "uma-roy" + "andrei-maiboroda" ] }, "vector": [ @@ -893387,10 +890900,10 @@ 0, 0, 0, + 6, 0, 0, 0, - 6, 0, 0, 0, @@ -894150,6 +891663,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -894176,7 +891690,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -894200,7 +891713,6 @@ 0, 0, 0, - 2, 0, 0, 0, @@ -894209,10 +891721,6 @@ 0, 0, 0, - 2, - 0, - 0, - 0, 0, 0, 0, @@ -894431,6 +891939,7 @@ 0, 0, 0, + 2, 0, 0, 0, @@ -894442,6 +891951,8 @@ 0, 0, 0, + 2, + 2, 0, 0, 0, @@ -894698,9 +892209,6 @@ 0, 0, 2, - 0, - 0, - 0, 2, 0, 0, @@ -894719,56 +892227,38 @@ }, { "session": { - "id": "zkmpc-bring-public-auditability-into-mpc", - "sourceId": "XNN3XR", - "title": "ZKMPC: Bring public auditability into MPC", - "description": "In multi-party computation (MPC), participants collaboratively compute without revealing private inputs. To secure MPC on a blockchain, preventing collusion is essential. We developed a \"publicly auditable\" version of SPDZ, a widely-used MPC protocol, that enables third-party verification through zero-knowledge proofs (ZKP) collaboratively generated by multiple parties. 
We will also demonstrate application examples, such as a Game Master-free werewolf game.", - "track": "Applied Cryptography", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Research", + "id": "zupass-identity-and-credentials-beyond-proof-of-personhood", + "sourceId": "K9SNB7", + "title": "Zupass, identity and credentials beyond proof of personhood", + "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.", + "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia", + "type": "Lightning Talk", + "expertise": "", + "audience": "Engineering", "featured": false, "doNotRecord": false, - "tags": [ - "ZKP", - "MPC", - "collaboration", - "zk-snark", - "MPC", - "ZKP" - ], - "keywords": [ - "Collaborative", - "zk-SNARKs" - ], - "duration": 1399, + "tags": [], + "keywords": [], + "duration": 593, "language": "en", - "sources_swarmHash": "84b05559d4df707a8f29bbb79e18bb1bdb1fff62ae2738288c7d4be463f3b188", - "sources_youtubeId": "aWQ8zzi1EAQ", + "sources_swarmHash": "0221e03f42edcd25f7bdec6110958dbda072c8b4ec25a907ce9118272507638e", + "sources_youtubeId": "hlE4x7JURjY", "sources_ipfsHash": "", "sources_livepeerId": "", - "sources_streamethId": null, + "sources_streamethId": "6735933f9dbb7a90e115f6af", + "transcript_vtt": "No VTT link provided", + "transcript_text": "No transcript text provided", "eventId": "devcon-7", - "slot_start": 1731407400000, - "slot_end": 1731409200000, - "slot_roomId": "stage-3", - "resources_presentation": "https://docs.google.com/presentation/d/10GOrQfQ0ldlyvKU05TvdHfQd4G2zNNTzfEe2i2bfgMQ", - "resources_slides": null, + "slot_start": 1731561000000, + "slot_end": 1731561600000, + "slot_roomId": "breakout-3", + "resources_presentation": "https://docs.google.com/presentation/d/1T4jVcPwg6WbjeISdy1YwzFmXCYct8HCQrjsXs8P0FdI", + "resources_slides": "https://drive.google.com/file/d/1VT4pCA44gNZqfZVPRk8_0A1W4Od_PNxd/view", "speakers": [ - "task-ohmori", - "yusuke-nakae" + "rob-knight" ] }, "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, 0, 6, 0, @@ -894896,5504 +892386,6 @@ 0, 0, 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 2, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ] - }, - { - "session": { - "id": "zkpassport-private-unforgeable-identity", - "sourceId": "K3GWST", - "title": "ZKpassport: Private Unforgeable Identity", - "description": "This talk presents ZKpassport, an identity verification solution integrating zero-knowledge proofs with ePassports to achieve privacy-preserving and unforgeable government-attested digital identities. We will delve into the technical architecture, implementation challenges, and practical applications. Attendees will gain insights into the development process, benefits, and potential uses of this technology in enhancing digital identity privacy and security.", - "track": "Applied Cryptography", - "type": "Talk", - "expertise": "Intermediate", - "audience": "Engineering", - "featured": false, - "doNotRecord": false, - "tags": [ - "Privacy", - "Identity", - "Zero-Knowledge", - "noir", - "Identity", - "Privacy", - "Zero-Knowledge" - ], - "keywords": [ - "ZK", - "NFC", - "Noir", - "PLONK" - ], - "duration": 1189, - "language": "en", - "sources_swarmHash": "8ff112bba1682d13788042e5ab586ab285935e607ec2031734b6ed5acbad29ea", - "sources_youtubeId": "W6C-duDEiOU", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": null, - "eventId": "devcon-7", - "slot_start": 1731484200000, - "slot_end": 1731486000000, - "slot_roomId": "classroom-a", - "resources_presentation": "https://docs.google.com/presentation/d/1oOW6cu6Z74Nvx5lSpva4kFP8hggWPnZdL6MvVt9Hc9U", - "resources_slides": null, - "speakers": [ - "michael-elliot", - "theo-madzou" - ] - }, - "vector": [ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 
0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 2, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - ] - }, - { - "session": { - "id": "zkproving-the-history-of-ethereum-in-real-time", - "sourceId": "TVNJ99", - "title": "zkProving the history of Ethereum in real time.", - "description": "I'll explain the current work that we are doing in the Polygon zk teams to improve the performance of the provers and the quality of the tooling.\r\nI'll will explain how we can parallelise the generation of the proof and how we can integrate with different hardware and software so that it should allow to build a zk proof of a block in real time. 
\r\nI'll explain also how this proofs can be recursively linked to build a zkProof that can proof the whole Ethereum history from the genesis.", - "track": "Core Protocol", - "type": "Talk", - "expertise": "Expert", - "audience": "Engineering", - "featured": false, - "doNotRecord": false, - "tags": [ - "ZK-EVMs", - "ZKP", - "Zero-Knowledge", - "lightclient", - "type1", - "starks", - "Zero-Knowledge", - "ZK-EVMs", - "ZKP" - ], - "keywords": [ - "Lightclient", - "type1", - "STARK" - ], - "duration": 1604, - "language": "en", - "sources_swarmHash": "848d3f552c5ce88efe748988407546a906a410c1d533f47a363d8c0dcf4463fe", - "sources_youtubeId": "boSCLHs30tk", - "sources_ipfsHash": "", - "sources_livepeerId": "", - "sources_streamethId": "673441219dbb7a90e10a4706", - "eventId": "devcon-7", - "slot_start": 1731474000000, - "slot_end": 1731475800000, - "slot_roomId": "stage-2", - "resources_presentation": "https://docs.google.com/presentation/d/1p0VlUcR1aOi--jA4hFb8aBF8mAWBuf-2vwun38CXBtI", - "resources_slides": null, - "speakers": [ - "jordi-baylina" - ] - }, - "vector": [ - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 6, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 
[… long run of removed vector lines (mostly "0," with scattered "2," and "6," values) elided for readability …]
-      0
-    ]
-  },
-  {
-    "session": {
-      "id": "zoom-in-on-eof-stack-validation",
-      "sourceId": "YYGYGF",
-      "title": "Zoom in on EOF stack validation",
-      "description": "Deep dive into EIP-5450: EOF stack validation spec and explaining some of the rationale behind it.",
-      "track": "Core Protocol",
-      "type": "Talk",
-      "expertise": "Expert",
-      "audience": "Engineering",
-      "featured": false,
-      "doNotRecord": false,
-      "tags": [
-        "Core Protocol",
-        "eof",
-        "Core",
-        "Protocol"
-      ],
-      "keywords": [
-        "EVM",
-        "EOF"
-      ],
-      "duration": 1485,
-      "language": "en",
-      "sources_swarmHash": "",
-      "sources_youtubeId": "",
-      "sources_ipfsHash": "",
-      "sources_livepeerId": "",
-      "sources_streamethId": "67357ab39dbb7a90e1d98e37",
-      "transcript_vtt": "No VTT link provided",
-      "transcript_text": "No transcript text provided",
-      "eventId": "devcon-7",
-      "slot_start": 1731555000000,
-      "slot_end": 1731556800000,
-      "slot_roomId": "stage-3",
-      "resources_presentation": "https://docs.google.com/presentation/d/1d8txUWtGhcQzZvxbPw_N_fi_3997eaZr5RJ2nDVrHkg",
-      "resources_slides": null,
-      "speakers": [
-        "andrei-maiboroda"
-      ]
-    },
-    "vector": [
-      0,
-      0,
-      0,
-      0,
-      6,
[… long run of removed vector lines (mostly "0," with scattered "2," and "6," values) elided for readability …]
-      0
-    ]
-  },
-  {
-    "session": {
-      "id": "zupass-identity-and-credentials-beyond-proof-of-personhood",
-      "sourceId": "K9SNB7",
-      "title": "Zupass, identity and credentials beyond proof of personhood",
-      "description": "A one-day summit focusing on the theme of d/acc: emphasizing the values of decentralization, democracy, differential accelerated progress, and defensive tech including crypto security, public epistemics, bio defense, neurotech/longevity, decentralized ai and physical resilience.",
-      "track": "[CLS] d/acc Discovery Day: Building Towards a Resilient Utopia",
-      "type": "Lightning Talk",
-      "expertise": "",
-      "audience": "Engineering",
-      "featured": false,
-      "doNotRecord": false,
-      "tags": [],
-      "keywords": [],
-      "duration": 593,
-      "language": "en",
-      "sources_swarmHash": "0221e03f42edcd25f7bdec6110958dbda072c8b4ec25a907ce9118272507638e",
-      "sources_youtubeId": "hlE4x7JURjY",
-      "sources_ipfsHash": "",
-      "sources_livepeerId": "",
-      "sources_streamethId": "6735933f9dbb7a90e115f6af",
-      "transcript_vtt": "No VTT link provided",
-      "transcript_text": "No transcript text provided",
-      "eventId": "devcon-7",
-      "slot_start": 1731561000000,
-      "slot_end": 1731561600000,
-      "slot_roomId": "breakout-3",
-      "resources_presentation": "https://docs.google.com/presentation/d/1T4jVcPwg6WbjeISdy1YwzFmXCYct8HCQrjsXs8P0FdI",
-      "resources_slides": null,
-      "speakers": [
-        "rob-knight"
-      ]
-    },
-    "vector": [
-      0,
-      6,
[… long run of removed vector lines (all "0,") elided for readability …]
       6,
       0,
       0,
@@ -901577,8 +893569,6 @@
       0,
       0,
       0,
-      0,
-      0,
       2,
       0,
       0,
diff --git a/devcon-api/data/vectors/dictionary.json b/devcon-api/data/vectors/dictionary.json
index ab158a29a..1803f8048 100644
--- a/devcon-api/data/vectors/dictionary.json
+++ b/devcon-api/data/vectors/dictionary.json
@@ -38,8 +38,8 @@
   "yiannis-psaras",
   "dennis-trautwein",
   "hira-siddiqui",
-  "ley",
   "mujtaba-idrees",
+  "ley",
   "mislav-javor",
   "max-segall",
   "joshua-cheong",
@@ -71,8 +71,8 @@
   "briefkandle",
   "lin-oshitani",
   "richard-liu",
-  "antonio-seveso",
   "moritz-boullenger",
+  "antonio-seveso",
   "azam-soleimanian",
   "bogdan-ursu",
   "maryam-bahrani",
@@ -83,30 +83,30 @@
   "dhvani-patel",
   "remco-bloemen",
   "guo-liu",
-  "arun-maharajan",
   "rumee-singh",
+  "arun-maharajan",
   "david-casey",
   "rebecca-kacherginsky",
   "sean-anderson",
   "max-resnick",
-  "florian-glatz",
   "marina-markezic",
-  "abhinav-goel",
-  "bruno-batavia",
-  "bruno-moniz",
-  "camila-rioja",
-  "daniel-marquez",
+  "florian-glatz",
+  "suzana-maranhao-moreno",
   "hart-montgomery",
   "italo-borssatto",
   "luca-cosivi",
+  "bruno-batavia",
+  "abhinav-goel",
   "qin-en",
-  "shukyee-ma",
-  "sophia-lopez",
-  "suzana-maranhao-moreno",
+  "daniel-marquez",
   "thiago-rudiger",
+  "camila-rioja",
+  "bruno-moniz",
+  "sophia-lopez",
+  "shukyee-ma",
   "weekee-toh",
-  "evan-griffiths",
   "gabriel-fior",
+  "evan-griffiths",
   "peter-jung",
   "ray-jacobson",
   "julien-niset",
@@ -129,11 +129,11 @@
   "farhad-asgarov",
   "lukas-rosario",
   "conner-swenberg",
-  "florian-dreschner",
-  "forest-fang",
-  "miriam-neubauer",
   "rob-knight",
   "veronica-zheng",
+  "forest-fang",
+  "miriam-neubauer",
+  "florian-dreschner",
   "aisling-connolly",
   "andrew-lu",
   "richard",
@@ -145,12 +145,12 @@
   "eda-akturk",
   "ludens",
   "small-brain",
+  "timour-kosters",
+  "nicole-sun",
   "dc-posch",
   "janine-leger",
-  "nicole-sun",
-  "timour-kosters",
-  "bridget-hearst",
   "evin-mcmullen",
+  "bridget-hearst",
   "olivia-smith",
   "rachel-onchain",
   "toniya-sundaram",
@@ -166,14 +166,14 @@
   "konrad-urban",
   "radina-talanova",
   "everett-hildenbrandt",
+  "lightclient",
   "alex-beregszaszi",
   "daniel",
   "eniko-garam",
-  "lightclient",
   "mark-tyneway",
   "kris-kaczor",
-  "florent",
   "michael-elliot",
+  "florent",
   "remi",
   "theo-madzou",
   "julian-arnesino",
@@ -182,19 +182,19 @@
   "hal-seki",
   "julian-sutherland",
   "wanseob-lim",
+  "skylar-weaver",
   "aya-miyaguchi",
-  "gubsheep",
   "justin-glibert",
+  "gubsheep",
   "nicholas-paul",
-  "skylar-weaver",
   "alex-stokes",
   "tim-beiko",
-  "cody-crozier",
-  "mark-smargon",
   "pedro-gomes",
   "tom-teman",
-  "albert-ni",
+  "mark-smargon",
+  "cody-crozier",
   "barry",
+  "albert-ni",
   "vitalik-buterin",
   "puja-ohlhaver",
   "jay-baxter",
@@ -229,10 +229,10 @@
   "lisa-jy-tan",
   "aellison-cassimiro",
   "g-nick-gnidan",
-  "harith-kamarul",
-  "loi-luu",
   "matthew-tan",
+  "harith-kamarul",
   "tn-lee",
+  "loi-luu",
   "joe-andrews",
   "devansh-mehta",
   "kolby-moroz-liebl",
@@ -252,22 +252,21 @@
   "charles-guillemet",
   "karl-floersch",
   "griff-green",
+  "nico-gallardo",
   "james-kiernan",
   "lauren-luz",
-  "nico-gallardo",
   "timdaub",
-  "0xrajeev",
-  "harikrishnan-mulackal",
   "josselin-feist",
+  "0xrajeev",
   "matthias-egli",
   "mehdi-zerouali",
   "mooly-sagiv",
+  "harikrishnan-mulackal",
   "remy-roy",
   "akshit-gupta",
-  "erin-magennis",
   "paul-kohlhaas",
-  "ahmad-bitar",
   "anshu-jalan",
+  "ahmad-bitar",
   "launamu",
   "sejal-rekhan",
   "kaseth",
@@ -313,8 +312,8 @@
   "oleksandr-brezhniev",
   "juan-carlos-bell-llinas",
   "oxytocin",
-  "josh-davis",
   "mario-havel",
+  "josh-davis",
   "eniko-nagy",
   "echo",
   "eitan",
@@ -322,8 +321,8 @@
   "mark-holt",
   "philip-daian",
   "andres-forigua",
-  "mateo-sabogal",
   "william-martinez",
+  "mateo-sabogal",
   "michael-okeeffe",
   "mike-neuder",
   "stani-kulechov",
@@ -388,10 +387,10 @@
   "patricio-palladino",
   "quintus-kilbourn",
   "mate-soos",
-  "damaris-njambi-njoroge",
-  "david-nandwa",
"yele-bademosi", "yoseph-ayele", + "yele-bademosi", + "david-nandwa", + "damaris-njambi-njoroge", "wodann", "dominik-teiml", "dapplion", @@ -439,8 +438,8 @@ "justin-drake", "hart-lambur", "bruno-macaes", - "bitbeckers", "holke-brammer", + "bitbeckers", "peter-garamvolgyi", "rh", "krzysztof-urbanski", @@ -469,8 +468,8 @@ "rachel-rose-oleary", "ctrlc03", "artem-kotelskiy", - "beth-mccarthy", "joshua-davila", + "beth-mccarthy", "shufan-wang", "leo-lara", "ya-wen-jeng", @@ -480,8 +479,8 @@ "kevin-chia", "teeramet-jern-kunpittaya", "eduard-sanou", - "han-jian", "riley-wong-theythem", + "han-jian", "jean-philippe-bossuat", "nuno-loureiro", "jeff-emmett", @@ -506,8 +505,8 @@ "robert-drost", "pablo-sabbatella", "carl-cervone", - "carlos-matallana", "ignasi-ramos", + "carlos-matallana", "william-george", "miros", "gary-thung", @@ -635,7 +634,6 @@ "augusto-teixeira", "gregthegreek", "mikko-ohtamaa", - "vanishree-rao", "alex-towle", "merlin-egalite", "lucas-manuel", @@ -712,9 +710,9 @@ "amy-proal", "ira-nezhynska", "vlad-zamfir", - "brianna-chang", "hong-phuc-dang", "mario-behling", + "brianna-chang", "mishari-muqbil", "vijay-mohan", "medha-kothari", @@ -723,8 +721,8 @@ "arik-galansky", "adam-ceresko", "andrew-macpherson", - "kristina-mayman", "mindy-harrell", + "kristina-mayman", "ahmed-gatnash", "mike-silagadze", "stefanos-chaliasos", @@ -740,14 +738,13 @@ "namik-muduroglu", "vaibhav-chellani", "caspar-schwarz-schilling", - "pratyush-ranjan-tiwari", "alexandre-belling", "juan-blanco", "jason-vranek", "rohan-abraham", "roshan-abraham", - "abena", "simon-emanuel-schmid", + "abena", "sora-suegami", "uma-roy", "task-ohmori", @@ -1022,7 +1019,6 @@ "communication", "real", "rtc", - "science", "Permissionless", "impact", "RPGF", @@ -1253,7 +1249,6 @@ "Optimistic", "rollups", "fatf", - "history", "fixed", "rate", "arbitrum", diff --git a/devcon-api/package.json b/devcon-api/package.json index 55e6747ab..224d67e89 100644 --- a/devcon-api/package.json +++ b/devcon-api/package.json @@ -11,7 +11,7 @@ }, "scripts": { "build": "tsc --project tsconfig.json && tsc-alias -p tsconfig.json", - "postbuild": "node -e \"require('fs-extra').copySync('data', 'dist/devcon-api/data') && require('fs-extra').copySync('public', 'dist/devcon-api/public')\"", + "postbuild": "node -e \"const fs = require('fs-extra'); fs.copySync('public', 'dist/devcon-api/public', { overwrite: true }); fs.copySync('data', 'dist/devcon-api/data', { overwrite: true });\"", "start": "NODE_PATH=./dist node ./dist/devcon-api/src/index", "dev:generate": "yarn swagger:generate && yarn db:generate && nodemon ./src/index.ts", "dev": "nodemon ./src/index.ts", @@ -114,6 +114,7 @@ "@types/dotenv": "^8.2.0", "@types/express": "^4.17.15", "@types/express-session": "^1.18.0", + "@types/fs-extra": "^11.0.4", "@types/handlebars": "^4.1.0", "@types/helmet": "^4.0.0", "@types/jest": "^29.2.5", @@ -129,6 +130,7 @@ "eslint": "8.35.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-prettier": "^4.2.1", + "fs-extra": "^11.2.0", "husky": "^8.0.3", "jest": "^29.3.1", "lint-staged": "^13.1.0", diff --git a/devcon-api/src/db/devcon.db b/devcon-api/src/db/devcon.db index f8b833d61..67a7ff103 100644 Binary files a/devcon-api/src/db/devcon.db and b/devcon-api/src/db/devcon.db differ diff --git a/devcon-api/src/swagger/definition.json b/devcon-api/src/swagger/definition.json index de3cf85dd..2aa9ca27f 100644 --- a/devcon-api/src/swagger/definition.json +++ b/devcon-api/src/swagger/definition.json @@ -908,33 +908,6 @@ } } }, - "/sessions/{id}/slides": { - "get": { - "tags": 
[ - "Sessions" - ], - "description": "", - "produces": [ - "application/pdf" - ], - "parameters": [ - { - "name": "id", - "in": "path", - "required": true, - "type": "string" - } - ], - "responses": { - "200": { - "description": "OK" - }, - "404": { - "description": "Not Found" - } - } - } - }, "/sessions/{id}/image": { "get": { "tags": [ diff --git a/devcon-api/src/utils/config.ts b/devcon-api/src/utils/config.ts index 533854f48..c489d5f65 100644 --- a/devcon-api/src/utils/config.ts +++ b/devcon-api/src/utils/config.ts @@ -27,7 +27,7 @@ export const API_INFO = { export const API_DEFAULTS = { SIZE: 20, ORDER: 'desc', - githubDataUrl: 'https://raw.githubusercontent.com/efdevcon/api/dev/data', + githubDataUrl: 'https://raw.githubusercontent.com/efdevcon/monorepo/main/devcon-api/data', } export const CONFIG = { diff --git a/devcon-api/yarn.lock b/devcon-api/yarn.lock index ccab88941..c7f9d74de 100644 --- a/devcon-api/yarn.lock +++ b/devcon-api/yarn.lock @@ -1599,6 +1599,14 @@ "@types/qs" "*" "@types/serve-static" "*" +"@types/fs-extra@^11.0.4": + version "11.0.4" + resolved "https://registry.yarnpkg.com/@types/fs-extra/-/fs-extra-11.0.4.tgz#e16a863bb8843fba8c5004362b5a73e17becca45" + integrity sha512-yTbItCNreRooED33qjunPthRcSjERP1r4MqCZc7wv0u2sUkzTFp45tgUfS5+r7FrZPdmCCNflLhVSP/o+SemsQ== + dependencies: + "@types/jsonfile" "*" + "@types/node" "*" + "@types/graceful-fs@^4.1.3": version "4.1.6" resolved "https://registry.npmjs.org/@types/graceful-fs/-/graceful-fs-4.1.6.tgz" @@ -1659,6 +1667,13 @@ resolved "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz" integrity sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ== +"@types/jsonfile@*": + version "6.1.4" + resolved "https://registry.yarnpkg.com/@types/jsonfile/-/jsonfile-6.1.4.tgz#614afec1a1164e7d670b4a7ad64df3e7beb7b702" + integrity sha512-D5qGUYwjvnNNextdU59/+fI+spnwtTFmyQP0h+PfIOSkNfpU6AOICUOkm4i0OnSk+NyjdPJrxCDro0sJsWlRpQ== + dependencies: + "@types/node" "*" + "@types/linkify-it@*": version "3.0.2" resolved "https://registry.npmjs.org/@types/linkify-it/-/linkify-it-3.0.2.tgz" @@ -3985,7 +4000,7 @@ fs-constants@^1.0.0: fs-extra@^11.2.0: version "11.2.0" - resolved "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-11.2.0.tgz#e70e17dfad64232287d01929399e0ea7c86b0e5b" integrity sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw== dependencies: graceful-fs "^4.2.0"