diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..5ea803f
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,20 @@
+# See https://github.com/editorconfig/editorconfig/wiki/EditorConfig-Properties
+# for all available properties.
+
+# Top-most EditorConfig file for the firebase-ios-sdk repo.
+root = true
+
+# Defaults for all files
+[*]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+# ObjC and Swift files
+# See https://developer.apple.com/documentation/xcode-release-notes/xcode-16-release-notes#New-Features-in-Xcode-16-Beta
+# for the subset of properties supported by Xcode.
+[*.{h,m,mm,swift}]
+indent_style = space
+indent_size = 2
+max_line_length = 100
diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml
new file mode 100644
index 0000000..79c5d63
--- /dev/null
+++ b/.github/workflows/check.yml
@@ -0,0 +1,54 @@
+name: check
+
+on:
+ pull_request:
+ push:
+ branches: main
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ check:
+ runs-on: macos-latest
+ env:
+ MINT_PATH: ${{ github.workspace }}/mint
+ steps:
+ - uses: actions/checkout@v3
+
+ - uses: actions/setup-python@v3
+ with:
+ python-version: '3.10'
+
+ - name: Cache Mint packages
+ uses: actions/cache@v3
+ with:
+ path: ${{ env.MINT_PATH }}
+ key: ${{ runner.os }}-mint-${{ hashFiles('**/Mintfile') }}
+ restore-keys: ${{ runner.os }}-mint-
+
+ - name: Setup Scripts Directory
+ run: ./setup-scripts.sh
+
+ - name: Setup check
+ run: |
+ brew update
+ brew install clang-format@18
+ brew install mint
+ mint bootstrap
+
+ - name: Style
+ run: scripts/style.sh test-only
+
+ - name: Whitespace
+ run: scripts/check_whitespace.sh
+
+ - name: Filename spaces
+ run: scripts/check_filename_spaces.sh
+
+ - name: Copyrights
+ run: scripts/check_copyright.sh
+
+ - name: Imports
+ run: scripts/check_imports.swift
diff --git a/.swiftformat b/.swiftformat
new file mode 100644
index 0000000..7def2b7
--- /dev/null
+++ b/.swiftformat
@@ -0,0 +1,14 @@
+# Formatting Options - Mimic Google style
+--indent 2
+--maxwidth 100
+--wrapparameters afterfirst
+
+# Disabled Rules
+
+# Too many of our swift files have simplistic examples. While technically
+# it's correct to remove the unused argument labels, it makes our examples
+# look wrong.
+--disable unusedArguments
+
+# We prefer trailing braces.
+--disable wrapMultilineStatementBraces
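As a rough sketch of what these options produce (the function below is hypothetical, not from this repo): with `--indent 2`, `--maxwidth 100` and `--wrapparameters afterfirst`, a declaration that overflows 100 columns wraps after its first parameter, aligns the rest beneath it, and keeps the opening brace trailing because `wrapMultilineStatementBraces` is disabled.

```swift
import Foundation

// Hypothetical API, formatted the way the options above would leave it:
// wrapped after the first parameter, 2-space indent, trailing brace.
func executeQuery(named name: String,
                  variables: [String: String],
                  timeout: TimeInterval) async throws -> [String: String] {
  fatalError("illustrative only")
}
```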
diff --git a/.swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata b/.swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata
deleted file mode 100644
index 919434a..0000000
--- a/.swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata
+++ /dev/null
@@ -1,7 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<Workspace
-   version = "1.0">
-   <FileRef
-      location = "self:">
-   </FileRef>
-</Workspace>
diff --git a/.swiftpm/xcode/xcuserdata/aashishp.xcuserdatad/xcschemes/xcschememanagement.plist b/.swiftpm/xcode/xcuserdata/aashishp.xcuserdatad/xcschemes/xcschememanagement.plist
deleted file mode 100644
index fb5eda9..0000000
--- a/.swiftpm/xcode/xcuserdata/aashishp.xcuserdatad/xcschemes/xcschememanagement.plist
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
-	<key>SchemeUserState</key>
-	<dict>
-		<key>FirebaseDataConnect.xcscheme_^#shared#^_</key>
-		<dict>
-			<key>orderHint</key>
-			<integer>0</integer>
-		</dict>
-		<key>Promises (Playground) 1.xcscheme</key>
-		<dict>
-			<key>isShown</key>
-			<false/>
-			<key>orderHint</key>
-			<integer>2</integer>
-		</dict>
-		<key>Promises (Playground) 2.xcscheme</key>
-		<dict>
-			<key>isShown</key>
-			<false/>
-			<key>orderHint</key>
-			<integer>3</integer>
-		</dict>
-		<key>Promises (Playground).xcscheme</key>
-		<dict>
-			<key>isShown</key>
-			<false/>
-			<key>orderHint</key>
-			<integer>1</integer>
-		</dict>
-	</dict>
-</dict>
-</plist>
diff --git a/Mintfile b/Mintfile
new file mode 100644
index 0000000..640a5ca
--- /dev/null
+++ b/Mintfile
@@ -0,0 +1 @@
+nicklockwood/SwiftFormat@0.54.0
diff --git a/Package.swift b/Package.swift
index a786a70..b9b07ad 100644
--- a/Package.swift
+++ b/Package.swift
@@ -19,7 +19,7 @@
import class Foundation.ProcessInfo
import PackageDescription
-//let firebaseVersion = "10.25.0"
+// let firebaseVersion = "10.25.0"
let package = Package(
name: "FirebaseDataConnect",
@@ -28,7 +28,7 @@ let package = Package(
.library(
name: "FirebaseDataConnect",
targets: ["FirebaseDataConnect"]
- )
+ ),
],
dependencies: [
.package(url: "https://github.com/firebase/firebase-ios-sdk",
@@ -44,7 +44,7 @@ let package = Package(
dependencies: [
.product(name: "GRPC", package: "grpc-swift"),
.product(name: "FirebaseAuth", package: "firebase-ios-sdk"),
- .product(name: "FirebaseAppCheck", package: "firebase-ios-sdk")
+ .product(name: "FirebaseAppCheck", package: "firebase-ios-sdk"),
],
path: "Sources"
@@ -57,7 +57,6 @@ let package = Package(
resources: [
.copy("Resources/fdc-kitchensink"),
]
- )
+ ),
]
)
-
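The trailing commas added to `products`, `dependencies`, and `targets` appear to come from SwiftFormat's `trailingCommas` rule. The payoff is smaller future diffs: with a comma already after the last element of a multiline literal, appending an entry adds one line instead of also editing its predecessor. A trivial self-contained illustration:

```swift
// Appending "FirebaseDataConnectTests" below would leave the existing
// line untouched in a diff, because it already ends with a comma.
let targetNames = [
  "FirebaseDataConnect",
]
print(targetNames.count) // 1
```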
diff --git a/Sources/OptionalVarWrapper.swift b/Sources/OptionalVarWrapper.swift
index 2e289b5..a1391a1 100644
--- a/Sources/OptionalVarWrapper.swift
+++ b/Sources/OptionalVarWrapper.swift
@@ -17,7 +17,7 @@ import Foundation
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
@propertyWrapper
public struct OptionalVariable<Value> where Value: Encodable {
- public private(set) var isSet: Bool = false
+ public private(set) var isSet = false
public var wrappedValue: Value? {
didSet {
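The dropped `: Bool` annotation is pure style; the wrapper still distinguishes "never assigned" from "explicitly set to nil", which a plain Optional cannot express. A minimal usage sketch, assuming the parts of OptionalVarWrapper.swift not shown in this hunk (a no-argument initializer and the wrapper's Encodable conformance); the `MovieUpdateVariables` type is made up:

```swift
struct MovieUpdateVariables: Encodable {
  @OptionalVariable var rating: Int?

  // _rating is the compiler-synthesized wrapper storage,
  // accessible from inside the enclosing type.
  var ratingWasSet: Bool { _rating.isSet }
}

var variables = MovieUpdateVariables()
print(variables.ratingWasSet) // false: never assigned, so the field can be omitted
variables.rating = nil        // didSet fires even when assigning nil,
print(variables.ratingWasSet) // true: an explicit null can now be encoded
```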
diff --git a/Sources/Scalars/LocalDate.swift b/Sources/Scalars/LocalDate.swift
index e51338a..d228e58 100644
--- a/Sources/Scalars/LocalDate.swift
+++ b/Sources/Scalars/LocalDate.swift
@@ -77,8 +77,8 @@ public struct LocalDate: Codable, Equatable, CustomStringConvertible {
}
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
-extension LocalDate {
- public init(from decoder: any Decoder) throws {
+public extension LocalDate {
+ init(from decoder: any Decoder) throws {
let container = try decoder.singleValueContainer()
let localDateString = try container.decode(String.self)
@@ -87,7 +87,7 @@ extension LocalDate {
dateComponents = calendar.dateComponents(components, from: date)
}
- public func encode(to encoder: any Encoder) throws {
+ func encode(to encoder: any Encoder) throws {
var container = encoder.singleValueContainer()
let formattedDate = dateFormatter.string(from: date)
try container.encode(formattedDate)
@@ -97,12 +97,12 @@ extension LocalDate {
// MARK: Equatable, Comparable
@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *)
-extension LocalDate {
- public static func < (lhs: LocalDate, rhs: LocalDate) -> Bool {
+public extension LocalDate {
+ static func < (lhs: LocalDate, rhs: LocalDate) -> Bool {
return lhs.date < rhs.date
}
- public static func == (lhs: LocalDate, rhs: LocalDate) -> Bool {
+ static func == (lhs: LocalDate, rhs: LocalDate) -> Bool {
return lhs.date == rhs.date
}
}
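The `public extension` rewrite is behavior-neutral; every member keeps its public access, just spelled once on the extension. A quick sketch of the decode-and-compare round trip (the `Row` wrapper type is hypothetical, and the snippet assumes a client that imports FirebaseDataConnect):

```swift
import Foundation
import FirebaseDataConnect

struct Row: Codable {
  let date: LocalDate
}

do {
  let earlier = try JSONDecoder().decode(Row.self, from: Data(#"{"date": "2024-05-01"}"#.utf8))
  let later = try JSONDecoder().decode(Row.self, from: Data(#"{"date": "2024-06-15"}"#.utf8))
  print(earlier.date)              // prints the formatted date via CustomStringConvertible
  print(earlier.date < later.date) // true, using the static `<` defined above
} catch {
  print("decoding failed: \(error)")
}
```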
diff --git a/Tests/Integration/KitchenSinkClient.swift b/Tests/Integration/KitchenSinkClient.swift
index 66d0cde..c816f4a 100644
--- a/Tests/Integration/KitchenSinkClient.swift
+++ b/Tests/Integration/KitchenSinkClient.swift
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-
import FirebaseDataConnect
import Foundation
diff --git a/Tests/Integration/KitchenSinkKeys.swift b/Tests/Integration/KitchenSinkKeys.swift
index e28b2e6..3630b13 100644
--- a/Tests/Integration/KitchenSinkKeys.swift
+++ b/Tests/Integration/KitchenSinkKeys.swift
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-
import Foundation
import FirebaseDataConnect
diff --git a/Tests/Integration/KitchenSinkOperations.swift b/Tests/Integration/KitchenSinkOperations.swift
index 383fbb5..7b9b349 100644
--- a/Tests/Integration/KitchenSinkOperations.swift
+++ b/Tests/Integration/KitchenSinkOperations.swift
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-
import FirebaseDataConnect
import Foundation
diff --git a/scripts/README.md b/scripts/README.md
deleted file mode 100644
index 0a7350b..0000000
--- a/scripts/README.md
+++ /dev/null
@@ -1,217 +0,0 @@
-# Firebase Apple Scripts
-
-This directory provides a set of scripts for development, test, and continuous
-integration of the Firebase Apple SDKs.
-
-## [check.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/check.sh)
-
-Used by the
-[check CI workflow](https://github.com/firebase/firebase-ios-sdk/blob/main/.github/workflows/check.yml)
-to run several static analysis checks. It calls the following scripts:
-
-### [style.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/style.sh)
-
-Runs clang-format and swiftformat across the repo.
-
-### [check_whitespace.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/check_whitespace.sh)
-
-Verify there are no files with trailing whitespace.
-
-### [check_filename_spaces.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/check_filename_spaces.sh)
-
-Spaces in filenames are not allowed.
-
-### [check_copyright.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/check_copyright.sh)
-
-Verify existence and format of copyrights.
-
-### [check_test_inclusion.py](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/check_test_inclusion.py)
-
-Test existence check for the internal Firestore Xcode project.
-
-### [check_imports.swift](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/check_imports.swift)
-
-Verify import style complies with
-[repo standards](https://github.com/firebase/firebase-ios-sdk/blob/main/HeadersImports.md).
-
-### [check_firestore_core_api_absl.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/check_firestore_core_api_absl.sh)
-
-Check Firestore `absl` usages for g3 build issues.
-
-### [check_lint.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/check_lint.sh)
-
-Run cpplint.
-
-### [sync_project.rb](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/sync_project.rb)
-
-Used by Firestore to keep the Xcode project in sync after adding/removing tests.
-
-## Other Scripts
-### [binary_to_array.py](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/binary_to_array.py)
-
-Firestore script to convert binary data into a C/C++ array.
-
-### [build.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/build.sh)
-
-Script used by CI jobs to wrap xcodebuild invocations with options.
-
-### [build_non_firebase_sdks.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/build_non_firebase_sdks.sh)
-
-CI script to build binary versions of non-Firebase SDKs for QuickStart testing.
-
-### [build_zip.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/build_zip.sh)
-
-CI script for building the zip distribution.
-
-### [buildcache.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/buildcache.sh)
-
-Clang options for the buildcache GitHub action.
-
-### [change_headers.swift](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/change_headers.swift)
-
-Utility script to update source to repo-relative headers.
-
-### [check_secrets.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/check_secrets.sh)
-
-CI script to test if secrets are available (not running on a fork).
-
-### [collect_metrics.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/collect_metrics.sh)
-
-CI script to collect project health metrics and upload them to a database.
-
-### [configure_test_keychain.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/configure_test_keychain.sh)
-
-CI script to setup the keychain for macOS and Catalyst testing.
-
-### [cpplint.py](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/cpplint.py)
-
-Firestore script for C++ linting.
-
-### [create_pull_request.rb](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/create_pull_request.rb)
-
-Utility used by CI scripts to create issues and PRs.
-
-### [decrypt_gha_secret.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/decrypt_gha_secret.sh)
-
-CI script to decrypt a GitHub Actions secret.
-
-### [encrypt_gha_secret.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/encrypt_gha_secret.sh)
-
-CI script to encrypt a GitHub Actions secret.
-
-### [fuzzing_ci.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/fuzzing_ci.sh)
-
-Firestore CI script to run fuzz testing.
-
-### [generate_access_token.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/generate_access_token.sh)
-
-Script to generate a Firebase access token used by Remote config integration tests.
-
-### [install_prereqs.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/install_prereqs.sh)
-
-Utility CI script to provide configuration for build.sh.
-
-### [localize_podfile.swift](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/localize_podfile.swift)
-
-Utility script to update a Podfile to point to local podspecs.
-
-### [make_release_notes.py](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/make_release_notes.py)
-
-Converts GitHub-flavored markdown changelogs to devsite-compatible release notes.
-
-### [pod_lib_lint.rb](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/pod_lib_lint.rb)
-
-Wrapper script for running `pod lib lint` tests to include dependencies from the monorepo.
-
-### [release_testing_setup.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/release_testing_setup.sh)
-
-Utility script for the release workflow.
-
-### [remove_data.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/remove_data.sh)
-
-Cleanup script for CI workflows.
-
-### [run_database_emulator.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/run_database_emulator.sh)
-
-Run the RTDB emulator.
-
-### [run_firestore_emulator.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/run_firestore_emulator.sh)
-
-Run the Firestore emulator.
-
-### [setup_bundler.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/setup_bundler.sh)
-
-Set up the Ruby bundler.
-
-### [setup_check.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/setup_check.sh)
-
-Install tooling for the check workflow.
-
-### [setup_quickstart.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/setup_quickstart.sh)
-
-Set up a QuickStart for integration testing.
-
-### [setup_quickstart_framework.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/setup_quickstart_framework.sh)
-
-Set up a QuickStart for zip distribution testing.
-
-### [setup_spm_tests.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/setup_spm_tests.sh)
-
-Configuration for SPM testing.
-
-### [spm_test_schemes/](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/spm_test_schemes)
-
-Schemes used by the above script to enable test target schemes.
-
-### [test_archiving.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/test_archiving.sh)
-
-Test Xcode Archive build.
-
-### [test_catalyst.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/test_catalyst.sh)
-
-Test catalyst build.
-
-### [test_quickstart.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/test_quickstart.sh)
-
-Test QuickStart.
-
-### [test_quickstart_framework.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/test_quickstart_framework.sh)
-
-Test QuickStart with the zip distribution.
-
-### [update_xcode_target.rb](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/update_xcode_target.rb)
-
-Script to add a file to an Xcode target.
-
-### [xcresult_logs.py](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/xcresult_logs.py)
-
-Tooling used by `build.sh` to get the log output for an `xcodebuild` invocation.
-
-### [zip_quickstart_test.sh](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/zip_quickstart_test.sh)
-
-Run the tests associated with a QuickStart with a zip distribution.
-
-## Script Subdirectories
-### [create_spec_repo](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/create_spec_repo)
-
-Swift utility to build a podspec repo.
-
-### [gha-encrypted](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/gha-encrypted)
-
-Store for GitHub secret encrypted resources.
-
-### [health_metrics](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/health_metrics)
-
-Code coverage and binary size tooling.
-
-### [lib](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/lib)
-
-Support libraries for `xcresult_logs.py`.
-
-### [lldb](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/lldb)
-
-Firestore utilities.
-
-### [third_party](https://github.com/firebase/firebase-ios-sdk/blob/main/scripts/third_party)
-
-Travis's MIT-licensed retry.sh script.
diff --git a/scripts/api_diff_report/api_diff_report.py b/scripts/api_diff_report/api_diff_report.py
deleted file mode 100644
index 4ec6a6d..0000000
--- a/scripts/api_diff_report/api_diff_report.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import argparse
-import logging
-import os
-import api_info
-
-STATUS_ADD = 'ADDED'
-STATUS_REMOVED = 'REMOVED'
-STATUS_MODIFIED = 'MODIFIED'
-STATUS_ERROR = 'BUILD ERROR'
-API_DIFF_FILE_NAME = 'api_diff_report.markdown'
-
-
-def main():
- logging.getLogger().setLevel(logging.INFO)
-
- args = parse_cmdline_args()
-
- new_api_file = os.path.join(os.path.expanduser(args.pr_branch),
- api_info.API_INFO_FILE_NAME)
- old_api_file = os.path.join(os.path.expanduser(args.base_branch),
- api_info.API_INFO_FILE_NAME)
- if os.path.exists(new_api_file):
- with open(new_api_file) as f:
- new_api_json = json.load(f)
- else:
- new_api_json = {}
- if os.path.exists(old_api_file):
- with open(old_api_file) as f:
- old_api_json = json.load(f)
- else:
- old_api_json = {}
-
- diff = generate_diff_json(new_api_json, old_api_json)
- if diff:
- logging.info(f'json diff: \n{json.dumps(diff, indent=2)}')
- logging.info(f'plain text diff report: \n{generate_text_report(diff)}')
- report = generate_markdown_report(diff)
- logging.info(f'markdown diff report: \n{report}')
- else:
- logging.info('No API Diff Detected.')
- report = ""
-
- output_dir = os.path.expanduser(args.output_dir)
- if not os.path.exists(output_dir):
- os.makedirs(output_dir)
- api_report_path = os.path.join(output_dir, API_DIFF_FILE_NAME)
- logging.info(f'Writing API diff report to {api_report_path}')
- with open(api_report_path, 'w') as f:
- f.write(report)
-
-
-def generate_diff_json(new_api, old_api, level='module'):
- """diff_json only contains module & api that has a change.
-
- format:
- {
- $(module_name_1): {
- "api_types": {
- $(api_type_1): {
- "apis": {
- $(api_1): {
- "declaration": [
- $(api_1_declaration)
- ],
- "sub_apis": {
- $(sub_api_1): {
- "declaration": [
- $(sub_api_1_declaration)
- ]
- },
- },
- "status": $(diff_status)
- }
- }
- }
- }
- }
- }
- """
- NEXT_LEVEL = {'module': 'api_types', 'api_types': 'apis', 'apis': 'sub_apis'}
- next_level = NEXT_LEVEL.get(level)
-
- diff = {}
- for key in set(new_api.keys()).union(old_api.keys()):
- # Added API
- if key not in old_api:
- diff[key] = new_api[key]
- diff[key]['status'] = STATUS_ADD
- if diff[key].get('declaration'):
- diff[key]['declaration'] = [STATUS_ADD] + diff[key]['declaration']
- # Removed API
- elif key not in new_api:
- diff[key] = old_api[key]
- diff[key]['status'] = STATUS_REMOVED
- if diff[key].get('declaration'):
- diff[key]['declaration'] = [STATUS_REMOVED] + diff[key]['declaration']
- # Module Build Error. If a "module" exists but has no
- # content (e.g. doc_path), it must have a build error.
- elif level == 'module' and (not new_api[key]['path']
- or not old_api[key]['path']):
- diff[key] = {'status': STATUS_ERROR}
- # Check diff in child level and diff in declaration
- else:
- child_diff = generate_diff_json(new_api[key][next_level],
- old_api[key][next_level],
- level=next_level) if next_level else {}
- declaration_diff = new_api[key].get('declaration') != old_api[key].get(
- 'declaration') if level in ['apis', 'sub_apis'] else False
-
- # No diff
- if not child_diff and not declaration_diff:
- continue
-
- diff[key] = new_api[key]
- # Changes at child level
- if child_diff:
- diff[key][next_level] = child_diff
-
- # Modified API (changes in API declaration)
- if declaration_diff:
- diff[key]['status'] = STATUS_MODIFIED
- diff[key]['declaration'] = [STATUS_ADD] + \
- new_api[key]['declaration'] + \
- [STATUS_REMOVED] + \
- old_api[key]['declaration']
-
- return diff
-
-
-def generate_text_report(diff, level=0, print_key=True):
- report = ''
- indent_str = ' ' * level
- for key, value in diff.items():
- # filter out ["path", "api_type_link", "api_link", "declaration", "status"]
- if isinstance(value, dict):
- if key in ['api_types', 'apis', 'sub_apis']:
- report += generate_text_report(value, level=level)
- else:
- status_text = f"{value.get('status', '')}:" if 'status' in value else ''
- if status_text:
- if print_key:
- report += f'{indent_str}{status_text} {key}\n'
- else:
- report += f'{indent_str}{status_text}\n'
- if value.get('declaration'):
- for d in value.get('declaration'):
- report += f'{indent_str}{d}\n'
- else:
- report += f'{indent_str}{key}\n'
- report += generate_text_report(value, level=level + 1)
-
- return report
-
-
-def generate_markdown_report(diff, level=0):
- report = ''
- header_str = '#' * (level + 3)
- for key, value in diff.items():
- if isinstance(value, dict):
- if key in ['api_types', 'apis', 'sub_apis']:
- report += generate_markdown_report(value, level=level)
- else:
- current_status = value.get('status')
- if current_status:
- # Module level: Always print out module name and class name as title
- if level in [0, 2]:
- report += f'{header_str} [{current_status}] {key}\n'
- if current_status != STATUS_ERROR: # ADDED,REMOVED,MODIFIED
- report += '<details>\n\n'
- report += f'<summary>[{current_status}] {key}\n'
- report += '</summary>\n\n'
- declarations = value.get('declaration', [])
- sub_report = generate_text_report(value, level=1, print_key=False)
- detail = process_declarations(current_status, declarations,
- sub_report)
- report += f'```diff\n{detail}\n```\n\n</details>\n\n'
- else: # no diff at current level
- report += f'{header_str} {key}\n'
- report += generate_markdown_report(value, level=level + 1)
- # Module level: Always print out divider in the end
- if level == 0:
- report += '-----\n'
-
- return report
-
-
-def process_declarations(current_status, declarations, sub_report):
- """Diff syntax highlighting in Github Markdown."""
- detail = ''
- if current_status == STATUS_MODIFIED:
- for line in (declarations + sub_report.split('\n')):
- if STATUS_ADD in line:
- prefix = '+ '
- continue
- elif STATUS_REMOVED in line:
- prefix = '- '
- continue
- if line:
- detail += f'{prefix}{line}\n'
- else:
- prefix = '+ ' if current_status == STATUS_ADD else '- '
- for line in (declarations + sub_report.split('\n')):
- if line:
- detail += f'{prefix}{line}\n'
-
- return categorize_declarations(detail)
-
-
-def categorize_declarations(detail):
- """Categorize API info by Swift and Objective-C."""
- lines = detail.split('\n')
-
- swift_lines = [line.replace('Swift', '') for line in lines if 'Swift' in line]
- objc_lines = [
- line.replace('Objective-C', '') for line in lines if 'Objective-C' in line
- ]
-
- swift_detail = 'Swift:\n' + '\n'.join(swift_lines) if swift_lines else ''
- objc_detail = 'Objective-C:\n' + '\n'.join(objc_lines) if objc_lines else ''
-
- if not swift_detail and not objc_detail:
- return detail
- else:
- return f'{swift_detail}\n{objc_detail}'.strip()
-
-
-def parse_cmdline_args():
- parser = argparse.ArgumentParser()
- parser.add_argument('-p', '--pr_branch')
- parser.add_argument('-b', '--base_branch')
- parser.add_argument('-o', '--output_dir', default='output_dir')
-
- args = parser.parse_args()
- return args
-
-
-if __name__ == '__main__':
- main()
diff --git a/scripts/api_diff_report/api_info.py b/scripts/api_diff_report/api_info.py
deleted file mode 100644
index bf9687e..0000000
--- a/scripts/api_diff_report/api_info.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import json
-import argparse
-import logging
-import os
-import subprocess
-import icore_module
-from urllib.parse import unquote
-from bs4 import BeautifulSoup
-
-API_INFO_FILE_NAME = 'api_info.json'
-
-
-def main():
- logging.getLogger().setLevel(logging.INFO)
-
- # Parse command-line arguments
- args = parse_cmdline_args()
- output_dir = os.path.expanduser(args.output_dir)
- if not os.path.exists(output_dir):
- os.makedirs(output_dir)
-
- # Detect changed modules based on changed files
- changed_api_files = get_api_files(args.file_list)
- if not changed_api_files:
- logging.info('No Changed API File Detected')
- exit(0)
- changed_modules = icore_module.detect_changed_modules(changed_api_files)
- if not changed_modules:
- logging.info('No Changed Module Detected')
- exit(0)
-
- # Generate API documentation and parse API declarations
- # for each changed module
- api_container = {}
- for _, module in changed_modules.items():
- api_doc_dir = os.path.join(output_dir, 'doc', module['name'])
- build_api_doc(module, api_doc_dir)
-
- if os.path.exists(api_doc_dir):
- module_api_container = parse_module(api_doc_dir)
- api_container[module['name']] = {
- 'path': api_doc_dir,
- 'api_types': module_api_container
- }
- else: # API doc failed to build.
- api_container[module['name']] = {'path': '', 'api_types': {}}
-
- api_info_path = os.path.join(output_dir, API_INFO_FILE_NAME)
- logging.info(f'Writing API data to {api_info_path}')
- with open(api_info_path, 'w') as f:
- f.write(json.dumps(api_container, indent=2))
-
-
-def get_api_files(file_list):
- """Filter out non api files."""
- return [
- f for f in file_list
- if f.endswith('.swift') or (f.endswith('.h') and 'Public' in f)
- ]
-
-
-def build_api_doc(module, output_dir):
- """Use Jazzy to build API documentation for a specific module's source
- code."""
- if module['language'] == icore_module.SWIFT:
- logging.info('------------')
- cmd = f'jazzy --module {module["name"]}'\
- + ' --swift-build-tool xcodebuild'\
- + ' --build-tool-arguments'\
- + f' -scheme,{module["scheme"]}'\
- + ',-destination,generic/platform=iOS,build'\
- + f' --output {output_dir}'
- logging.info(cmd)
- result = subprocess.Popen(cmd,
- universal_newlines=True,
- shell=True,
- stdout=subprocess.PIPE)
- logging.info(result.stdout.read())
- elif module['language'] == icore_module.OBJECTIVE_C:
- logging.info('------------')
- cmd = 'jazzy --objc'\
- + f' --framework-root {module["root_dir"]}'\
- + f' --umbrella-header {module["umbrella_header"]}'\
- + f' --output {output_dir}'
- logging.info(cmd)
- result = subprocess.Popen(cmd,
- universal_newlines=True,
- shell=True,
- stdout=subprocess.PIPE)
- logging.info(result.stdout.read())
-
-
-def parse_module(api_doc_path):
- """Parse "${module}/index.html" and extract necessary information
- e.g.
- {
- $(api_type_1): {
- "api_type_link": $(api_type_link),
- "apis": {
- $(api_name_1): {
- "api_link": $(api_link_1),
- "declaration": [$(swift_declaration), $(objc_declaration)],
- "sub_apis": {
- $(sub_api_name_1): {"declaration": [$(swift_declaration)]},
- $(sub_api_name_2): {"declaration": [$(objc_declaration)]},
- ...
- }
- },
- $(api_name_2): {
- ...
- },
- }
- },
- $(api_type_2): {
- ..
- },
- }
- """
- module_api_container = {}
- # Read the HTML content from the file
- index_link = f'{api_doc_path}/index.html'
- with open(index_link, 'r') as file:
- html_content = file.read()
-
- # Parse the HTML content
- soup = BeautifulSoup(html_content, 'html.parser')
-
- # Locate the element with class="nav-groups"
- nav_groups_element = soup.find('ul', class_='nav-groups')
- # Extract data and convert to JSON format
- for nav_group in nav_groups_element.find_all('li', class_='nav-group-name'):
- api_type = nav_group.find('a').text
- api_type_link = nav_group.find('a')['href']
-
- apis = {}
- for nav_group_task in nav_group.find_all('li', class_='nav-group-task'):
- api_name = nav_group_task.find('a').text
- api_link = nav_group_task.find('a')['href']
- apis[api_name] = {'api_link': api_link, 'declaration': [], 'sub_apis': {}}
-
- module_api_container[api_type] = {
- 'api_type_link': api_type_link,
- 'apis': apis
- }
-
- parse_api(api_doc_path, module_api_container)
-
- return module_api_container
-
-
-def parse_api(doc_path, module_api_container):
- """Parse API html and extract necessary information.
-
- e.g. ${module}/Classes.html
- """
- for api_type, api_type_abstract in module_api_container.items():
- api_type_link = f'{doc_path}/{unquote(api_type_abstract["api_type_link"])}'
- api_data_container = module_api_container[api_type]['apis']
- with open(api_type_link, 'r') as file:
- html_content = file.read()
-
- # Parse the HTML content
- soup = BeautifulSoup(html_content, 'html.parser')
- for api in soup.find('div', class_='task-group').find_all('li',
- class_='item'):
- api_name = api.find('a', class_='token').text
- for api_declaration in api.find_all('div', class_='language'):
- api_declaration_text = ' '.join(api_declaration.stripped_strings)
- api_data_container[api_name]['declaration'].append(api_declaration_text)
-
- for api, api_abstruct in api_type_abstract['apis'].items():
- if api_abstruct['api_link'].endswith('.html'):
- parse_sub_api(f'{doc_path}/{unquote(api_abstruct["api_link"])}',
- api_data_container[api]['sub_apis'])
-
-
-def parse_sub_api(api_link, sub_api_data_container):
- """Parse SUB_API html and extract necessary information.
-
- e.g. ${module}/Classes/${class_name}.html
- """
- with open(api_link, 'r') as file:
- html_content = file.read()
-
- # Parse the HTML content
- soup = BeautifulSoup(html_content, 'html.parser')
- for s_api_group in soup.find_all('div', class_='task-group'):
- for s_api in s_api_group.find_all('li', class_='item'):
- api_name = s_api.find('a', class_='token').text
- sub_api_data_container[api_name] = {'declaration': []}
- for api_declaration in s_api.find_all('div', class_='language'):
- api_declaration_text = ' '.join(api_declaration.stripped_strings)
- sub_api_data_container[api_name]['declaration'].append(
- api_declaration_text)
-
-
-def parse_cmdline_args():
- parser = argparse.ArgumentParser()
- parser.add_argument('-f', '--file_list', nargs='+', default=[])
- parser.add_argument('-o', '--output_dir', default='output_dir')
-
- args = parser.parse_args()
- return args
-
-
-if __name__ == '__main__':
- main()
diff --git a/scripts/api_diff_report/icore_module.py b/scripts/api_diff_report/icore_module.py
deleted file mode 100644
index 47ef00b..0000000
--- a/scripts/api_diff_report/icore_module.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import logging
-import json
-import subprocess
-
-SWIFT = 'Swift'
-OBJECTIVE_C = 'Objective-C'
-
-# List of Swift and Objective-C modules
-MODULE_LIST = [
- 'FirebaseABTesting',
- 'FirebaseAnalytics', # Not buildable from source
- 'FirebaseAnalyticsOnDeviceConversion', # Not buildable.
- 'FirebaseAnalyticsSwift',
- 'FirebaseAppCheck',
- 'FirebaseAppDistribution',
- 'FirebaseAuth',
- 'FirebaseCore',
- 'FirebaseCrashlytics',
- 'FirebaseDatabase',
- 'FirebaseDatabaseSwift',
- 'FirebaseDynamicLinks',
- 'FirebaseFirestore',
- 'FirebaseFirestoreSwift',
- 'FirebaseFunctions',
- 'FirebaseInAppMessaging',
- 'FirebaseInAppMessagingSwift',
- 'FirebaseInstallations',
- 'FirebaseMessaging',
- 'FirebaseMLModelDownloader',
- 'FirebasePerformance',
- 'FirebaseRemoteConfig',
- 'FirebaseRemoteConfigSwift',
- # Not buildable. No scheme named "FirebaseSharedSwift"
- 'FirebaseSharedSwift',
- 'FirebaseStorage',
- # Not buildable. NO "source_files"
- 'GoogleAppMeasurement',
- # Not buildable. NO "source_files"
- 'GoogleAppMeasurementOnDeviceConversion'
-]
-
-
-def main():
- module_info()
-
-
-def detect_changed_modules(changed_api_files):
- """Detect changed modules based on changed API files."""
- all_modules = module_info()
- changed_modules = {}
- for file_path in changed_api_files:
- for k, v in all_modules.items():
- if v['root_dir'] and v['root_dir'] in file_path:
- changed_modules[k] = v
- break
-
- logging.info(f'changed_modules:\n{json.dumps(changed_modules, indent=4)}')
- return changed_modules
-
-
-def module_info():
- """retrieve module info in MODULE_LIST from `.podspecs`
- The module info helps to build Jazzy
- includes: module name, source_files, public_header_files,
- language, umbrella_header, framework_root
- """
- module_from_podspecs = module_info_from_podspecs()
- module_list = {}
- for k, v in module_from_podspecs.items():
- if k in MODULE_LIST:
- if k not in module_list:
- module_list[k] = v
- module_list[k]['language'] = OBJECTIVE_C if v.get(
- 'public_header_files') else SWIFT
- module_list[k]['scheme'] = get_scheme(k)
- module_list[k]['umbrella_header'] = get_umbrella_header(
- k, v.get('public_header_files'))
- module_list[k]['root_dir'] = get_root_dir(k, v.get('source_files'))
-
- logging.info(f'all_module:\n{json.dumps(module_list, indent=4)}')
- return module_list
-
-
-def get_scheme(module_name):
- """Jazzy documentation Info SWIFT only.
-
- Get scheme from module name in .podspecs Assume the scheme is the
- same as the module name:
- """
- MODULE_SCHEME_PATCH = {
- 'FirebaseInAppMessagingSwift': 'FirebaseInAppMessagingSwift-Beta',
- }
- if module_name in MODULE_SCHEME_PATCH:
- return MODULE_SCHEME_PATCH[module_name]
- return module_name
-
-
-def get_umbrella_header(module_name, public_header_files):
- """Jazzy documentation Info OBJC only Get umbrella_header from
- public_header_files in .podspecs Assume the umbrella_header is with the
- format:
-
- {module_name}/Sources/Public/{module_name}/{module_name}.h
- """
- if public_header_files:
- if isinstance(public_header_files, list):
- return public_header_files[0].replace('*', module_name)
- elif isinstance(public_header_files, str):
- return public_header_files.replace('*', module_name)
- return ''
-
-
-def get_root_dir(module_name, source_files):
- """Get source code root_dir from source_files in .podspecs Assume the
- root_dir is with the format:
-
- {module_name}/Sources or {module_name}/Source
- """
- MODULE_ROOT_PATCH = {
- 'FirebaseFirestore': 'Firestore/Source',
- 'FirebaseFirestoreSwift': 'Firestore/Swift/Source',
- 'FirebaseCrashlytics': 'Crashlytics/Crashlytics',
- 'FirebaseInAppMessagingSwift': 'FirebaseInAppMessaging/Swift/Source',
- }
- if module_name in MODULE_ROOT_PATCH:
- return MODULE_ROOT_PATCH[module_name]
- if source_files:
- for source_file in source_files:
- if f'{module_name}/Sources' in source_file:
- return f'{module_name}/Sources'
- if f'{module_name}/Source' in source_file:
- return f'{module_name}/Source'
- return ''
-
-
-def module_info_from_podspecs(root_dir=os.getcwd()):
- result = {}
- for filename in os.listdir(root_dir):
- if filename.endswith('.podspec'):
- podspec_data = parse_podspec(filename)
- source_files = podspec_data.get('source_files')
- if not podspec_data.get('source_files') and podspec_data.get('ios'):
- source_files = podspec_data.get('ios').get('source_files')
- result[podspec_data['name']] = {
- 'name': podspec_data['name'],
- 'source_files': source_files,
- 'public_header_files': podspec_data.get('public_header_files')
- }
- return result
-
-
-def parse_podspec(podspec_file):
- result = subprocess.run(f'pod ipc spec {podspec_file}',
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- text=True,
- shell=True)
- if result.returncode != 0:
- logging.info(f'Error: {result.stderr}')
- return None
-
- # Parse the JSON output
- podspec_data = json.loads(result.stdout)
- return podspec_data
-
-
-if __name__ == '__main__':
- main()
diff --git a/scripts/api_diff_report/pr_commenter.py b/scripts/api_diff_report/pr_commenter.py
deleted file mode 100644
index 3b3dda7..0000000
--- a/scripts/api_diff_report/pr_commenter.py
+++ /dev/null
@@ -1,215 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the 'License');
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an 'AS IS' BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import json
-import logging
-import requests
-import argparse
-import api_diff_report
-import datetime
-import pytz
-
-from requests.adapters import HTTPAdapter
-from requests.packages.urllib3.util.retry import Retry
-
-STAGES_PROGRESS = "progress"
-STAGES_END = "end"
-
-TITLE_PROGESS = "## ⏳ Detecting API diff in progress...\n"
-TITLE_END_DIFF = '## Apple API Diff Report\n'
-TITLE_END_NO_DIFF = "## ✅ No API diff detected\n"
-
-COMMENT_HIDDEN_IDENTIFIER = '\r\n\r\n'
-GITHUB_API_URL = 'https://api.github.com/repos/firebase/firebase-ios-sdk'
-PR_LABEL = "public-api-change"
-
-
-def main():
- logging.getLogger().setLevel(logging.INFO)
-
- # Parse command-line arguments
- args = parse_cmdline_args()
-
- stage = args.stage
- token = args.token
- pr_number = args.pr_number
- commit = args.commit
- run_id = args.run_id
-
- report = ""
- comment_id = get_comment_id(token, pr_number, COMMENT_HIDDEN_IDENTIFIER)
- if stage == STAGES_PROGRESS:
- if comment_id:
- report = COMMENT_HIDDEN_IDENTIFIER
- report += generate_markdown_title(TITLE_PROGESS, commit, run_id)
- update_comment(token, comment_id, report)
- delete_label(token, pr_number, PR_LABEL)
- elif stage == STAGES_END:
- diff_report_file = os.path.join(os.path.expanduser(args.report),
- api_diff_report.API_DIFF_FILE_NAME)
- with open(diff_report_file, 'r') as file:
- report_content = file.read()
- if report_content: # Diff detected
- report = COMMENT_HIDDEN_IDENTIFIER + generate_markdown_title(
- TITLE_END_DIFF, commit, run_id) + report_content
- if comment_id:
- update_comment(token, comment_id, report)
- else:
- add_comment(token, pr_number, report)
- add_label(token, pr_number, PR_LABEL)
- else: # No diff
- if comment_id:
- report = COMMENT_HIDDEN_IDENTIFIER + generate_markdown_title(
- TITLE_END_NO_DIFF, commit, run_id)
- update_comment(token, comment_id, report)
- delete_label(token, pr_number, PR_LABEL)
-
-
-def generate_markdown_title(title, commit, run_id):
- pst_now = datetime.datetime.utcnow().astimezone(
- pytz.timezone('America/Los_Angeles'))
- return (
- title + 'Commit: %s\n' % commit
- + 'Last updated: %s \n' % pst_now.strftime('%a %b %e %H:%M %Z %G')
- + '**[View workflow logs & download artifacts]'
- + '(https://github.com/firebase/firebase-ios-sdk/actions/runs/%s)**\n\n'
- % run_id + '-----\n')
-
-
-RETRIES = 3
-BACKOFF = 5
-RETRY_STATUS = (403, 500, 502, 504)
-TIMEOUT = 5
-
-
-def requests_retry_session(retries=RETRIES,
- backoff_factor=BACKOFF,
- status_forcelist=RETRY_STATUS):
- session = requests.Session()
- retry = Retry(total=retries,
- read=retries,
- connect=retries,
- backoff_factor=backoff_factor,
- status_forcelist=status_forcelist)
- adapter = HTTPAdapter(max_retries=retry)
- session.mount('http://', adapter)
- session.mount('https://', adapter)
- return session
-
-
-def get_comment_id(token, issue_number, comment_identifier):
- comments = list_comments(token, issue_number)
- for comment in comments:
- if comment_identifier in comment['body']:
- return comment['id']
- return None
-
-
-def list_comments(token, issue_number):
- """https://docs.github.com/en/rest/reference/issues#list-issue-comments"""
- url = f'{GITHUB_API_URL}/issues/{issue_number}/comments'
- headers = {
- 'Accept': 'application/vnd.github.v3+json',
- 'Authorization': f'token {token}'
- }
- with requests_retry_session().get(url, headers=headers,
- timeout=TIMEOUT) as response:
- logging.info("list_comments: %s response: %s", url, response)
- return response.json()
-
-
-def add_comment(token, issue_number, comment):
- """https://docs.github.com/en/rest/reference/issues#create-an-issue-comment"""
- url = f'{GITHUB_API_URL}/issues/{issue_number}/comments'
- headers = {
- 'Accept': 'application/vnd.github.v3+json',
- 'Authorization': f'token {token}'
- }
- data = {'body': comment}
- with requests.post(url,
- headers=headers,
- data=json.dumps(data),
- timeout=TIMEOUT) as response:
- logging.info("add_comment: %s response: %s", url, response)
-
-
-def update_comment(token, comment_id, comment):
- """https://docs.github.com/en/rest/reference/issues#update-an-issue-comment"""
- url = f'{GITHUB_API_URL}/issues/comments/{comment_id}'
- headers = {
- 'Accept': 'application/vnd.github.v3+json',
- 'Authorization': f'token {token}'
- }
- data = {'body': comment}
- with requests_retry_session().patch(url,
- headers=headers,
- data=json.dumps(data),
- timeout=TIMEOUT) as response:
- logging.info("update_comment: %s response: %s", url, response)
-
-
-def delete_comment(token, comment_id):
- """https://docs.github.com/en/rest/reference/issues#delete-an-issue-comment"""
- url = f'{GITHUB_API_URL}/issues/comments/{comment_id}'
- headers = {
- 'Accept': 'application/vnd.github.v3+json',
- 'Authorization': f'token {token}'
- }
- with requests.delete(url, headers=headers, timeout=TIMEOUT) as response:
- logging.info("delete_comment: %s response: %s", url, response)
-
-
-def add_label(token, issue_number, label):
- """https://docs.github.com/en/rest/reference/issues#add-labels-to-an-issue"""
- url = f'{GITHUB_API_URL}/issues/{issue_number}/labels'
- headers = {
- 'Accept': 'application/vnd.github.v3+json',
- 'Authorization': f'token {token}'
- }
- data = [label]
- with requests.post(url,
- headers=headers,
- data=json.dumps(data),
- timeout=TIMEOUT) as response:
- logging.info("add_label: %s response: %s", url, response)
-
-
-def delete_label(token, issue_number, label):
- """https://docs.github.com/en/rest/reference/issues#delete-a-label"""
- url = f'{GITHUB_API_URL}/issues/{issue_number}/labels/{label}'
- headers = {
- 'Accept': 'application/vnd.github.v3+json',
- 'Authorization': f'token {token}'
- }
- with requests.delete(url, headers=headers, timeout=TIMEOUT) as response:
- logging.info("delete_label: %s response: %s", url, response)
-
-
-def parse_cmdline_args():
- parser = argparse.ArgumentParser()
- parser.add_argument('-s', '--stage')
- parser.add_argument('-r', '--report')
- parser.add_argument('-t', '--token')
- parser.add_argument('-n', '--pr_number')
- parser.add_argument('-c', '--commit')
- parser.add_argument('-i', '--run_id')
-
- args = parser.parse_args()
- return args
-
-
-if __name__ == '__main__':
- main()
diff --git a/scripts/binary_to_array.py b/scripts/binary_to_array.py
deleted file mode 100755
index 3101806..0000000
--- a/scripts/binary_to_array.py
+++ /dev/null
@@ -1,313 +0,0 @@
-#!/usr/bin/env python2
-
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""Utility to convert binary data into a C/C++ array.
-
-Usage: %s --input=input_file.bin [--output_source=output_source.cc]
- [--output_header=output_header.h] [--cpp_namespace=namespace]
- [--header_guard=HEADER_GUARD_TEXT] [--array=array_c_identifier]
- [--array_size=array_size_c_identifier] [--filename=override_filename]
- [--filename_identifier=filename_c_identifier]
-
-By default, the output source file will be named the same as the input file,
-but with .cc as the extension; the output header file will be named the
-same as the input file but with .h as the extension.
-
-By default, the data will be in an array named $NAME_data and the size will
-be in a constant named $NAME_length, and the filename will be stored in
-$NAME_filename. In all these cases, $NAME is the input filename (sans path and
-extension) with runs of non-alphanumeric characters changed to underscores. The
-header guard will be generated from the output header filename in a similar way.
-
-By default, the data will be placed in the root namespace. If the data is placed
-in the root namespace, it will be declared as a C array (using extern "C" if
-compiled in C++ mode).
-
-The actual size of $NAME_data is $NAME_length + 1, where it contains an extra
-0x00 at the end. When data is actually text, $NAME_data can be used as a valid C
-string directly.
-"""
-
-from os import path
-from re import sub
-import argparse
-import logging
-import os
-
-arg_parser = argparse.ArgumentParser()
-
-arg_parser.add_argument("input",
- help="Input file containing binary data to embed.")
-arg_parser.add_argument("--output_source",
- help="Output source file, defining the array data.")
-arg_parser.add_argument("--output_header",
- help="Output header file, declaring the array data.")
-arg_parser.add_argument("--array", help="Identifier for the array.")
-arg_parser.add_argument("--array_size", help="Identifier for the array size.")
-arg_parser.add_argument("--filename", help="Override file name in code.")
-arg_parser.add_argument("--filename_identifier",
- help="Where to put the filename.")
-arg_parser.add_argument("--header_guard",
- help="Header guard to #define in the output header.")
-arg_parser.add_argument("--cpp_namespace",
- help="C++ namespace to use. "
- "If blank, will generate a C array.")
-
-# How many hex bytes to display in a line. Each "0x00, " takes 6 characters, so
-# a width of 12 lets us fit within 80 characters.
-WIDTH = 12
-
-
-def header(header_guard, namespaces, array_name, array_size_name, fileid):
- """Return a C/C++ header for the given array.
-
- Args:
- header_guard: Name of the HEADER_GUARD to define.
- namespaces: List of namespaces, outer to inner.
- array_name: Name of the array.
- array_size_name: Name of the array size constant.
- fileid: Name of the identifier containing the file name.
-
- Returns:
- A list of strings containing the C/C++ header file, line-by-line.
- """
-
- data = []
- data.extend([
- "// Copyright 2019 Google Inc. All Rights Reserved.",
- "",
- "#ifndef %s" % header_guard,
- "#define %s" % header_guard,
- "",
- "#include ",
- ""
- ])
- if namespaces:
- data.extend([
- "namespace %s {" % ns for ns in namespaces
- ])
- else:
- data.extend([
- "#if defined(__cplusplus)",
- "extern \"C\" {",
- "#endif // defined(__cplusplus)"])
-
- data.extend([
- "",
- "extern const size_t %s;" % array_size_name,
- "extern const unsigned char %s[];" % array_name,
- "extern const char %s[];" % fileid,
- ])
-
- data.extend([
- ""
- ])
- if namespaces:
- data.extend([
- "} // namespace %s" % ns for ns in reversed(namespaces)
- ])
- else:
- data.extend([
- "#if defined(__cplusplus)",
- "} // extern \"C\"",
- "#endif // defined(__cplusplus)"
- ])
- data.extend([
- "",
- "#endif // %s" % header_guard,
- ""
- ])
- return data
-
-
-def source(namespaces, array_name, array_size_name, fileid, filename,
- input_bytes, include_name):
- """Return a C/C++ source file for the given array.
-
- Args:
- namespaces: List of namespaces, outer to inner.
- array_name: Name of the array.
- array_size_name: Name of the array size constant.
- fileid: Name of the identifier containing the filename.
- filename: The original data filename itself.
- input_bytes: Binary data to put into the array.
- include_name: Name of the corresponding header file to include.
-
- Returns:
- A string containing the C/C++ source file.
- """
-
- if os.name == 'nt':
- # Force forward slashes on Windows
- include_name = include_name.replace('\\', '/')
-
- data = []
- data.extend([
- "// Copyright 2019 Google Inc. All Rights Reserved.",
- "",
- "#include \"%s\"" % include_name,
- "",
- "#include ",
- ""
- ])
- if namespaces:
- data.extend([
- "namespace %s {" % ns for ns in namespaces
- ])
- else:
- data.extend([
- "#if defined(__cplusplus)",
- "extern \"C\" {",
- "#endif // defined(__cplusplus)"])
-
- data.extend([
- "",
- "extern const size_t %s;" % array_size_name,
- "extern const char %s[];" % fileid,
- "extern const unsigned char %s[];" % array_name, "",
- "const unsigned char %s[] = {" % array_name
- ])
- length = len(input_bytes)
- line = ""
- for idx in range(0, length):
- if idx % WIDTH == 0:
- line += " "
- else:
- line += " "
- line += "0x%02x," % input_bytes[idx]
- if idx % WIDTH == WIDTH - 1:
- data.append(line)
- line = ""
- data.append(line)
- data.append(" 0x00 // Extra \\0 to make it a C string")
-
- data.extend([
- "};",
- "",
- "const size_t %s =" % array_size_name,
- " sizeof(%s) - 1;" % array_name,
- "",
- "const char %s[] = \"%s\";" % (fileid, filename),
- "",
- ])
-
- if namespaces:
- data.extend([
- "} // namespace %s" % ns for ns in namespaces
- ][::-1]) # close namespaces in reverse order
- else:
- data.extend([
- "#if defined(__cplusplus)",
- "} // extern \"C\"",
- "#endif // defined(__cplusplus)"
- ])
- data.extend([
- ""
- ])
- return data
-
-
-def _get_repo_root():
- """Returns the root of the source repository.
- """
-
- scripts_dir = os.path.abspath(os.path.dirname(__file__))
- assert os.path.basename(scripts_dir) == 'scripts'
-
- root_dir = os.path.dirname(scripts_dir)
- assert os.path.isdir(os.path.join(root_dir, '.github'))
-
- return root_dir
-
-
-def main():
- """Read an binary input file and output to a C/C++ source file as an array.
- """
-
- args = arg_parser.parse_args()
-
- input_file = args.input
- input_file_base = os.path.splitext(args.input)[0]
-
- output_source = args.output_source
- if not output_source:
- output_source = input_file_base + ".cc"
- logging.debug("Using default --output_source='%s'", output_source)
-
- output_header = args.output_header
- if not output_header:
- output_header = input_file_base + ".h"
- logging.debug("Using default --output_header='%s'", output_header)
-
- root_dir = _get_repo_root()
- absolute_dir = path.dirname(output_header)
-
- relative_dir = path.relpath(absolute_dir, root_dir)
- relative_header_path = path.join(relative_dir, path.basename(output_header))
-
- identifier_base = sub("[^0-9a-zA-Z]+", "_", path.basename(input_file_base))
- array_name = args.array
- if not array_name:
- array_name = identifier_base + "_data"
- logging.debug("Using default --array='%s'", array_name)
-
- array_size_name = args.array_size
- if not array_size_name:
- array_size_name = identifier_base + "_size"
- logging.debug("Using default --array_size='%s'", array_size_name)
-
- fileid = args.filename_identifier
- if not fileid:
- fileid = identifier_base + "_filename"
- logging.debug("Using default --filename_identifier='%s'", fileid)
-
- filename = args.filename
- if filename is None: # but not if it's the empty string
- filename = path.basename(input_file)
- logging.debug("Using default --filename='%s'", filename)
-
- header_guard = args.header_guard
- if not header_guard:
- header_guard = sub("[^0-9a-zA-Z]+", "_", relative_header_path).upper() + '_'
- # Avoid double underscores to stay compliant with the Standard.
- header_guard = sub("[_]+", "_", header_guard)
- logging.debug("Using default --header_guard='%s'", header_guard)
-
- namespace = args.cpp_namespace
- namespaces = namespace.split("::") if namespace else []
-
- with open(input_file, "rb") as infile:
- input_bytes = bytearray(infile.read())
- logging.debug("Read %d bytes from %s", len(input_bytes), input_file)
-
- header_text = "\n".join(header(header_guard, namespaces, array_name,
- array_size_name, fileid))
- source_text = "\n".join(source(namespaces, array_name, array_size_name,
- fileid, filename, input_bytes,
- relative_header_path))
-
- with open(output_header, "w") as hdr:
- hdr.write(header_text)
- logging.debug("Wrote header file %s", output_header)
-
- with open(output_source, "w") as src:
- src.write(source_text)
- logging.debug("Wrote source file %s", output_source)
-
-
-if __name__ == "__main__":
- main()
diff --git a/scripts/build.sh b/scripts/build.sh
deleted file mode 100755
index f44d3d2..0000000
--- a/scripts/build.sh
+++ /dev/null
@@ -1,701 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2018 Google
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# USAGE: build.sh product [platform] [method]
-#
-# Builds the given product for the given platform using the given build method
-
-function pod_gen() {
- # Call pod gen with a podspec and additional optional arguments.
- bundle exec pod gen --local-sources=./ --sources=https://github.com/firebase/SpecsDev.git,https://github.com/firebase/SpecsStaging.git,https://cdn.cocoapods.org/ "$@"
-}
-
-set -euo pipefail
-
-if [[ $# -lt 1 ]]; then
- cat 1>&2 <<EOF
-USAGE: $0 product [platform] [method]
-EOF
- exit 1
-fi
-
- if [[ $result == 65 ]]; then
- echo "xcodebuild exited with 65, retrying" 1>&2
- sleep 5
-
- result=0
- xcodebuild "$@" | tee xcodebuild.log | "${xcpretty_cmd[@]}" || result=$?
- fi
-
- if [[ $result != 0 ]]; then
- echo "xcodebuild exited with $result" 1>&2
-
- ExportLogs "$@"
- return $result
- fi
-}
-
-# Exports any logs output captured in the xcresult
-function ExportLogs() {
- python "${scripts_dir}/xcresult_logs.py" "$@"
-}
-
-if [[ "$xcode_major" -lt 15 ]]; then
- ios_flags=(
- -sdk 'iphonesimulator'
- -destination 'platform=iOS Simulator,name=iPhone 14'
- )
-else
- ios_flags=(
- -sdk 'iphonesimulator'
- -destination 'platform=iOS Simulator,name=iPhone 15'
- )
-fi
-
-ios_device_flags=(
- -sdk 'iphoneos'
- -destination 'generic/platform=iOS'
-)
-
-ipad_flags=(
- -sdk 'iphonesimulator'
- -destination 'platform=iOS Simulator,name=iPad Pro (9.7-inch)'
-)
-
-macos_flags=(
- -sdk 'macosx'
- -destination 'platform=OS X,arch=x86_64'
-)
-tvos_flags=(
- -sdk "appletvsimulator"
- -destination 'platform=tvOS Simulator,name=Apple TV'
-)
-watchos_flags=(
- -destination 'platform=watchOS Simulator,name=Apple Watch Series 7 (45mm)'
-)
-visionos_flags=(
- -destination 'platform=visionOS Simulator'
-)
-catalyst_flags=(
- ARCHS=x86_64 VALID_ARCHS=x86_64 SUPPORTS_MACCATALYST=YES -sdk macosx
- -destination platform="macOS,variant=Mac Catalyst,arch=x86_64" TARGETED_DEVICE_FAMILY=2
- CODE_SIGN_IDENTITY=- CODE_SIGNING_REQUIRED=NO CODE_SIGNING_ALLOWED=NO
-)
-
-# Compute standard flags for all platforms
-case "$platform" in
- iOS)
- xcb_flags=("${ios_flags[@]}")
- gen_platform=ios
- ;;
-
- iOS-device)
- xcb_flags=("${ios_device_flags[@]}")
- gen_platform=ios
- ;;
-
- iPad)
- xcb_flags=("${ipad_flags[@]}")
- ;;
-
- macOS)
- xcb_flags=("${macos_flags[@]}")
- gen_platform=macos
- ;;
-
- tvOS)
- xcb_flags=("${tvos_flags[@]}")
- gen_platform=tvos
- ;;
-
- watchOS)
- xcb_flags=("${watchos_flags[@]}")
- ;;
-
- visionOS)
- xcb_flags=("${visionos_flags[@]}")
- ;;
-
- catalyst)
- xcb_flags=("${catalyst_flags[@]}")
- ;;
-
- all)
- xcb_flags=()
- ;;
-
- Linux)
- xcb_flags=()
- ;;
-
- *)
- echo "Unknown platform '$platform'" 1>&2
- exit 1
- ;;
-esac
-
-xcb_flags+=(
- ONLY_ACTIVE_ARCH=YES
- CODE_SIGNING_REQUIRED=NO
- CODE_SIGNING_ALLOWED=YES
- COMPILER_INDEX_STORE_ENABLE=NO
-)
-
-source scripts/buildcache.sh
-xcb_flags=("${xcb_flags[@]}" "${buildcache_xcb_flags[@]}")
-
-# TODO(varconst): Add --warn-unused-vars and --warn-uninitialized.
-# Right now, it makes the log overflow on Travis because many of our
-# dependencies don't build cleanly this way.
-cmake_options=(
- -Wdeprecated
- -DCMAKE_BUILD_TYPE=Debug
-)
-
-if [[ -n "${SANITIZERS:-}" ]]; then
- for sanitizer in $SANITIZERS; do
- case "$sanitizer" in
- asan)
- xcb_flags+=(
- -enableAddressSanitizer YES
- )
- cmake_options+=(
- -DWITH_ASAN=ON
- )
- ;;
-
- tsan)
- xcb_flags+=(
- -enableThreadSanitizer YES
- )
- cmake_options+=(
- -DWITH_TSAN=ON
- )
- ;;
-
- ubsan)
- xcb_flags+=(
- -enableUndefinedBehaviorSanitizer YES
- )
- cmake_options+=(
- -DWITH_UBSAN=ON
- )
- ;;
-
- *)
- echo "Unknown sanitizer '$sanitizer'" 1>&2
- exit 1
- ;;
- esac
- done
-fi
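-
-# For example, running with SANITIZERS="asan tsan" (illustrative) would append
-# both the Address and Thread Sanitizer flags above to the xcodebuild and
-# cmake invocations.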
-
-
-case "$product-$platform-$method" in
- FirebasePod-iOS-*)
- RunXcodebuild \
- -workspace 'CoreOnly/Tests/FirebasePodTest/FirebasePodTest.xcworkspace' \
- -scheme "FirebasePodTest" \
- "${xcb_flags[@]}" \
- build
- ;;
-
- Auth-*-xcodebuild)
- if check_secrets; then
- RunXcodebuild \
- -workspace 'FirebaseAuth/Tests/Sample/AuthSample.xcworkspace' \
- -scheme "Auth_ApiTests" \
- "${xcb_flags[@]}" \
- test
-
- RunXcodebuild \
- -workspace 'FirebaseAuth/Tests/Sample/AuthSample.xcworkspace' \
- -scheme "SwiftApiTests" \
- "${xcb_flags[@]}" \
- test
- fi
- ;;
-
- CombineSwift-*-xcodebuild)
- pod_gen FirebaseCombineSwift.podspec --platforms=ios
- RunXcodebuild \
- -workspace 'gen/FirebaseCombineSwift/FirebaseCombineSwift.xcworkspace' \
- -scheme "FirebaseCombineSwift-Unit-unit" \
- "${xcb_flags[@]}" \
- build \
- test
- ;;
-
- InAppMessaging-*-xcodebuild)
- RunXcodebuild \
- -workspace 'FirebaseInAppMessaging/Tests/Integration/DefaultUITestApp/InAppMessagingDisplay-Sample.xcworkspace' \
- -scheme 'FiamDisplaySwiftExample' \
- "${xcb_flags[@]}" \
- test
- ;;
-
- Firestore-*-xcodebuild)
- "${firestore_emulator}" start
- trap '"${firestore_emulator}" stop' ERR EXIT
-
- RunXcodebuild \
- -workspace 'Firestore/Example/Firestore.xcworkspace' \
- -scheme "Firestore_IntegrationTests_$platform" \
- -enableCodeCoverage YES \
- "${xcb_flags[@]}" \
- test
- ;;
-
- Firestore-macOS-cmake | Firestore-Linux-cmake)
- "${firestore_emulator}" start
- trap '"${firestore_emulator}" stop' ERR EXIT
-
- (
- test -d build || mkdir build
- cd build
-
- echo "Preparing cmake build ..."
- cmake -G Ninja "${cmake_options[@]}" ..
-
- echo "Building cmake build ..."
- ninja -k 10 all
- ctest --verbose
- )
- ;;
-
- SymbolCollision-*-*)
- RunXcodebuild \
- -workspace 'SymbolCollisionTest/SymbolCollisionTest.xcworkspace' \
- -scheme "SymbolCollisionTest" \
- "${xcb_flags[@]}" \
- build
- ;;
-
- # TODO(#12205) Restore this test to "test" instead of "build"
- Messaging-*-xcodebuild)
- pod_gen FirebaseMessaging.podspec --platforms=ios
-
- # Add GoogleService-Info.plist to generated Test Wrapper App.
- ruby ./scripts/update_xcode_target.rb gen/FirebaseMessaging/Pods/Pods.xcodeproj \
- AppHost-FirebaseMessaging-Unit-Tests \
- ../../../FirebaseMessaging/Tests/IntegrationTests/Resources/GoogleService-Info.plist
-
- if check_secrets; then
- # Integration tests are only run on iOS to minimize flake failures.
- RunXcodebuild \
- -workspace 'gen/FirebaseMessaging/FirebaseMessaging.xcworkspace' \
- -scheme "FirebaseMessaging-Unit-integration" \
- "${ios_flags[@]}" \
- "${xcb_flags[@]}" \
- build
- fi
- ;;
-
- MessagingSample-*-*)
- if check_secrets; then
- RunXcodebuild \
- -workspace 'FirebaseMessaging/Apps/Sample/Sample.xcworkspace' \
- -scheme "Sample" \
- "${xcb_flags[@]}" \
- build
- fi
- ;;
-
- SwiftUISample-*-*)
- if check_secrets; then
- RunXcodebuild \
- -workspace 'FirebaseMessaging/Apps/SwiftUISample/SwiftUISample.xcworkspace' \
- -scheme "SwiftUISample" \
- "${xcb_flags[@]}" \
- build
- fi
- ;;
-
- MessagingSampleStandaloneWatchApp-*-*)
- if check_secrets; then
- RunXcodebuild \
- -workspace 'FirebaseMessaging/Apps/SampleStandaloneWatchApp/SampleStandaloneWatchApp.xcworkspace' \
- -scheme "SampleStandaloneWatchApp Watch App" \
- "${xcb_flags[@]}" \
- build
- fi
- ;;
-
- MLModelDownloaderSample-*-*)
- if check_secrets; then
- RunXcodebuild \
- -workspace 'FirebaseMLModelDownloader/Apps/Sample/MLDownloaderTestApp.xcworkspace' \
- -scheme "MLDownloaderTestApp" \
- "${xcb_flags[@]}" \
- build
- fi
- ;;
-
- WatchOSSample-*-*)
- RunXcodebuild \
- -workspace 'Example/watchOSSample/SampleWatchApp.xcworkspace' \
- -scheme "SampleWatchAppWatchKitApp" \
- "${xcb_flags[@]}" \
- build
- ;;
-
- Database-*-integration)
- "${database_emulator}" start
- trap '"${database_emulator}" stop' ERR EXIT
- pod_gen FirebaseDatabase.podspec --platforms="${gen_platform}"
-
- RunXcodebuild \
- -workspace 'gen/FirebaseDatabase/FirebaseDatabase.xcworkspace' \
- -scheme "FirebaseDatabase-Unit-integration" \
- "${xcb_flags[@]}" \
- build \
- test
- ;;
-
- RemoteConfig-*-fakeconsole)
- pod_gen FirebaseRemoteConfigSwift.podspec --platforms="${gen_platform}"
-
- RunXcodebuild \
- -workspace 'gen/FirebaseRemoteConfigSwift/FirebaseRemoteConfigSwift.xcworkspace' \
- -scheme "FirebaseRemoteConfigSwift-Unit-fake-console-tests" \
- "${xcb_flags[@]}" \
- build \
- test
- ;;
-
- RemoteConfig-*-integration)
- pod_gen FirebaseRemoteConfigSwift.podspec --platforms="${gen_platform}"
-
- # Add GoogleService-Info.plist to generated Test Wrapper App.
- ruby ./scripts/update_xcode_target.rb gen/FirebaseRemoteConfigSwift/Pods/Pods.xcodeproj \
- AppHost-FirebaseRemoteConfigSwift-Unit-Tests \
- ../../../FirebaseRemoteConfigSwift/Tests/SwiftAPI/GoogleService-Info.plist
-
- # Add AccessToken to generated Test Wrapper App.
- ruby ./scripts/update_xcode_target.rb gen/FirebaseRemoteConfigSwift/Pods/Pods.xcodeproj \
- AppHost-FirebaseRemoteConfigSwift-Unit-Tests \
- ../../../FirebaseRemoteConfigSwift/Tests/AccessToken.json
-
- RunXcodebuild \
- -workspace 'gen/FirebaseRemoteConfigSwift/FirebaseRemoteConfigSwift.xcworkspace' \
- -scheme "FirebaseRemoteConfigSwift-Unit-swift-api-tests" \
- "${xcb_flags[@]}" \
- build \
- test
- ;;
-
- RemoteConfigSample-*-*)
- RunXcodebuild \
- -workspace 'FirebaseRemoteConfig/Tests/Sample/RemoteConfigSampleApp.xcworkspace' \
- -scheme "RemoteConfigSampleApp" \
- "${xcb_flags[@]}" \
- build
- ;;
-
- Sessions-*-integration)
- # Perform "pod install" to install the relevant dependencies
- # ./FirebaseSessions/generate_testapp.sh
- pod_gen FirebaseSessions.podspec --platforms=ios --clean
- cd FirebaseSessions/Tests/TestApp; pod install; cd -
-
- # Run E2E Integration Tests for Prod.
- RunXcodebuild \
- -workspace 'FirebaseSessions/Tests/TestApp/AppQualityDevApp.xcworkspace' \
- -scheme "AppQualityDevApp_iOS" \
- "${ios_flags[@]}" \
- "${xcb_flags[@]}" \
- build \
- test
-
- # Run E2E Integration Tests for Staging.
- RunXcodebuild \
- -workspace 'FirebaseSessions/Tests/TestApp/AppQualityDevApp.xcworkspace' \
- -scheme "AppQualityDevApp_iOS" \
- FirebaseSessionsRunEnvironment=STAGING \
- "${ios_flags[@]}" \
- "${xcb_flags[@]}" \
- build \
- test
-
- # Run E2E Integration Tests for Autopush.
- RunXcodebuild \
- -workspace 'FirebaseSessions/Tests/TestApp/AppQualityDevApp.xcworkspace' \
- -scheme "AppQualityDevApp_iOS" \
- FirebaseSessionsRunEnvironment=AUTOPUSH \
- "${ios_flags[@]}" \
- "${xcb_flags[@]}" \
- build \
- test
- ;;
-
- StorageSwift-*-xcodebuild)
- pod_gen FirebaseStorage.podspec --platforms=ios
-
- # Add GoogleService-Info.plist to generated Test Wrapper App.
- ruby ./scripts/update_xcode_target.rb gen/FirebaseStorage/Pods/Pods.xcodeproj \
- AppHost-FirebaseStorage-Unit-Tests \
- ../../../FirebaseStorage/Tests/Integration/Resources/GoogleService-Info.plist
-
- if check_secrets; then
- # Integration tests are only run on iOS to minimize flake failures.
- RunXcodebuild \
- -workspace 'gen/FirebaseStorage/FirebaseStorage.xcworkspace' \
- -scheme "FirebaseStorage-Unit-integration" \
- "${ios_flags[@]}" \
- "${xcb_flags[@]}" \
- test
- fi
- ;;
-
- StorageObjC-*-xcodebuild)
- pod_gen FirebaseStorage.podspec --platforms=ios
-
- # Add GoogleService-Info.plist to generated Test Wrapper App.
- ruby ./scripts/update_xcode_target.rb gen/FirebaseStorage/Pods/Pods.xcodeproj \
- AppHost-FirebaseStorage-Unit-Tests \
- ../../../FirebaseStorage/Tests/Integration/Resources/GoogleService-Info.plist
-
- if check_secrets; then
- # Integration tests are only run on iOS to minimize flake failures.
- RunXcodebuild \
- -workspace 'gen/FirebaseStorage/FirebaseStorage.xcworkspace' \
- -scheme "FirebaseStorage-Unit-ObjCIntegration" \
- "${ios_flags[@]}" \
- "${xcb_flags[@]}" \
- test
- fi
- ;;
-
- StorageCombine-*-xcodebuild)
- pod_gen FirebaseCombineSwift.podspec --platforms=ios
-
- # Add GoogleService-Info.plist to generated Test Wrapper App.
- ruby ./scripts/update_xcode_target.rb gen/FirebaseCombineSwift/Pods/Pods.xcodeproj \
- AppHost-FirebaseCombineSwift-Unit-Tests \
- ../../../FirebaseStorage/Tests/Integration/Resources/GoogleService-Info.plist
-
- if check_secrets; then
- # Integration tests are only run on iOS to minimize flake failures.
- RunXcodebuild \
- -workspace 'gen/FirebaseCombineSwift/FirebaseCombineSwift.xcworkspace' \
- -scheme "FirebaseCombineSwift-Unit-integration" \
- "${ios_flags[@]}" \
- "${xcb_flags[@]}" \
- test
- fi
- ;;
-
- GoogleDataTransport-watchOS-xcodebuild)
- RunXcodebuild \
- -workspace 'GoogleDataTransport/GDTWatchOSTestApp/GDTWatchOSTestApp.xcworkspace' \
- -scheme "GDTWatchOSTestAppWatchKitApp" \
- "${xcb_flags[@]}" \
- build
-
- RunXcodebuild \
- -workspace 'GoogleDataTransport/GDTCCTWatchOSTestApp/GDTCCTWatchOSTestApp.xcworkspace' \
- -scheme "GDTCCTWatchOSIndependentTestAppWatchKitApp" \
- "${xcb_flags[@]}" \
- build
-
- RunXcodebuild \
- -workspace 'GoogleDataTransport/GDTCCTWatchOSTestApp/GDTCCTWatchOSTestApp.xcworkspace' \
- -scheme "GDTCCTWatchOSCompanionTestApp" \
- "${xcb_flags[@]}" \
- build
- ;;
-
- Performance-*-unit)
- # Run unit tests on prod environment with unswizzle capabilities.
- export FPR_UNSWIZZLE_AVAILABLE="1"
- export FPR_AUTOPUSH_ENV="0"
- pod_gen FirebasePerformance.podspec --platforms="${gen_platform}"
- RunXcodebuild \
- -workspace 'gen/FirebasePerformance/FirebasePerformance.xcworkspace' \
- -scheme "FirebasePerformance-Unit-unit" \
- "${xcb_flags[@]}" \
- build \
- test
- ;;
-
- Performance-*-proddev)
- # Build the prod dev test app.
- export FPR_UNSWIZZLE_AVAILABLE="0"
- export FPR_AUTOPUSH_ENV="0"
- pod_gen FirebasePerformance.podspec --platforms="${gen_platform}"
- RunXcodebuild \
- -workspace 'gen/FirebasePerformance/FirebasePerformance.xcworkspace' \
- -scheme "FirebasePerformance-TestApp" \
- "${xcb_flags[@]}" \
- build
- ;;
-
- Performance-*-integration)
- # Generate the workspace for the SDK to generate Protobuf files.
- export FPR_UNSWIZZLE_AVAILABLE="0"
- pod_gen FirebasePerformance.podspec --platforms=ios --clean
-
- # Perform "pod install" to install the relevant dependencies
- cd FirebasePerformance/Tests/FIRPerfE2E; pod install; cd -
-
- # Run E2E Integration Tests for Autopush.
- RunXcodebuild \
- -workspace 'FirebasePerformance/Tests/FIRPerfE2E/FIRPerfE2E.xcworkspace' \
- -scheme "FIRPerfE2EAutopush" \
- FPR_AUTOPUSH_ENV=1 \
- "${ios_flags[@]}" \
- "${xcb_flags[@]}" \
- build \
- test
-
- # Run E2E Integration Tests for Prod.
- RunXcodebuild \
- -workspace 'FirebasePerformance/Tests/FIRPerfE2E/FIRPerfE2E.xcworkspace' \
- -scheme "FIRPerfE2EProd" \
- "${ios_flags[@]}" \
- "${xcb_flags[@]}" \
- build \
- test
- ;;
-
- # Note that the combine tests require setting the minimum iOS and tvOS version to 13.0
- *-*-spm)
- RunXcodebuild \
- -scheme $product \
- "${xcb_flags[@]}" \
- IPHONEOS_DEPLOYMENT_TARGET=13.0 \
- TVOS_DEPLOYMENT_TARGET=13.0 \
- test
- ;;
-
- *-*-spmbuildonly)
- RunXcodebuild \
- -scheme $product \
- "${xcb_flags[@]}" \
- build
- ;;
-
- ClientApp-iOS-xcodebuild | ClientApp-iOS13-iOS-xcodebuild)
- RunXcodebuild \
- -project 'IntegrationTesting/ClientApp/ClientApp.xcodeproj' \
- -scheme $product \
- "${xcb_flags[@]}" \
- build
- ;;
-
- ClientApp-CocoaPods*-iOS-xcodebuild)
- RunXcodebuild \
- -workspace 'IntegrationTesting/ClientApp/ClientApp.xcworkspace' \
- -scheme $product \
- "${xcb_flags[@]}" \
- build
- ;;
-
- *)
-
- echo "Don't know how to build this product-platform-method combination" 1>&2
- echo " product=$product" 1>&2
- echo " platform=$platform" 1>&2
- echo " method=$method" 1>&2
- exit 1
- ;;
-
-esac
diff --git a/scripts/build_non_firebase_sdks.sh b/scripts/build_non_firebase_sdks.sh
deleted file mode 100755
index d3544e1..0000000
--- a/scripts/build_non_firebase_sdks.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -ex
-
-cd "${REPO}"/ReleaseTooling
-
-# This file will contain the non-Firebase SDKs that will be built by ZipBuilder.
-ZIP_POD_JSON="non_firebase_sdk.json"
-rm -f "${ZIP_POD_JSON}"
-IFS=' ,' read -a NON_FIREBASE_SDKS <<< "${NON_FIREBASE_SDKS}"
-
-num_sdk="${#NON_FIREBASE_SDKS[@]}"
-echo "[" >> "${ZIP_POD_JSON}"
-for sdk in "${NON_FIREBASE_SDKS[@]}"
-do
- echo "{\"name\":\"${sdk}\"}" >> "${ZIP_POD_JSON}"
- if [ "$num_sdk" -ne 1 ]; then
- echo ",">> "${ZIP_POD_JSON}"
- fi
- num_sdk=$((num_sdk-1))
-done
-echo "]" >> "${ZIP_POD_JSON}"
-mkdir -p "${REPO}"/sdk_zip
-swift run zip-builder --keep-build-artifacts --update-pod-repo --platforms ios \
- --zip-pods "${ZIP_POD_JSON}" --output-dir "${REPO}"/sdk_zip --disable-build-dependencies
-
-unzip -o "${REPO}"/sdk_zip/Frameworks.zip -d "${HOME}"/ios_frameworks/Firebase/
-
-# Move Frameworks to the Firebase dir so they align with the Firebase SDKs.
-mv -n "${HOME}"/ios_frameworks/Firebase/Binaries "${HOME}"/ios_frameworks/Firebase/NonFirebaseSDKs/
diff --git a/scripts/build_zip.sh b/scripts/build_zip.sh
deleted file mode 100755
index c71579c..0000000
--- a/scripts/build_zip.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-# Copyright 2020 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -x
-REPO=`pwd`
-
-if [[ $# -lt 2 ]]; then
- cat 2>&2 < [String: String] {
- var headerMap = [String: String]()
- for root in findHeaders {
- let rootURL = url.appendingPathComponent(root)
- let enumerator = FileManager.default.enumerator(atPath: rootURL.path)
- while let file = enumerator?.nextObject() as? String {
- if let fType = enumerator?.fileAttributes?[FileAttributeKey.type] as? FileAttributeType,
- fType == .typeRegular {
- if let url = URL(string: file) {
- let filename = url.lastPathComponent
- if filename.hasSuffix(".h") {
- headerMap[filename] = root + "/" + file
- }
- }
- }
- }
- }
- return headerMap
-}
-
-func getImportFile(_ line: String) -> String? {
- return line.components(separatedBy: " ")[1]
- .replacingOccurrences(of: "\"", with: "")
- .replacingOccurrences(of: "<", with: "")
- .replacingOccurrences(of: ">", with: "")
- .components(separatedBy: "/").last
-}
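-
-// For example, the (illustrative) line `#import <FirebaseCore/FIRApp.h>`
-// yields "FIRApp.h".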
-
-func transformFile(_ file: String) {
- var fileContents = ""
- do {
- fileContents = try String(contentsOfFile: file, encoding: .utf8)
- } catch {
- print("Could not read \(file). \(error)")
- // Not a source file, give up and return.
- return
- }
- var outBuffer = ""
- var inSwiftPackage = false
- let lines = fileContents.components(separatedBy: .newlines)
- for line in lines {
- if line.starts(with: "#if SWIFT_PACKAGE") {
- inSwiftPackage = true
- } else if inSwiftPackage, line.starts(with: "#else") {
- inSwiftPackage = false
- } else if line.starts(with: "@import") {
- if !inSwiftPackage {
- fatalError("@import should not be used in CocoaPods library code: \(file):\(line)")
- }
- }
- if line.starts(with: "#import"),
- let importFile = getImportFile(line),
- let path = headerMap[importFile] {
- outBuffer += "#import \"\(path)\"\n"
- } else if line.starts(with: "#include"),
- let importFile = getImportFile(line),
- let path = headerMap[importFile] {
- outBuffer += "#include \"\(path)\"\n"
- } else {
- outBuffer += line + "\n"
- }
- }
- // Write out the changed file.
- do {
- try outBuffer.dropLast()
- .write(toFile: file, atomically: false, encoding: String.Encoding.utf8)
- } catch {
- fatalError("Failed to write \(file). \(error)")
- }
-}
-
-// Search the path upwards to find the root of the firebase-ios-sdk repo.
-var url = URL(fileURLWithPath: FileManager().currentDirectoryPath)
-while url.path != "/", url.lastPathComponent != "firebase-ios-sdk" {
- url = url.deletingLastPathComponent()
-}
-
-print(url)
-
-// Build map of all headers.
-
-let headerMap = getHeaderMap(url)
-
-// print(headerMap)
-
-for root in changeImports {
- let rootURL = url.appendingPathComponent(root)
- let enumerator = FileManager.default.enumerator(atPath: rootURL.path)
- whileLoop: while let file = enumerator?.nextObject() as? String {
- if let fType = enumerator?.fileAttributes?[FileAttributeKey.type] as? FileAttributeType,
- fType == .typeRegular {
- if file.starts(with: ".") {
- continue
- }
- if !(file.hasSuffix(".h") ||
- file.hasSuffix(".m") ||
- file.hasSuffix(".mm") ||
- file.hasSuffix(".c")) {
- continue
- }
- if file.range(of: "/Public/") != nil {
- continue
- }
- let fullTransformPath = root + "/" + file
- for dirPattern in skipDirPatterns {
- if fullTransformPath.range(of: dirPattern) != nil {
- continue whileLoop
- }
- }
- transformFile(fullTransformPath)
- }
- }
-}
diff --git a/scripts/check.sh b/scripts/check.sh
deleted file mode 100755
index 2cd2e12..0000000
--- a/scripts/check.sh
+++ /dev/null
@@ -1,300 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 Google
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Checks that the current state of the tree is sane and optionally auto-fixes
-# any errors found. Meant for interactive use.
-
-function usage() {
-  cat <<EOF
-USAGE: check.sh [options] [<revision>]
-
-Runs auto-formatting scripts, source-tree checks, and linters on any files that
-have changed since origin/main.
-
-By default, any changes are left as uncommitted changes in the working tree. You
-can review them with git diff. Pass --commit to automatically commit any changes.
-
-Pass an alternate revision to use as the basis for checking changes.
-
-OPTIONS:
-
- --allow-dirty
- By default, check.sh requires a clean working tree to keep any generated
- changes separate from logical changes.
-
- --commit
- Commit any auto-generated changes with a message indicating which tool made
- the changes.
-
- --amend
- Commit any auto-generated changes by amending the HEAD commit.
-
- --fixup
- Commit any auto-generated changes with a fixup! message for the HEAD
- commit. The next rebase will squash these fixup commits.
-
- --test-only
- Run all checks without making any changes to local files.
-
-  <revision>
- Specifies a starting revision other than the default of origin/main.
-
-
-EXAMPLES:
-
- check.sh
- Runs automated checks and formatters on all changed files since
- origin/main. Check for changes with git diff.
-
- check.sh --commit
- Runs automated checks and formatters on all changed files since
- origin/main and commits the results.
-
- check.sh --amend HEAD
- Runs automated checks and formatters on all changed files since the last
- commit and amends the last commit with the difference.
-
- check.sh --allow-dirty HEAD
- Runs automated checks and formatters on all changed files since the last
- commit and intermingles the changes with any pending changes. Useful for
- interactive use from an editor.
-
-EOF
-}
-set -euo pipefail
-unset CDPATH
-
-# Change to the top-directory of the working tree
-top_dir=$(git rev-parse --show-toplevel)
-cd "${top_dir}"
-
-ALLOW_DIRTY=false
-COMMIT_METHOD="none"
-CHECK_DIFF=true
-START_REVISION="origin/main"
-TEST_ONLY=false
-VERBOSE=false
-
-# Default to verbose operation if this isn't an interactive build.
-if [[ ! -t 1 ]]; then
- VERBOSE=true
-fi
-
-# When Travis clones a repo for building, it uses a shallow clone. After the
-# first commit on a non-main branch, TRAVIS_COMMIT_RANGE is not set, main
-# is not available, and we need to compute the START_REVISION from the common
-# ancestor of $TRAVIS_COMMIT and origin/main.
-if [[ -n "${TRAVIS_COMMIT_RANGE:-}" ]] ; then
- CHECK_DIFF=true
- START_REVISION="$TRAVIS_COMMIT_RANGE"
-elif [[ -n "${TRAVIS_COMMIT:-}" ]] ; then
- if ! git rev-parse origin/main >& /dev/null; then
- git remote set-branches --add origin main
- git fetch origin
- fi
- CHECK_DIFF=true
- START_REVISION=$(git merge-base origin/main "${TRAVIS_COMMIT}")
-fi
-
-while [[ $# -gt 0 ]]; do
- case "$1" in
- --)
- # Do nothing: explicitly allow this, but ignore it
- ;;
-
- -h | --help)
- usage
- exit 1
- ;;
-
- --allow-dirty)
- ALLOW_DIRTY=true
- ;;
-
- --amend)
- COMMIT_METHOD=amend
- ;;
-
- --fixup)
- COMMIT_METHOD=fixup
- ;;
-
- --commit)
- COMMIT_METHOD=message
- ;;
-
- --verbose)
- VERBOSE=true
- ;;
-
- --test-only)
- # In test-only mode, no changes are made, so there's no reason to
- # require a clean source tree.
- ALLOW_DIRTY=true
- TEST_ONLY=true
- ;;
-
- *)
- START_REVISION="$1"
- shift
- break
- ;;
- esac
- shift
-done
-
-if [[ "${TEST_ONLY}" == true && "${COMMIT_METHOD}" != "none" ]]; then
- echo "--test-only cannot be combined with --amend, --fixup, or --commit"
- exit 1
-fi
-
-if [[ "${ALLOW_DIRTY}" == true && "${COMMIT_METHOD}" == "message" ]]; then
- echo "--allow-dirty and --commit are mutually exclusive"
- exit 1
-fi
-
-if ! git diff-index --quiet HEAD --; then
- if [[ "${ALLOW_DIRTY}" != true ]]; then
- echo "You have local changes that could be overwritten by this script."
- echo "Please commit your changes first or pass --allow-dirty."
- exit 2
- fi
-fi
-
-# Show Travis-related environment variables, to help with debugging failures.
-if [[ "${VERBOSE}" == true ]]; then
- env | egrep '^TRAVIS_(BRANCH|COMMIT|PULL|REPO)' | sort || true
-fi
-
-if [[ "${START_REVISION}" == *..* ]]; then
- RANGE_START="${START_REVISION/..*/}"
- RANGE_END="${START_REVISION/*../}"
-
- # Figure out if we have access to main. If not add it to the repo.
- if ! git rev-parse origin/main >& /dev/null; then
- git remote set-branches --add origin main
- git fetch origin
- fi
-
- # Try to come up with a more accurate representation of the merge, so that
- # checks will operate on just the differences the PR would merge into main.
- # The start of the revision range that Travis supplies can sometimes be a
- # seemingly random value.
- NEW_RANGE_START=$(git merge-base origin/main "${RANGE_END}" || echo "")
- if [[ -n "$NEW_RANGE_START" ]]; then
- START_REVISION="${NEW_RANGE_START}..${RANGE_END}"
- START_SHA="${START_REVISION}"
- else
- # In the shallow clone that Travis has created there's no merge base
- # between the PR and main. In this case just fall back on checking
- # everything.
- echo "Unable to detect base commit for change detection."
- echo "Falling back on just checking everything."
- CHECK_DIFF=false
- START_REVISION="origin/main"
- START_SHA="origin/main"
- fi
-
-else
- START_SHA=$(git rev-parse "${START_REVISION}")
-fi
-
-if [[ "${VERBOSE}" == true ]]; then
- echo "START_REVISION=$START_REVISION"
- echo "START_SHA=$START_SHA"
-fi
-
-# If committing --fixup, avoid messages with fixup! fixup! that might come from
-# multiple fixup commits.
-HEAD_SHA=$(git rev-parse HEAD)
-
-function maybe_commit() {
- local message="$1"
-
- if [[ "${COMMIT_METHOD}" == "none" ]]; then
- return
- fi
-
- echo "${message}"
- case "${COMMIT_METHOD}" in
- amend)
- git commit -a --amend -C "${HEAD_SHA}"
- ;;
-
- fixup)
- git commit -a --fixup="${HEAD_SHA}"
- ;;
-
- message)
- git commit -a -m "${message}"
- ;;
-
- *)
- echo "Unknown commit method ${COMMIT_METHOD}" 1>&2
- exit 2
- ;;
- esac
-}
-
-style_cmd=("${top_dir}/scripts/style.sh")
-if [[ "${TEST_ONLY}" == true ]]; then
- style_cmd+=(test-only)
-fi
-if [[ "$CHECK_DIFF" == true ]]; then
- style_cmd+=("${START_SHA}")
-fi
-
-# Restyle and commit any changes
-"${style_cmd[@]}"
-if ! git diff --quiet; then
- maybe_commit "style.sh generated changes"
-fi
-
-# If there are changes to the Firestore project, ensure they're ordered
-# correctly to minimize conflicts.
-if [ -z "${GITHUB_WORKFLOW-}" ]; then
- if [[ "$CHECK_DIFF" == "false" ]] || \
- ! git diff --quiet "${START_SHA}" -- Firestore; then
-
- sync_project_cmd=("${top_dir}/scripts/sync_project.rb")
- if [[ "${TEST_ONLY}" == true ]]; then
- sync_project_cmd+=(--test-only)
- fi
- "${sync_project_cmd[@]}"
- if ! git diff --quiet; then
- maybe_commit "sync_project.rb generated changes"
- fi
- fi
-fi
-
-set -x
-
-# Print the versions of tools being used.
-python --version
-
-# Check lint errors.
-"${top_dir}/scripts/check_whitespace.sh"
-"${top_dir}/scripts/check_filename_spaces.sh"
-"${top_dir}/scripts/check_copyright.sh"
-"${top_dir}/scripts/check_test_inclusion.py"
-"${top_dir}/scripts/check_imports.swift"
-
-# Google C++ style
-lint_cmd=("${top_dir}/scripts/check_lint.py")
-if [[ "$CHECK_DIFF" == true ]]; then
- lint_cmd+=("${START_SHA}")
-fi
-"${lint_cmd[@]}"
diff --git a/scripts/check_copyright.sh b/scripts/check_copyright.sh
deleted file mode 100755
index 2624720..0000000
--- a/scripts/check_copyright.sh
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2018 Google
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Check source files for copyright notices
-
-options=(
- -E # Use extended regexps
- -I # Exclude binary files
- -L # Show files that don't have a match
- 'Copyright [0-9]{4}.*Google LLC'
-)
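-
-# The pattern above matches notices such as (illustrative):
-#   # Copyright 2021 Google LLC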
-
-list=$(git grep "${options[@]}" -- \
- '*.'{c,cc,cmake,h,js,m,mm,py,rb,sh,swift} \
- CMakeLists.txt '**/CMakeLists.txt' \
- ':(exclude)**/third_party/**')
-
-# Allow copyrights before 2020 without LLC.
-if [[ $list ]]; then
-  result=$(grep -L 'Copyright 20[0-1][0-9].*Google' $list)
-fi
-
-if [[ $result ]]; then
- echo "$result"
- echo "ERROR: Missing copyright notices in the files above. Please fix."
- exit 1
-fi
diff --git a/scripts/check_filename_spaces.sh b/scripts/check_filename_spaces.sh
deleted file mode 100755
index 5e48c48..0000000
--- a/scripts/check_filename_spaces.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 Google
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Fail on spaces in file names, excluding the patterns listed below.
-
-# A sed program that removes filename patterns that are allowed to have spaces
-# in them.
-function remove_valid_names() {
- sed '
- # Xcode-generated asset files
- /Assets.xcassets/ d
-
- # Files without spaces
- /^[^ ]*$/ d
- '
-}
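-
-# For example (illustrative paths): "App/Assets.xcassets/App Icon.png" is
-# filtered out as allowed, while "Docs/Release Notes.md" would remain and be
-# reported below.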
-
-count=$(git ls-files | remove_valid_names | wc -l | xargs)
-
-if [[ ${count} != 0 ]]; then
- echo 'ERROR: Spaces in filenames are not permitted in this repo. Please fix.'
- echo ''
-
- git ls-files | remove_valid_names
- exit 1
-fi
diff --git a/scripts/check_firestore_symbols.sh b/scripts/check_firestore_symbols.sh
deleted file mode 100755
index 5fb2de7..0000000
--- a/scripts/check_firestore_symbols.sh
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/bin/bash
-
-# Copyright 2023 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# DESCRIPTION: This script identifies Objective-C symbols within the
-# `FirebaseFirestoreInternal.xcframework` that are not automatically linked
-# when used in a client target. Because the
-# `FirebaseFirestoreInternal.xcframework` should function without clients
-# needing to pass the `-ObjC` flag, this script catches potential regressions
-# that break that requirement.
-#
-# DEPENDENCIES: This script depends on the given Firebase repo's `Package.swift`
-# using the `FIREBASECI_USE_LOCAL_FIRESTORE_ZIP` env var to swap the Firestore
-# target definition out to instead reference a *local* binary using the
-# `.binaryTarget(path:)` API.
-#
-# DESIGN: This script creates an executable package that depends on Firestore
-# via a local binary SPM target. The package is built twice, once with the
-# -ObjC flag and once without. The linked Objective-C symbols are then
-# stripped from each build's resulting executable. The symbols are then diffed
-# to determine if there exists symbols that were only linked due to the -ObjC
-# flag.
-#
-# USAGE: ./check_firestore_symbols.sh <path_to_firebase_repo> <path_to_firestore_xcframework>
-
-set -euo pipefail
-
-if [[ $# -ne 2 ]]; then
- echo "Usage: ./check_firestore_symbols.sh "
- exit 1
-fi
-
-# Check if the given repo path is valid.
-FIREBASE_REPO_PATH=$1
-
-if [[ "$FIREBASE_REPO_PATH" != /* ]]; then
- echo "The given path should be an absolute path."
- exit 1
-fi
-
-if [[ ! -d "$FIREBASE_REPO_PATH" ]]; then
- echo "The given repo does not exist: $FIREBASE_REPO_PATH"
- exit 1
-fi
-
-# Check if the given xcframework path is valid.
-FIRESTORE_XCFRAMEWORK_PATH=$2
-
-if [ "$(basename $FIRESTORE_XCFRAMEWORK_PATH)" != 'FirebaseFirestoreInternal.xcframework' ]; then
- echo "The given xcframework is not a FirebaseFirestoreInternal.xcframework."
- exit 1
-fi
-
-if [[ ! -d "$FIRESTORE_XCFRAMEWORK_PATH" ]]; then
- echo "The given xcframework does not exist: $FIRESTORE_XCFRAMEWORK_PATH"
- exit 1
-fi
-
-# Copy the given Firestore framework to the root of the given Firebase repo.
-# This script uses an env var that will alter the repo's `Package.swift` to
-# pick up the copied Firestore framework. See
-# `FIREBASECI_USE_LOCAL_FIRESTORE_ZIP` in Firebase's `Package.swift` for more.
-cp -r "$FIRESTORE_XCFRAMEWORK_PATH" "$FIREBASE_REPO_PATH"
-
-# Create a temporary directory for the test package. The test package defines an
-# executable and has the following directory structure:
-#
-# TestPkg
-# ├── Package.swift
-# └── Sources
-# └── TestPkg
-# └── main.swift
-TEST_PKG_ROOT=$(mktemp -d -t TestPkg)
-echo "Test package root: $TEST_PKG_ROOT"
-
-# Create the package's subdirectories.
-mkdir -p "$TEST_PKG_ROOT/Sources/TestPkg"
-
-# Generate the package's `Package.swift`.
-cat > "$TEST_PKG_ROOT/Package.swift" <<- EOM
-// swift-tools-version: 5.6
-import PackageDescription
-
-let package = Package(
- name: "TestPkg",
- platforms: [.macOS(.v10_13)],
- dependencies: [
- .package(path: "${FIREBASE_REPO_PATH}")
- ],
- targets: [
- .executableTarget(
- name: "TestPkg",
- dependencies: [
- .product(
- name: "FirebaseFirestore",
- package: "firebase-ios-sdk"
- )
- ]
- )
- ]
-)
-EOM
-
-# Generate the package's `main.swift`.
-cat > "$TEST_PKG_ROOT/Sources/TestPkg/main.swift" <<- EOM
-import FirebaseFirestore
-
-let db = Firestore.firestore()
-EOM
-
-# Change to the test package's root directory in order to build the package.
-cd "$TEST_PKG_ROOT"
-
-# Build the test package *without* the `-ObjC` linker flag, and dump the
-# resulting executable file's Objective-C symbols into a text file.
-echo "Building test package without -ObjC linker flag..."
-FIREBASECI_USE_LOCAL_FIRESTORE_ZIP=1 xcodebuild -scheme 'TestPkg' \
- -destination 'generic/platform=macOS' \
- -derivedDataPath "$HOME/Library/Developer/Xcode/DerivedData/TestPkg" \
- | xcpretty
-
-nm ~/Library/Developer/Xcode/DerivedData/TestPkg/Build/Products/Debug/TestPkg \
- | grep -o "[-+]\[.*\]" > objc_symbols_without_linker_flag.txt
-
-# Build the test package *with* the -ObjC linker flag, and dump the
-# resulting executable file's Objective-C symbols into a text file.
-echo "Building test package with -ObjC linker flag..."
-FIREBASECI_USE_LOCAL_FIRESTORE_ZIP=1 xcodebuild -scheme 'TestPkg' \
- -destination 'generic/platform=macOS' \
- -derivedDataPath "$HOME/Library/Developer/Xcode/DerivedData/TestPkg-ObjC" \
- OTHER_LDFLAGS='-ObjC' \
- | xcpretty
-
-nm ~/Library/Developer/Xcode/DerivedData/TestPkg-ObjC/Build/Products/Debug/TestPkg \
- | grep -o "[-+]\[.*\]" > objc_symbols_with_linker_flag.txt
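-
-# Each line captured by `grep -o "[-+]\[.*\]"` is an Objective-C symbol such
-# as (illustrative): -[FIRFirestore firestore]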
-
-# Compare the two text files to see if the -ObjC linker flag caused additional
-# symbols to link.
-#
-# Note: In the case where the diff is non-empty, the diff command will
-# return exit code 1, which would terminate execution under `set -euo pipefail`.
-# To avoid this, `|| true` ensures the exit code always indicates success.
-DIFF=$(
- git diff --no-index --output-indicator-new="?" \
- objc_symbols_without_linker_flag.txt \
- objc_symbols_with_linker_flag.txt \
- || true
-)
-if [[ -n "$DIFF" ]]; then
- echo "Failure: Unlinked Objective-C symbols have been detected:"
- echo "$DIFF"
- echo -n "💡 To fix, follow the process shown in "
- echo -n "https://github.com/firebase/firebase-ios-sdk/pull/12534 for the "
- echo "above symbols that are prefixed with ?"
- exit 1
-else
- echo "Success: No unlinked Objective-C symbols have been detected."
- exit 0
-fi
diff --git a/scripts/check_imports.swift b/scripts/check_imports.swift
deleted file mode 100755
index 04e84ae..0000000
--- a/scripts/check_imports.swift
+++ /dev/null
@@ -1,234 +0,0 @@
-#!/usr/bin/swift
-/*
- * Copyright 2020 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Utility script for verifying `import` and `include` syntax. This ensures a
-// consistent style as well as functionality across multiple package managers.
-
-// For more context, see https://github.com/firebase/firebase-ios-sdk/blob/main/HeadersImports.md.
-
-import Foundation
-
-// Skip these directories. Imports should only be repo-relative in libraries
-// and unit tests.
-let skipDirPatterns = ["/Sample/", "/Pods/",
- "FirebaseDynamicLinks/Tests/Integration",
- "FirebaseInAppMessaging/Tests/Integration/",
- "SymbolCollisionTest/", "/gen/",
- "IntegrationTesting/CocoapodsIntegrationTest/",
- "FirebasePerformance/Tests/TestApp/",
- "cmake-build-debug/", "build/", "ObjCIntegration/",
- "FirebasePerformance/Tests/FIRPerfE2E/"] +
- [
- "CoreOnly/Sources", // Skip Firebase.h.
- "SwiftPMTests", // The SwiftPM tests test module imports.
- "IntegrationTesting/ClientApp", // The ClientApp tests module imports.
- "FirebaseSessions/Protogen/", // Generated nanopb code with imports
- ] +
-
- // The following are temporary skips pending working through a first pass of the repo:
- [
- "FirebaseDatabase/Sources/third_party/Wrap-leveldb", // Pending SwiftPM for leveldb.
- "Example",
- "Firestore",
- "GoogleUtilitiesComponents",
- "FirebasePerformance/ProtoSupport/",
- ]
-
-// Skip existence test for patterns that start with the following:
-let skipImportPatterns = [
- "FBLPromise",
- "OCMock",
- "OCMStubRecorder",
-]
-
-private class ErrorLogger {
- var foundError = false
- func log(_ message: String) {
- print(message)
- foundError = true
- }
-
- func importLog(_ message: String, _ file: String, _ line: Int) {
- log("Import Error: \(file):\(line) \(message)")
- }
-}
-
-private func checkFile(_ file: String, logger: ErrorLogger, inRepo repoURL: URL,
- isSwiftFile: Bool) {
- var fileContents = ""
- do {
- fileContents = try String(contentsOfFile: file, encoding: .utf8)
- } catch {
- logger.log("Could not read \(file). \(error)")
- // Not a source file, give up and return.
- return
- }
-
- guard !isSwiftFile else {
- // Swift specific checks.
- fileContents.components(separatedBy: .newlines)
- .enumerated() // [(lineNum, line), ...]
- .filter { $1.starts(with: "import FirebaseCoreExtension") }
- .forEach { lineNum, line in
- logger
- .importLog(
- "Use `@_implementationOnly import FirebaseCoreExtension` when importing `FirebaseCoreExtension`.",
- file, lineNum
- )
- }
- return
- }
-
- let isPublic = file.range(of: "/Public/") != nil &&
- // TODO: Skip legacy GDTCCTLibrary file that isn't Public and should be moved.
- // This test is used in the GoogleDataTransport's repo's CI clone of this repo.
- file.range(of: "GDTCCTLibrary/Public/GDTCOREvent+GDTCCTSupport.h") == nil
- let isPrivate = file.range(of: "/Sources/Private/") != nil ||
- // Delete when FirebaseInstallations fixes directory structure.
- file.range(of: "Source/Library/Private/FirebaseInstallationsInternal.h") != nil ||
- file.range(of: "FirebaseCore/Extension") != nil
-
-  // Treat all files with names ending in "Test" or "Tests" as test files.
- let isTestFile = file.contains("Test.m") || file.contains("Tests.m") ||
- file.contains("Test.swift") || file.contains("Tests.swift")
- let isBridgingHeader = file.contains("Bridging-Header.h")
- var inSwiftPackage = false
- var inSwiftPackageElse = false
- let lines = fileContents.components(separatedBy: .newlines)
- var lineNum = 0
- nextLine: for rawLine in lines {
- let line = rawLine.trimmingCharacters(in: .whitespaces)
- lineNum += 1
- if line.starts(with: "#if SWIFT_PACKAGE") {
- inSwiftPackage = true
- } else if inSwiftPackage, line.starts(with: "#else") {
- inSwiftPackage = false
- inSwiftPackageElse = true
- } else if inSwiftPackageElse, line.starts(with: "#endif") {
- inSwiftPackageElse = false
- } else if inSwiftPackage {
- continue
- } else if file.contains("FirebaseTestingSupport") {
- // Module imports ok in SPM only test infrastructure.
- continue
- }
-
- // "The #else of a SWIFT_PACKAGE check should only do CocoaPods module-style imports."
- if line.starts(with: "#import") || line.starts(with: "#include") {
- let importFile = line.components(separatedBy: " ")[1]
- if inSwiftPackageElse {
- if importFile.first != "<" {
- logger
- .importLog("Import in SWIFT_PACKAGE #else should start with \"<\".", file, lineNum)
- }
- continue
- }
- let importFileRaw = importFile.replacingOccurrences(of: "\"", with: "")
- .replacingOccurrences(of: "<", with: "")
- .replacingOccurrences(of: ">", with: "")
-
- if importFile.first == "\"" {
- // Public Headers should only use simple file names without paths.
- if isPublic {
- if importFile.contains("/") {
- logger.importLog("Public header import should not include \"/\"", file, lineNum)
- }
-
- } else if !FileManager.default.fileExists(atPath: repoURL.path + "/" + importFileRaw) {
- // Non-public header imports should be repo-relative paths. Unqualified imports are
- // allowed in private headers.
- if !isPrivate || importFile.contains("/") {
- for skip in skipImportPatterns {
- if importFileRaw.starts(with: skip) {
- continue nextLine
- }
- }
- logger.importLog("Import \(importFileRaw) does not exist.", file, lineNum)
- }
- }
- } else if importFile.first == "<", !isPrivate, !isTestFile, !isBridgingHeader, !isPublic {
- // Verify that double quotes are always used for intra-module imports.
- if importFileRaw.starts(with: "Firebase"),
- // Allow intra-module imports of FirebaseAppCheckInterop.
- // TODO: Remove the FirebaseAppCheckInterop exception when it's moved to a separate repo.
- importFile.range(of: "FirebaseAppCheckInterop/FirebaseAppCheckInterop.h") == nil {
- logger
- .importLog("Imports internal to the repo should use double quotes not \"<\"", file,
- lineNum)
- }
- }
- }
- }
-}
-
-private func main() -> Int32 {
- let logger = ErrorLogger()
- // Search the path upwards to find the root of the firebase-ios-sdk repo.
- var url = URL(fileURLWithPath: FileManager().currentDirectoryPath)
- while url.path != "/" {
- let script = url.appendingPathComponent("scripts/check_imports.swift")
- if FileManager.default.fileExists(atPath: script.path) {
- break
- }
- url = url.deletingLastPathComponent()
- }
- let repoURL = url
- guard let contents = try? FileManager.default.contentsOfDirectory(at: repoURL,
- includingPropertiesForKeys: nil,
- options: [.skipsHiddenFiles])
- else {
- logger.log("Failed to get repo contents \(repoURL)")
- return 1
- }
-
- for rootURL in contents {
- if !rootURL.hasDirectoryPath {
- continue
- }
- let enumerator = FileManager.default.enumerator(atPath: rootURL.path)
- whileLoop: while let file = enumerator?.nextObject() as? String {
- if let fType = enumerator?.fileAttributes?[FileAttributeKey.type] as? FileAttributeType,
- fType == .typeRegular {
- if file.starts(with: ".") {
- continue
- }
- if !(file.hasSuffix(".h") ||
- file.hasSuffix(".m") ||
- file.hasSuffix(".mm") ||
- file.hasSuffix(".c") ||
- file.hasSuffix(".swift")) {
- continue
- }
- let fullTransformPath = rootURL.path + "/" + file
- for dirPattern in skipDirPatterns {
- if fullTransformPath.range(of: dirPattern) != nil {
- continue whileLoop
- }
- }
- checkFile(
- fullTransformPath,
- logger: logger,
- inRepo: repoURL,
- isSwiftFile: file.hasSuffix(".swift")
- )
- }
- }
- }
- return logger.foundError ? 1 : 0
-}
-
-exit(main())
diff --git a/scripts/check_lint.py b/scripts/check_lint.py
deleted file mode 100755
index 3337d1e..0000000
--- a/scripts/check_lint.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2019 Google
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Lints source files for conformance with the style guide that applies.
-
-Currently supports linting Objective-C, Objective-C++, C++, and Python source.
-"""
-
-import argparse
-import logging
-import os
-import subprocess
-import sys
-import textwrap
-
-from lib import checker
-from lib import command_trace
-from lib import git
-from lib import source
-
-_logger = logging.getLogger('lint')
-
-
-_dry_run = False
-
-
-_CPPLINT_OBJC_FILTERS = [
- # Objective-C uses #import and does not use header guards
- '-build/header_guard',
-
-    # Inline definitions of Objective-C blocks confuse cpplint
- '-readability/braces',
-
- # C-style casts are acceptable in Objective-C++
- '-readability/casting',
-
-    # Objective-C needs to use type 'long' for interop between types like
-    # NSInteger and printf-style functions.
- '-runtime/int',
-
- # cpplint is generally confused by Objective-C mixing with C++.
-    # * Objective-C method invocations in a for loop make it think it's a
- # range-for
- # * Objective-C dictionary literals confuse brace spacing
- # * Empty category declarations ("@interface Foo ()") look like function
- # invocations
- '-whitespace',
-]
-
-_CPPLINT_OBJC_OPTIONS = [
- # cpplint normally excludes Objective-C++
- '--extensions=h,m,mm',
-
- # Objective-C style allows longer lines
- '--linelength=100',
-
- '--filter=' + ','.join(_CPPLINT_OBJC_FILTERS),
-]
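-
-# With these options, the resulting lint command looks roughly like
-# (illustrative):
-#   python cpplint.py --quiet --extensions=h,m,mm --linelength=100 \
-#       --filter=-build/header_guard,... FIRExampleFile.m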
-
-
-def main():
- global _dry_run
-
- parser = argparse.ArgumentParser(description='Lint source files.')
- parser.add_argument('--dry-run', '-n', action='store_true',
- help='Show what the linter would do without doing it')
- parser.add_argument('--all', action='store_true',
- help='run the linter over all known sources')
- parser.add_argument('rev_or_files', nargs='*',
- help='A single revision that specifies a point in time '
- 'from which to look for changes. Defaults to '
- 'origin/main. Alternatively, a list of specific '
- 'files or git pathspecs to lint.')
- args = command_trace.parse_args(parser)
-
- if args.dry_run:
- _dry_run = True
- command_trace.enable_tracing()
-
- pool = checker.Pool()
-
- sources = _unique(source.CC_DIRS + source.OBJC_DIRS + source.PYTHON_DIRS)
- patterns = git.make_patterns(sources)
-
- files = git.find_changed_or_files(args.all, args.rev_or_files, patterns)
- check(pool, files)
-
- pool.exit()
-
-
-def check(pool, files):
- group = source.categorize_files(files)
-
- for kind, files in group.kinds.items():
- for chunk in checker.shard(files):
- if not chunk:
- continue
-
- linter = _linters[kind]
- pool.submit(linter, chunk)
-
-
-def lint_cc(files):
- return _run_cpplint([], files)
-
-
-def lint_objc(files):
- return _run_cpplint(_CPPLINT_OBJC_OPTIONS, files)
-
-
-def _run_cpplint(options, files):
- scripts_dir = os.path.dirname(os.path.abspath(__file__))
- cpplint = os.path.join(scripts_dir, 'cpplint.py')
-
- command = [sys.executable, cpplint, '--quiet']
- command.extend(options)
- command.extend(files)
-
- return _read_output(command)
-
-
-_flake8_warned = False
-
-
-def lint_py(files):
- flake8 = which('flake8')
- if flake8 is None:
- global _flake8_warned
- if not _flake8_warned:
- _flake8_warned = True
- _logger.warn(textwrap.dedent(
- """
- Could not find flake8 on $PATH; skipping python lint.
- Install with:
-
- pip install --user flake8
- """))
- return
-
- command = [flake8]
- command.extend(files)
-
- return _read_output(command)
-
-
-def _read_output(command):
- command_trace.log(command)
-
- if _dry_run:
- return checker.Result(0, '')
-
- proc = subprocess.Popen(
- command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- output = proc.communicate('')[0]
- sc = proc.wait()
-
- return checker.Result(sc, output)
-
-
-_linters = {
- 'cc': lint_cc,
- 'objc': lint_objc,
- 'py': lint_py,
-}
-
-
-def _unique(items):
- return list(set(items))
-
-
-def which(executable):
- """Finds the executable with the given name.
-
- Returns:
- The fully qualified path to the executable or None if the executable isn't
- found.
- """
- if executable.startswith('/'):
- return executable
-
- path = os.environ['PATH'].split(os.pathsep)
-
- for executable_with_ext in _executable_names(executable):
- for entry in path:
- joined = os.path.join(entry, executable_with_ext)
- if os.path.isfile(joined) and os.access(joined, os.X_OK):
- return joined
-
- return None
-
-
-def _executable_names(executable):
- """Yields a sequence of all possible executable names."""
-
- if os.name == 'nt':
- pathext = os.environ.get('PATHEXT', '').split(os.pathsep)
- for ext in pathext:
- yield executable + ext
-
- else:
- yield executable
-
-
-if __name__ == '__main__':
- main()
diff --git a/scripts/check_secrets.sh b/scripts/check_secrets.sh
deleted file mode 100755
index 4db1540..0000000
--- a/scripts/check_secrets.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh
-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Check if secrets are available for multiple CI's
-
-set -x
-echo "GITHUB_BASE_REF: ${GITHUB_BASE_REF:-}"
-echo "GITHUB_HEAD_REF: ${GITHUB_HEAD_REF:-}"
-
-check_secrets()
-{
- # Travis: Secrets are available if we're not running on a fork.
- if [[ -n "${TRAVIS_PULL_REQUEST:-}" ]]; then
- if [[ "$TRAVIS_PULL_REQUEST" == "false" ||
- "$TRAVIS_PULL_REQUEST_SLUG" == "$TRAVIS_REPO_SLUG" ]]; then
- return 0
- fi
- fi
- # GitHub Actions: Secrets are available if we're not running on a fork.
- # See https://help.github.com/en/actions/automating-your-workflow-with-github-actions/using-environment-variables
-  # TODO: Both GITHUB_BASE_REF and GITHUB_HEAD_REF are set in main repo
-  # PRs even though the docs say otherwise. They are not set in cron jobs on main.
-  # Investigate how to distinguish fork PRs from main repo PRs.
- if [[ -n "${GITHUB_WORKFLOW:-}" ]]; then
- return 0
- fi
- return 1
-}
diff --git a/scripts/check_test_inclusion.py b/scripts/check_test_inclusion.py
deleted file mode 100755
index 327f902..0000000
--- a/scripts/check_test_inclusion.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2018 Google
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Verifies that all tests are a part of the project file.
-"""
-
-from __future__ import print_function
-import os
-import os.path
-import re
-import sys
-
-
-# Tests that are known not to compile in Xcode and can't be added there.
-EXCLUDED = frozenset([
-])
-
-
-def Main():
- """Runs the style check."""
-
- tests = FindTestFiles("Firestore/Example/Tests", "Firestore/core/test")
- problems = CheckProject(
- "Firestore/Example/Firestore.xcodeproj/project.pbxproj", tests)
-
- if problems:
- Error("Test files exist that are unreferenced in Xcode project files:")
- for problem in problems:
- Error(problem)
- sys.exit(1)
-
- sys.exit(0)
-
-
-def FindTestFiles(*test_dirs):
- """Searches the given source roots for test files.
-
- Args:
- *test_dirs: A list of directories containing test sources.
-
- Returns:
- A list of test source filenames.
- """
-
- test_file_pattern = re.compile(r"(?:Tests?\.mm?|_test\.(?:cc|mm))$")
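-  # For example, "FIRQueryTests.mm" and "query_test.cc" (illustrative names)
-  # match this pattern, while "QueryHelpers.mm" would not.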
-
- result = []
- for test_dir in test_dirs:
- for root, dirs, files in os.walk(test_dir):
- del dirs # unused
- for basename in files:
- filename = os.path.join(root, basename)
- if filename not in EXCLUDED and test_file_pattern.search(basename):
- result.append(filename)
- return result
-
-
-def CheckProject(project_file, test_files):
- """Checks the given project file for tests in the given test_dirs.
-
- Args:
- project_file: The path to an Xcode pbxproj file.
- test_files: A list of all tests source files in the project.
-
- Returns:
- A sorted list of filenames that aren't referenced in the project_file.
- """
-
-  # A dict mapping basename to filename
- basenames = {os.path.basename(f): f for f in test_files}
-
- file_list_pattern = re.compile(r"/\* (\S+) in Sources \*/")
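-  # Matches Xcode project entries such as (illustrative):
-  #   1A2B3C4D /* FIRQueryTests.mm in Sources */,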
- with open(project_file, "r") as fd:
- for line in fd:
- line = line.rstrip()
- m = file_list_pattern.search(line)
- if m:
- basename = m.group(1)
- if basename in basenames:
- del basenames[basename]
-
- return sorted(basenames.values())
-
-
-def Error(message, *args):
- message %= args
- print(message, file=sys.stderr)
-
-
-if __name__ == "__main__":
- Main()
diff --git a/scripts/check_whitespace.sh b/scripts/check_whitespace.sh
deleted file mode 100755
index 0867180..0000000
--- a/scripts/check_whitespace.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-# Copyright 2018 Google
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Fail on any trailing whitespace characters, excluding
-# * binary files (-I)
-# * nanopb-generated files
-# * protoc-generated files
-#
-# Note: specifying revisions we care about makes this go slower than just
-# grepping through the whole repo.
-options=(
- -n # show line numbers
- -I # exclude binary files
- ' $'
-)
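-# For example (illustrative), a line ending in a space such as "int x = 1; "
-# would be reported by git grep as "path/file.cc:12:int x = 1; ".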
-
-git grep "${options[@]}" -- \
- ':(exclude)cmake/external/leveldb-1.22_windows_paths.patch' \
- ':(exclude)cmake/external/leveldb-1.23_windows_paths.patch' \
- ':(exclude)cmake/external/nanopb.patch' \
- ':(exclude)cmake/external/snappy.patch' \
- ':(exclude)Crashlytics/ProtoSupport' \
- ':(exclude)Crashlytics/UnitTests/Data' \
- ':(exclude)CoreOnly/NOTICES' \
- ':(exclude)Firebase/Firebase/NOTICES' \
- ':(exclude)Firebase/InAppMessaging/ProtoSupport' \
- ':(exclude)Firestore/Protos/nanopb' \
- ':(exclude)Firestore/Protos/cpp' \
- ':(exclude)Firestore/Protos/objc' \
- ':(exclude)Firestore/third_party/abseil-cpp' \
- ':(exclude)GoogleDataTransport/ProtoSupport' \
- ':(exclude)ReleaseTooling/Template/NOTICES'
-
-if [[ $? == 0 ]]; then
- echo "ERROR: Trailing whitespace found in the files above. Please fix."
- exit 1
-fi
diff --git a/scripts/collect_metrics.sh b/scripts/collect_metrics.sh
deleted file mode 100755
index c71bd9b..0000000
--- a/scripts/collect_metrics.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2019 Google
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# USAGE: ./collect_metrics.sh workspace scheme
-#
-# Collects project health metrics and uploads them to a database. Currently just collects code
-# coverage for the provided workspace and scheme. Assumes that those tests have already been
-# executed.
-
-set -euo pipefail
-
-if [[ $# -ne 2 ]]; then
- cat 1>&2 < [file] ...
-
- Style checker for C/C++ source files.
- This is a fork of the Google style checker with minor extensions.
-
- The style guidelines this tries to follow are those in
- https://google.github.io/styleguide/cppguide.html
-
- Every problem is given a confidence score from 1-5, with 5 meaning we are
- certain of the problem, and 1 meaning it could be a legitimate construct.
- This will miss some errors, and is not a substitute for a code review.
-
- To suppress false-positive errors of a certain category, add a
- 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
- suppresses errors of all categories on that line.
-
- The files passed in will be linted; at least one file must be provided.
- Default linted extensions are %s.
- Other file types will be ignored.
- Change the extensions with the --extensions flag.
-
- Flags:
-
- output=emacs|eclipse|vs7|junit|sed|gsed
- By default, the output is formatted to ease emacs parsing. Visual Studio
- compatible output (vs7) may also be used. Further support exists for
- eclipse (eclipse), and JUnit (junit). XML parsers such as those used
- in Jenkins and Bamboo may also be used.
- The sed format outputs sed commands that should fix some of the errors.
- Note that this requires GNU sed. If that is installed as gsed on your
- system (common e.g. on macOS with Homebrew) you can use the gsed output
- format. Sed commands are written to stdout, not stderr, so you should be
- able to pipe output straight to a shell to run the fixes.
-
- verbose=#
- Specify a number 0-5 to restrict errors to certain verbosity levels.
- Errors with lower verbosity levels have lower confidence and are more
- likely to be false positives.
-
- quiet
- Don't print anything if no errors are found.
-
- filter=-x,+y,...
- Specify a comma-separated list of category-filters to apply: only
- error messages whose category names pass the filters will be printed.
- (Category names are printed with the message and look like
- "[whitespace/indent]".) Filters are evaluated left to right.
- "-FOO" and "FOO" means "do not print categories that start with FOO".
- "+FOO" means "do print categories that start with FOO".
-
- Examples: --filter=-whitespace,+whitespace/braces
- --filter=whitespace,runtime/printf,+runtime/printf_format
- --filter=-,+build/include_what_you_use
-
- To see a list of all the categories used in cpplint, pass no arg:
- --filter=
-
- counting=total|toplevel|detailed
- The total number of errors found is always printed. If
- 'toplevel' is provided, then the count of errors in each of
- the top-level categories like 'build' and 'whitespace' will
- also be printed. If 'detailed' is provided, then a count
- is provided for each category like 'build/class'.
-
- repository=path
- The top level directory of the repository, used to derive the header
- guard CPP variable. By default, this is determined by searching for a
- path that contains .git, .hg, or .svn. When this flag is specified, the
- given path is used instead. This option allows the header guard CPP
- variable to remain consistent even if members of a team have different
- repository root directories (such as when checking out a subdirectory
- with SVN). In addition, users of non-mainstream version control systems
- can use this flag to ensure readable header guard CPP variables.
-
- Examples:
- Assuming that Alice checks out ProjectName and Bob checks out
- ProjectName/trunk and trunk contains src/chrome/ui/browser.h, then
- with no --repository flag, the header guard CPP variable will be:
-
- Alice => TRUNK_SRC_CHROME_BROWSER_UI_BROWSER_H_
- Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
-
- If Alice uses the --repository=trunk flag and Bob omits the flag or
- uses --repository=. then the header guard CPP variable will be:
-
- Alice => SRC_CHROME_BROWSER_UI_BROWSER_H_
- Bob => SRC_CHROME_BROWSER_UI_BROWSER_H_
-
- root=subdir
- The root directory used for deriving header guard CPP variable.
- This directory is relative to the top level directory of the repository
- which by default is determined by searching for a directory that contains
- .git, .hg, or .svn but can also be controlled with the --repository flag.
- If the specified directory does not exist, this flag is ignored.
-
- Examples:
- Assuming that src is the top level directory of the repository (and
- cwd=top/src), the header guard CPP variables for
- src/chrome/browser/ui/browser.h are:
-
- No flag => CHROME_BROWSER_UI_BROWSER_H_
- --root=chrome => BROWSER_UI_BROWSER_H_
- --root=chrome/browser => UI_BROWSER_H_
- --root=.. => SRC_CHROME_BROWSER_UI_BROWSER_H_
-
- linelength=digits
- This is the allowed line length for the project. The default value is
- 80 characters.
-
- Examples:
- --linelength=120
-
- recursive
- Search for files to lint recursively. Each directory given in the list
- of files to be linted is replaced by all files that descend from that
- directory. Files with extensions not in the valid extensions list are
- excluded.
-
- exclude=path
- Exclude the given path from the list of files to be linted. Relative
- paths are evaluated relative to the current directory and shell globbing
- is performed. This flag can be provided multiple times to exclude
- multiple files.
-
- Examples:
- --exclude=one.cc
- --exclude=src/*.cc
- --exclude=src/*.cc --exclude=test/*.cc
-
- extensions=extension,extension,...
- The allowed file extensions that cpplint will check
-
- Examples:
- --extensions=%s
-
- includeorder=default|standardcfirst
- For the build/include_order rule, the default is to blindly assume that
- angle-bracket includes with a file extension are C system headers, even
- though this produces some false classifications. This default matches the
- convention established at Google.
- standardcfirst instead uses an allow-list of known C headers and treats
- all others as a separate group of "other system headers". The C headers
- included are those of the C-standard lib and closely related ones.
-
- headers=x,y,...
- The header extensions that cpplint will treat as .h in checks. Values are
- automatically added to --extensions list.
- (by default, only files with extensions %s will be assumed to be headers)
-
- Examples:
- --headers=%s
- --headers=hpp,hxx
- --headers=hpp
-
- cpplint.py supports per-directory configurations specified in CPPLINT.cfg
- files. A CPPLINT.cfg file can contain a number of key=value pairs.
- Currently the following options are supported:
-
- set noparent
- filter=+filter1,-filter2,...
- exclude_files=regex
- linelength=80
- root=subdir
- headers=x,y,...
-
- "set noparent" option prevents cpplint from traversing directory tree
- upwards looking for more .cfg files in parent directories. This option
- is usually placed in the top-level project directory.
-
- The "filter" option is similar in function to --filter flag. It specifies
- message filters in addition to the |_DEFAULT_FILTERS| and those specified
- through --filter command-line flag.
-
- "exclude_files" allows to specify a regular expression to be matched against
- a file name. If the expression matches, the file is skipped and not run
- through the linter.
-
- "linelength" allows to specify the allowed line length for the project.
-
- The "root" option is similar in function to the --root flag (see example
- above). Paths are relative to the directory of the CPPLINT.cfg.
-
- The "headers" option is similar in function to the --headers flag
- (see example above).
-
- CPPLINT.cfg has an effect on files in the same directory and all
- sub-directories, unless overridden by a nested configuration file.
-
- Example file:
- filter=-build/include_order,+build/include_alpha
- exclude_files=.*\\.cc
-
- The above example disables the build/include_order warning, enables
- build/include_alpha, and excludes all .cc files from being processed by
- the linter, in the current directory (where the .cfg file is located)
- and all sub-directories.
-"""
-
-# We categorize each error message we print. Here are the categories.
-# We want an explicit list so we can list them all in cpplint --filter=.
-# If you add a new error message with a new category, add it to the list
-# here! cpplint_unittest.py should tell you if you forget to do this.
-_ERROR_CATEGORIES = [
- 'build/class',
- 'build/c++11',
- 'build/c++14',
- 'build/c++tr1',
- 'build/deprecated',
- 'build/endif_comment',
- 'build/explicit_make_pair',
- 'build/forward_decl',
- 'build/header_guard',
- 'build/include',
- 'build/include_subdir',
- 'build/include_alpha',
- 'build/include_order',
- 'build/include_what_you_use',
- 'build/namespaces_headers',
- 'build/namespaces_literals',
- 'build/namespaces',
- 'build/printf_format',
- 'build/storage_class',
- 'legal/copyright',
- 'readability/alt_tokens',
- 'readability/braces',
- 'readability/casting',
- 'readability/check',
- 'readability/constructors',
- 'readability/fn_size',
- 'readability/inheritance',
- 'readability/multiline_comment',
- 'readability/multiline_string',
- 'readability/namespace',
- 'readability/nolint',
- 'readability/nul',
- 'readability/strings',
- 'readability/todo',
- 'readability/utf8',
- 'runtime/arrays',
- 'runtime/casting',
- 'runtime/explicit',
- 'runtime/int',
- 'runtime/init',
- 'runtime/invalid_increment',
- 'runtime/member_string_references',
- 'runtime/memset',
- 'runtime/indentation_namespace',
- 'runtime/operator',
- 'runtime/printf',
- 'runtime/printf_format',
- 'runtime/references',
- 'runtime/string',
- 'runtime/threadsafe_fn',
- 'runtime/vlog',
- 'whitespace/blank_line',
- 'whitespace/braces',
- 'whitespace/comma',
- 'whitespace/comments',
- 'whitespace/empty_conditional_body',
- 'whitespace/empty_if_body',
- 'whitespace/empty_loop_body',
- 'whitespace/end_of_line',
- 'whitespace/ending_newline',
- 'whitespace/forcolon',
- 'whitespace/indent',
- 'whitespace/line_length',
- 'whitespace/newline',
- 'whitespace/operators',
- 'whitespace/parens',
- 'whitespace/semicolon',
- 'whitespace/tab',
- 'whitespace/todo',
- ]
-
-# keywords to use with --outputs which generate stdout for machine processing
-_MACHINE_OUTPUTS = [
- 'junit',
- 'sed',
- 'gsed'
-]
-
-# These error categories are no longer enforced by cpplint, but for backwards-
-# compatibility they may still appear in NOLINT comments.
-_LEGACY_ERROR_CATEGORIES = [
- 'readability/streams',
- 'readability/function',
- ]
-
-# The default state of the category filter. This is overridden by the --filter=
-# flag. By default all errors are on, so only add here categories that should be
-# off by default (i.e., categories that must be enabled by the --filter= flags).
-# All entries here should start with a '-' or '+', as in the --filter= flag.
-_DEFAULT_FILTERS = ['-build/include_alpha']
-
-# The default list of categories suppressed for C (not C++) files.
-_DEFAULT_C_SUPPRESSED_CATEGORIES = [
- 'readability/casting',
- ]
-
-# The default list of categories suppressed for Linux Kernel files.
-_DEFAULT_KERNEL_SUPPRESSED_CATEGORIES = [
- 'whitespace/tab',
- ]
-
-# We used to check for high-bit characters, but after much discussion we
-# decided those were OK, as long as they were in UTF-8 and didn't represent
-# hard-coded international strings, which belong in a separate i18n file.
-
-# C++ headers
-_CPP_HEADERS = frozenset([
- # Legacy
- 'algobase.h',
- 'algo.h',
- 'alloc.h',
- 'builtinbuf.h',
- 'bvector.h',
- 'complex.h',
- 'defalloc.h',
- 'deque.h',
- 'editbuf.h',
- 'fstream.h',
- 'function.h',
- 'hash_map',
- 'hash_map.h',
- 'hash_set',
- 'hash_set.h',
- 'hashtable.h',
- 'heap.h',
- 'indstream.h',
- 'iomanip.h',
- 'iostream.h',
- 'istream.h',
- 'iterator.h',
- 'list.h',
- 'map.h',
- 'multimap.h',
- 'multiset.h',
- 'ostream.h',
- 'pair.h',
- 'parsestream.h',
- 'pfstream.h',
- 'procbuf.h',
- 'pthread_alloc',
- 'pthread_alloc.h',
- 'rope',
- 'rope.h',
- 'ropeimpl.h',
- 'set.h',
- 'slist',
- 'slist.h',
- 'stack.h',
- 'stdiostream.h',
- 'stl_alloc.h',
- 'stl_relops.h',
- 'streambuf.h',
- 'stream.h',
- 'strfile.h',
- 'strstream.h',
- 'tempbuf.h',
- 'tree.h',
- 'type_traits.h',
- 'vector.h',
- # 17.6.1.2 C++ library headers
- 'algorithm',
- 'array',
- 'atomic',
- 'bitset',
- 'chrono',
- 'codecvt',
- 'complex',
- 'condition_variable',
- 'deque',
- 'exception',
- 'forward_list',
- 'fstream',
- 'functional',
- 'future',
- 'initializer_list',
- 'iomanip',
- 'ios',
- 'iosfwd',
- 'iostream',
- 'istream',
- 'iterator',
- 'limits',
- 'list',
- 'locale',
- 'map',
- 'memory',
- 'mutex',
- 'new',
- 'numeric',
- 'ostream',
- 'queue',
- 'random',
- 'ratio',
- 'regex',
- 'scoped_allocator',
- 'set',
- 'sstream',
- 'stack',
- 'stdexcept',
- 'streambuf',
- 'string',
- 'strstream',
- 'system_error',
- 'thread',
- 'tuple',
- 'typeindex',
- 'typeinfo',
- 'type_traits',
- 'unordered_map',
- 'unordered_set',
- 'utility',
- 'valarray',
- 'vector',
- # 17.6.1.2 C++14 headers
- 'shared_mutex',
- # 17.6.1.2 C++17 headers
- 'any',
- 'charconv',
- 'codecvt',
- 'execution',
- 'filesystem',
- 'memory_resource',
- 'optional',
- 'string_view',
- 'variant',
- # 17.6.1.2 C++ headers for C library facilities
- 'cassert',
- 'ccomplex',
- 'cctype',
- 'cerrno',
- 'cfenv',
- 'cfloat',
- 'cinttypes',
- 'ciso646',
- 'climits',
- 'clocale',
- 'cmath',
- 'csetjmp',
- 'csignal',
- 'cstdalign',
- 'cstdarg',
- 'cstdbool',
- # 'cstddef', https://github.com/firebase/firebase-ios-sdk/pull/7563
- 'cstdint',
- 'cstdio',
- # 'cstdlib', https://github.com/firebase/firebase-ios-sdk/pull/7563
- 'cstring',
- 'ctgmath',
- 'ctime',
- 'cuchar',
- 'cwchar',
- 'cwctype',
- ])
-
-# C headers
-_C_HEADERS = frozenset([
- # System C headers
- 'assert.h',
- 'complex.h',
- 'ctype.h',
- 'errno.h',
- 'fenv.h',
- 'float.h',
- 'inttypes.h',
- 'iso646.h',
- 'limits.h',
- 'locale.h',
- 'math.h',
- 'setjmp.h',
- 'signal.h',
- 'stdalign.h',
- 'stdarg.h',
- 'stdatomic.h',
- 'stdbool.h',
- 'stddef.h',
- 'stdint.h',
- 'stdio.h',
- 'stdlib.h',
- 'stdnoreturn.h',
- 'string.h',
- 'tgmath.h',
- 'threads.h',
- 'time.h',
- 'uchar.h',
- 'wchar.h',
- 'wctype.h',
- # additional POSIX C headers
- 'aio.h',
- 'arpa/inet.h',
- 'cpio.h',
- 'dirent.h',
- 'dlfcn.h',
- 'fcntl.h',
- 'fmtmsg.h',
- 'fnmatch.h',
- 'ftw.h',
- 'glob.h',
- 'grp.h',
- 'iconv.h',
- 'langinfo.h',
- 'libgen.h',
- 'monetary.h',
- 'mqueue.h',
- 'ndbm.h',
- 'net/if.h',
- 'netdb.h',
- 'netinet/in.h',
- 'netinet/tcp.h',
- 'nl_types.h',
- 'poll.h',
- 'pthread.h',
- 'pwd.h',
- 'regex.h',
- 'sched.h',
- 'search.h',
- 'semaphore.h',
- 'setjmp.h',
- 'signal.h',
- 'spawn.h',
- 'strings.h',
- 'stropts.h',
- 'syslog.h',
- 'tar.h',
- 'termios.h',
- 'trace.h',
- 'ulimit.h',
- 'unistd.h',
- 'utime.h',
- 'utmpx.h',
- 'wordexp.h',
- # additional GNUlib headers
- 'a.out.h',
- 'aliases.h',
- 'alloca.h',
- 'ar.h',
- 'argp.h',
- 'argz.h',
- 'byteswap.h',
- 'crypt.h',
- 'endian.h',
- 'envz.h',
- 'err.h',
- 'error.h',
- 'execinfo.h',
- 'fpu_control.h',
- 'fstab.h',
- 'fts.h',
- 'getopt.h',
- 'gshadow.h',
- 'ieee754.h',
- 'ifaddrs.h',
- 'libintl.h',
- 'mcheck.h',
- 'mntent.h',
- 'obstack.h',
- 'paths.h',
- 'printf.h',
- 'pty.h',
- 'resolv.h',
- 'shadow.h',
- 'sysexits.h',
- 'ttyent.h',
- # Additional linux glibc headers
- 'dlfcn.h',
- 'elf.h',
- 'features.h',
- 'gconv.h',
- 'gnu-versions.h',
- 'lastlog.h',
- 'libio.h',
- 'link.h',
- 'malloc.h',
- 'memory.h',
- 'netash/ash.h',
- 'netatalk/at.h',
- 'netax25/ax25.h',
- 'neteconet/ec.h',
- 'netipx/ipx.h',
- 'netiucv/iucv.h',
- 'netpacket/packet.h',
- 'netrom/netrom.h',
- 'netrose/rose.h',
- 'nfs/nfs.h',
- 'nl_types.h',
- 'nss.h',
- 're_comp.h',
- 'regexp.h',
- 'sched.h',
- 'sgtty.h',
- 'stab.h',
- 'stdc-predef.h',
- 'stdio_ext.h',
- 'syscall.h',
- 'termio.h',
- 'thread_db.h',
- 'ucontext.h',
- 'ustat.h',
- 'utmp.h',
- 'values.h',
- 'wait.h',
- 'xlocale.h',
- # Hardware specific headers
- 'arm_neon.h',
- 'emmintrin.h',
- 'xmmintrin.h',
- ])
-
-# Folders of C libraries so commonly used in C++,
-# that they have parity with standard C libraries.
-C_STANDARD_HEADER_FOLDERS = frozenset([
- # standard C library
- "sys",
- # glibc for linux
- "arpa",
- "asm-generic",
- "bits",
- "gnu",
- "net",
- "netinet",
- "protocols",
- "rpc",
- "rpcsvc",
- "scsi",
- # linux kernel header
- "drm",
- "linux",
- "misc",
- "mtd",
- "rdma",
- "sound",
- "video",
- "xen",
- ])
-
-_C_SYSTEM_DIRECTORIES = frozenset([
- 'SystemConfiguration',
- 'dispatch',
- 'libkern',
- 'mach',
- 'netinet',
- 'objc',
- 'sys',
-])
-
-# Type names
-_TYPES = re.compile(
- r'^(?:'
- # [dcl.type.simple]
- r'(char(16_t|32_t)?)|wchar_t|'
- r'bool|short|int|long|signed|unsigned|float|double|'
- # [support.types]
- r'(ptrdiff_t|size_t|max_align_t|nullptr_t)|'
- # [cstdint.syn]
- r'(u?int(_fast|_least)?(8|16|32|64)_t)|'
- r'(u?int(max|ptr)_t)|'
- r')$')
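-# For example, _TYPES matches 'unsigned', 'size_t' and 'uint64_t', but not
-# 'string' or other class names.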
-
-
-# These headers are excluded from [build/include] and [build/include_order]
-# checks:
-# - Anything not following google file name conventions (containing an
-# uppercase character, such as Python.h or nsStringAPI.h, for example).
-# - Lua headers.
-_THIRD_PARTY_HEADERS_PATTERN = re.compile(
- r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
-
-# Pattern for matching FileInfo.BaseName() against test file name
-_test_suffixes = ['_test', '_regtest', '_unittest']
-_TEST_FILE_SUFFIX = '(' + '|'.join(_test_suffixes) + r')$'
-
-# Pattern that matches only complete whitespace, possibly across multiple lines.
-_EMPTY_CONDITIONAL_BODY_PATTERN = re.compile(r'^\s*$', re.DOTALL)
-
-# Assertion macros. These are defined in base/logging.h and
-# testing/base/public/gunit.h.
-_CHECK_MACROS = [
- 'DCHECK', 'CHECK',
- 'EXPECT_TRUE', 'ASSERT_TRUE',
- 'EXPECT_FALSE', 'ASSERT_FALSE',
- ]
-
-# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
-_CHECK_REPLACEMENT = dict([(macro_var, {}) for macro_var in _CHECK_MACROS])
-
-for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
- ('>=', 'GE'), ('>', 'GT'),
- ('<=', 'LE'), ('<', 'LT')]:
- _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
- _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
- _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
- _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
-
-for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
- ('>=', 'LT'), ('>', 'LE'),
- ('<=', 'GT'), ('<', 'GE')]:
- _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
- _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
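-# For illustration, after the two loops above:
-# _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
-# _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] == 'EXPECT_NE'
-# so "CHECK(a == b)" is reported as better written as "CHECK_EQ(a, b)".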
-
-# Alternative tokens and their replacements. For full list, see section 2.5
-# Alternative tokens [lex.digraph] in the C++ standard.
-#
-# Digraphs (such as '%:') are not included here since it's a mess to
-# match those on a word boundary.
-_ALT_TOKEN_REPLACEMENT = {
- 'and': '&&',
- 'bitor': '|',
- 'or': '||',
- 'xor': '^',
- 'compl': '~',
- 'bitand': '&',
- 'and_eq': '&=',
- 'or_eq': '|=',
- 'xor_eq': '^=',
- 'not': '!',
- 'not_eq': '!='
- }
-
-# Compile regular expression that matches all the above keywords. The "[ =()]"
-# bit is meant to avoid matching these keywords outside of boolean expressions.
-#
-# False positives include C-style multi-line comments and multi-line strings
-# but those have always been troublesome for cpplint.
-_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
- r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
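-# For example, in "if (a and not b)" both "and" and "not" match the pattern
-# above, and the suggested replacements are "&&" and "!".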
-
-
-# These constants define types of headers for use with
-# _IncludeState.CheckNextIncludeOrder().
-_C_SYS_HEADER = 1
-_CPP_SYS_HEADER = 2
-_OTHER_SYS_HEADER = 3
-_LIKELY_MY_HEADER = 4
-_POSSIBLE_MY_HEADER = 5
-_OTHER_HEADER = 6
-
-# These constants define the current inline assembly state
-_NO_ASM = 0 # Outside of inline assembly block
-_INSIDE_ASM = 1 # Inside inline assembly block
-_END_ASM = 2 # Last line of inline assembly block
-_BLOCK_ASM = 3 # The whole block is an inline assembly block
-
-# Match start of assembly blocks
-_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
- r'(?:\s+(volatile|__volatile__))?'
- r'\s*[{(]')
-
-# Match strings that indicate we're working on a C (not C++) file.
-_SEARCH_C_FILE = re.compile(r'\b(?:LINT_C_FILE|'
- r'vim?:\s*.*(\s*|:)filetype=c(\s*|:|$))')
-
-# Match string that indicates we're working on a Linux Kernel file.
-_SEARCH_KERNEL_FILE = re.compile(r'\b(?:LINT_KERNEL_FILE)')
-
-# Commands for sed to fix the problem
-_SED_FIXUPS = {
- 'Remove spaces around =': r's/ = /=/',
- 'Remove spaces around !=': r's/ != /!=/',
- 'Remove space before ( in if (': r's/if (/if(/',
- 'Remove space before ( in for (': r's/for (/for(/',
- 'Remove space before ( in while (': r's/while (/while(/',
- 'Remove space before ( in switch (': r's/switch (/switch(/',
- 'Should have a space between // and comment': r's/\/\//\/\/ /',
- 'Missing space before {': r's/\([^ ]\){/\1 {/',
- 'Tab found, replace by spaces': r's/\t/ /g',
- 'Line ends in whitespace. Consider deleting these extra spaces.': r's/\s*$//',
- 'You don\'t need a ; after a }': r's/};/}/',
- 'Missing space after ,': r's/,\([^ ]\)/, \1/g',
-}
-
-_regexp_compile_cache = {}
-
-# {str, set(int)}: a map from error categories to sets of linenumbers
-# on which those errors are expected and should be suppressed.
-_error_suppressions = {}
-
-# The root directory used for deriving header guard CPP variable.
-# This is set by --root flag.
-_root = None
-_root_debug = False
-
-# The top level repository directory. If set, _root is calculated relative to
-# this directory instead of the directory containing version control artifacts.
-# This is set by the --repository flag.
-_repository = None
-
-# Files to exclude from linting. This is set by the --exclude flag.
-_excludes = None
-
-# Whether to suppress all PrintInfo messages, UNRELATED to --quiet flag
-_quiet = False
-
-# The allowed line length of files.
-# This is set by --linelength flag.
-_line_length = 80
-
-# This allows using a different include order rule than the default
-_include_order = "default"
-
-try:
- unicode
-except NameError:
- # -- pylint: disable=redefined-builtin
- basestring = unicode = str
-
-try:
- long
-except NameError:
- # -- pylint: disable=redefined-builtin
- long = int
-
-if sys.version_info < (3,):
- # -- pylint: disable=no-member
- # BINARY_TYPE = str
- itervalues = dict.itervalues
- iteritems = dict.iteritems
-else:
- # BINARY_TYPE = bytes
- itervalues = dict.values
- iteritems = dict.items
-
-def unicode_escape_decode(x):
- if sys.version_info < (3,):
- return codecs.unicode_escape_decode(x)[0]
- else:
- return x
-
-# Treat all headers starting with 'h' equally: .h, .hpp, .hxx etc.
-# This is set by --headers flag.
-_hpp_headers = set([])
-
-# {str, bool}: a map from error categories to booleans which indicate if the
-# category should be suppressed for every line.
-_global_error_suppressions = {}
-
-def ProcessHppHeadersOption(val):
- global _hpp_headers
- try:
- _hpp_headers = {ext.strip() for ext in val.split(',')}
- except ValueError:
- PrintUsage('Header extensions must be comma separated list.')
-
-def ProcessIncludeOrderOption(val):
- if val is None or val == "default":
- pass
- elif val == "standardcfirst":
- global _include_order
- _include_order = val
- else:
- PrintUsage('Invalid includeorder value %s. Expected default|standardcfirst' % val)
-
-def IsHeaderExtension(file_extension):
- return file_extension in GetHeaderExtensions()
-
-def GetHeaderExtensions():
- if _hpp_headers:
- return _hpp_headers
- if _valid_extensions:
- return {h for h in _valid_extensions if 'h' in h}
- return set(['h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'])
-
-# The allowed extensions for file names
-# This is set by --extensions flag
-def GetAllExtensions():
- return GetHeaderExtensions().union(_valid_extensions or set(
- ['c', 'cc', 'cpp', 'cxx', 'c++', 'cu', 'm', 'mm']))
-
-def ProcessExtensionsOption(val):
- global _valid_extensions
- try:
- extensions = [ext.strip() for ext in val.split(',')]
- _valid_extensions = set(extensions)
- except ValueError:
- PrintUsage('Extensions should be a comma-separated list of values; '
- 'for example: extensions=hpp,cpp\n'
- 'This could not be parsed: "%s"' % (val,))
-
-def GetNonHeaderExtensions():
- return GetAllExtensions().difference(GetHeaderExtensions())
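-# With no --extensions or --headers flags, GetHeaderExtensions() returns
-# {'h', 'hh', 'hpp', 'hxx', 'h++', 'cuh'} and GetNonHeaderExtensions() returns
-# {'c', 'cc', 'cpp', 'cxx', 'c++', 'cu', 'm', 'mm'}.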
-
-def ParseNolintSuppressions(filename, raw_line, linenum, error):
- """Updates the global list of line error-suppressions.
-
- Parses any NOLINT comments on the current line, updating the global
- error_suppressions store. Reports an error if the NOLINT comment
- was malformed.
-
- Args:
- filename: str, the name of the input file.
- raw_line: str, the line of input text, with comments.
- linenum: int, the number of the current line.
- error: function, an error handler.
- """
- matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
- if matched:
- if matched.group(1):
- suppressed_line = linenum + 1
- else:
- suppressed_line = linenum
- category = matched.group(2)
- if category in (None, '(*)'): # => "suppress all"
- _error_suppressions.setdefault(None, set()).add(suppressed_line)
- else:
- if category.startswith('(') and category.endswith(')'):
- category = category[1:-1]
- if category in _ERROR_CATEGORIES:
- _error_suppressions.setdefault(category, set()).add(suppressed_line)
- elif category not in _LEGACY_ERROR_CATEGORIES:
- error(filename, linenum, 'readability/nolint', 5,
- 'Unknown NOLINT error category: %s' % category)
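-# Example: a line ending in "// NOLINT(build/include)" records that line in
-# _error_suppressions['build/include'], while "// NOLINTNEXTLINE(*)" records
-# the following line under the catch-all None key.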
-
-
-def ProcessGlobalSuppresions(lines):
- """Updates the list of global error suppressions.
-
- Parses any lint directives in the file that have global effect.
-
- Args:
- lines: An array of strings, each representing a line of the file, with the
- last element being empty if the file is terminated with a newline.
- """
- for line in lines:
- if _SEARCH_C_FILE.search(line):
- for category in _DEFAULT_C_SUPPRESSED_CATEGORIES:
- _global_error_suppressions[category] = True
- if _SEARCH_KERNEL_FILE.search(line):
- for category in _DEFAULT_KERNEL_SUPPRESSED_CATEGORIES:
- _global_error_suppressions[category] = True
-
-
-def ResetNolintSuppressions():
- """Resets the set of NOLINT suppressions to empty."""
- _error_suppressions.clear()
- _global_error_suppressions.clear()
-
-
-def IsErrorSuppressedByNolint(category, linenum):
- """Returns true if the specified error category is suppressed on this line.
-
- Consults the global error_suppressions map populated by
- ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions.
-
- Args:
- category: str, the category of the error.
- linenum: int, the current line number.
- Returns:
- bool, True iff the error should be suppressed due to a NOLINT comment or
- global suppression.
- """
- return (_global_error_suppressions.get(category, False) or
- linenum in _error_suppressions.get(category, set()) or
- linenum in _error_suppressions.get(None, set()))
-
-
-def Match(pattern, s):
- """Matches the string with the pattern, caching the compiled regexp."""
- # The regexp compilation caching is inlined in both Match and Search for
- # performance reasons; factoring it out into a separate function turns out
- # to be noticeably expensive.
- if pattern not in _regexp_compile_cache:
- _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
- return _regexp_compile_cache[pattern].match(s)
-
-
-def ReplaceAll(pattern, rep, s):
- """Replaces instances of pattern in a string with a replacement.
-
- The compiled regex is kept in a cache shared by Match and Search.
-
- Args:
- pattern: regex pattern
- rep: replacement text
- s: search string
-
- Returns:
- string with replacements made (or original string if no replacements)
- """
- if pattern not in _regexp_compile_cache:
- _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
- return _regexp_compile_cache[pattern].sub(rep, s)
-
-
-def Search(pattern, s):
- """Searches the string for the pattern, caching the compiled regexp."""
- if pattern not in _regexp_compile_cache:
- _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
- return _regexp_compile_cache[pattern].search(s)
-
-
-def _IsSourceExtension(s):
- """File extension (excluding dot) matches a source file extension."""
- return s in GetNonHeaderExtensions()
-
-
-def _IsSourceFilename(filename):
- ext = os.path.splitext(filename)[-1].lower()
- ext = ext[1:] # leading dot
- return _IsSourceExtension(ext)
-
-
-class _IncludeState(object):
- """Tracks line numbers for includes, and the order in which includes appear.
-
- include_list contains a list of lists of (header, line number) pairs.
- It's a list of lists rather than just one flat list to make it
- easier to update across preprocessor boundaries.
-
- Call CheckNextIncludeOrder() once for each header in the file, passing
- in the type constants defined above. Calls in an illegal order will
- raise an _IncludeError with an appropriate error message.
-
- """
- # self._section will move monotonically through this set. If it ever
- # needs to move backwards, CheckNextIncludeOrder will raise an error.
- _INITIAL_SECTION = 0
- _MY_H_SECTION = 1
- _C_SECTION = 2
- _CPP_SECTION = 3
- _OTHER_SYS_SECTION = 4
- _OTHER_H_SECTION = 5
-
- _TYPE_NAMES = {
- _C_SYS_HEADER: 'C system header',
- _CPP_SYS_HEADER: 'C++ system header',
- _OTHER_SYS_HEADER: 'other system header',
- _LIKELY_MY_HEADER: 'header this file implements',
- _POSSIBLE_MY_HEADER: 'header this file may implement',
- _OTHER_HEADER: 'other header',
- }
- _SECTION_NAMES = {
- _INITIAL_SECTION: "... nothing. (This can't be an error.)",
- _MY_H_SECTION: 'a header this file implements',
- _C_SECTION: 'C system header',
- _CPP_SECTION: 'C++ system header',
- _OTHER_SYS_SECTION: 'other system header',
- _OTHER_H_SECTION: 'other header',
- }
-
- def __init__(self):
- self.include_list = [[]]
- self._section = None
- self._last_header = None
- self.ResetSection('')
-
- def FindHeader(self, header):
- """Check if a header has already been included.
-
- Args:
- header: header to check.
- Returns:
- Line number of previous occurrence, or -1 if the header has not
- been seen before.
- """
- for section_list in self.include_list:
- for f in section_list:
- if f[0] == header:
- return f[1]
- return -1
-
- def ResetSection(self, directive):
- """Reset section checking for preprocessor directive.
-
- Args:
- directive: preprocessor directive (e.g. "if", "else").
- """
- # The name of the current section.
- self._section = self._INITIAL_SECTION
- # The path of last found header.
- self._last_header = ''
-
- # Update list of includes. Note that we never pop from the
- # include list.
- if directive in ('if', 'ifdef', 'ifndef'):
- self.include_list.append([])
- elif directive in ('else', 'elif'):
- self.include_list[-1] = []
-
- def SetLastHeader(self, header_path):
- self._last_header = header_path
-
- def CanonicalizeAlphabeticalOrder(self, header_path):
- """Returns a path canonicalized for alphabetical comparison.
-
- - replaces "-" with "_" so they both cmp the same.
- - removes '-inl' since we don't require them to be after the main header.
- - lowercase everything, just in case.
-
- Args:
- header_path: Path to be canonicalized.
-
- Returns:
- Canonicalized path.
- """
- return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
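- # For example, CanonicalizeAlphabeticalOrder('Foo-Bar-inl.h') == 'foo_bar.h'.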
-
- def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
- """Check if a header is in alphabetical order with the previous header.
-
- Args:
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- header_path: Canonicalized header to be checked.
-
- Returns:
- Returns true if the header is in alphabetical order.
- """
- # If previous section is different from current section, _last_header will
- # be reset to empty string, so it's always less than current header.
- #
- # If previous line was a blank line, assume that the headers are
- # intentionally sorted the way they are.
- if (self._last_header > header_path and
- Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
- return False
- return True
-
- def CheckNextIncludeOrder(self, header_type):
- """Returns a non-empty error message if the next header is out of order.
-
- This function also updates the internal state to be ready to check
- the next include.
-
- Args:
- header_type: One of the _XXX_HEADER constants defined above.
-
- Returns:
- The empty string if the header is in the right order, or an
- error message describing what's wrong.
-
- """
- error_message = ('Found %s after %s' %
- (self._TYPE_NAMES[header_type],
- self._SECTION_NAMES[self._section]))
-
- last_section = self._section
-
- if header_type == _C_SYS_HEADER:
- if self._section <= self._C_SECTION:
- self._section = self._C_SECTION
- else:
- self._last_header = ''
- return error_message
- elif header_type == _CPP_SYS_HEADER:
- if self._section <= self._CPP_SECTION:
- self._section = self._CPP_SECTION
- else:
- self._last_header = ''
- return error_message
- elif header_type == _OTHER_SYS_HEADER:
- if self._section <= self._OTHER_SYS_SECTION:
- self._section = self._OTHER_SYS_SECTION
- else:
- self._last_header = ''
- return error_message
- elif header_type == _LIKELY_MY_HEADER:
- if self._section <= self._MY_H_SECTION:
- self._section = self._MY_H_SECTION
- else:
- self._section = self._OTHER_H_SECTION
- elif header_type == _POSSIBLE_MY_HEADER:
- if self._section <= self._MY_H_SECTION:
- self._section = self._MY_H_SECTION
- else:
- # This will always be the fallback because we're not sure
- # enough that the header is associated with this file.
- self._section = self._OTHER_H_SECTION
- else:
- assert header_type == _OTHER_HEADER
- self._section = self._OTHER_H_SECTION
-
- if last_section != self._section:
- self._last_header = ''
-
- return ''
-
-
-class _CppLintState(object):
- """Maintains module-wide state.."""
-
- def __init__(self):
- self.verbose_level = 1 # global setting.
- self.error_count = 0 # global count of reported errors
- # filters to apply when emitting error messages
- self.filters = _DEFAULT_FILTERS[:]
- # backup of filter list. Used to restore the state after each file.
- self._filters_backup = self.filters[:]
- self.counting = 'total' # In what way are we counting errors?
- self.errors_by_category = {} # string to int dict storing error counts
- self.quiet = False # Suppress non-error messages?
-
- # output format:
- # "emacs" - format that emacs can parse (default)
- # "eclipse" - format that eclipse can parse
- # "vs7" - format that Microsoft Visual Studio 7 can parse
- # "junit" - format that Jenkins, Bamboo, etc can parse
- # "sed" - returns a gnu sed command to fix the problem
- # "gsed" - like sed, but names the command gsed, e.g. for macOS homebrew users
- self.output_format = 'emacs'
-
- # For JUnit output, save errors and failures until the end so that they
- # can be written into the XML
- self._junit_errors = []
- self._junit_failures = []
-
- def SetOutputFormat(self, output_format):
- """Sets the output format for errors."""
- self.output_format = output_format
-
- def SetQuiet(self, quiet):
- """Sets the module's quiet settings, and returns the previous setting."""
- last_quiet = self.quiet
- self.quiet = quiet
- return last_quiet
-
- def SetVerboseLevel(self, level):
- """Sets the module's verbosity, and returns the previous setting."""
- last_verbose_level = self.verbose_level
- self.verbose_level = level
- return last_verbose_level
-
- def SetCountingStyle(self, counting_style):
- """Sets the module's counting options."""
- self.counting = counting_style
-
- def SetFilters(self, filters):
- """Sets the error-message filters.
-
- These filters are applied when deciding whether to emit a given
- error message.
-
- Args:
- filters: A string of comma-separated filters (e.g. "+whitespace/indent").
- Each filter should start with + or -; else we die.
-
- Raises:
- ValueError: The comma-separated filters did not all start with '+' or '-'.
- E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
- """
- # Default filters always have less priority than the flag ones.
- self.filters = _DEFAULT_FILTERS[:]
- self.AddFilters(filters)
-
- def AddFilters(self, filters):
- """ Adds more filters to the existing list of error-message filters. """
- for filt in filters.split(','):
- clean_filt = filt.strip()
- if clean_filt:
- self.filters.append(clean_filt)
- for filt in self.filters:
- if not (filt.startswith('+') or filt.startswith('-')):
- raise ValueError('Every filter in --filter must start with + or -'
- ' (%s does not)' % filt)
-
- def BackupFilters(self):
- """ Saves the current filter list to backup storage."""
- self._filters_backup = self.filters[:]
-
- def RestoreFilters(self):
- """ Restores filters previously backed up."""
- self.filters = self._filters_backup[:]
-
- def ResetErrorCounts(self):
- """Sets the module's error statistic back to zero."""
- self.error_count = 0
- self.errors_by_category = {}
-
- def IncrementErrorCount(self, category):
- """Bumps the module's error statistic."""
- self.error_count += 1
- if self.counting in ('toplevel', 'detailed'):
- if self.counting != 'detailed':
- category = category.split('/')[0]
- if category not in self.errors_by_category:
- self.errors_by_category[category] = 0
- self.errors_by_category[category] += 1
-
- def PrintErrorCounts(self):
- """Print a summary of errors by category, and the total."""
- for category, count in sorted(iteritems(self.errors_by_category)):
- self.PrintInfo('Category \'%s\' errors found: %d\n' %
- (category, count))
- if self.error_count > 0:
- self.PrintInfo('Total errors found: %d\n' % self.error_count)
-
- def PrintInfo(self, message):
- # _quiet does not represent --quiet flag.
- # Hide infos from stdout to keep stdout pure for machine consumption
- if not _quiet and self.output_format not in _MACHINE_OUTPUTS:
- sys.stdout.write(message)
-
- def PrintError(self, message):
- if self.output_format == 'junit':
- self._junit_errors.append(message)
- else:
- sys.stderr.write(message)
-
- def AddJUnitFailure(self, filename, linenum, message, category, confidence):
- self._junit_failures.append((filename, linenum, message, category,
- confidence))
-
- def FormatJUnitXML(self):
- num_errors = len(self._junit_errors)
- num_failures = len(self._junit_failures)
-
- testsuite = xml.etree.ElementTree.Element('testsuite')
- testsuite.attrib['errors'] = str(num_errors)
- testsuite.attrib['failures'] = str(num_failures)
- testsuite.attrib['name'] = 'cpplint'
-
- if num_errors == 0 and num_failures == 0:
- testsuite.attrib['tests'] = str(1)
- xml.etree.ElementTree.SubElement(testsuite, 'testcase', name='passed')
-
- else:
- testsuite.attrib['tests'] = str(num_errors + num_failures)
- if num_errors > 0:
- testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
- testcase.attrib['name'] = 'errors'
- error = xml.etree.ElementTree.SubElement(testcase, 'error')
- error.text = '\n'.join(self._junit_errors)
- if num_failures > 0:
- # Group failures by file
- failed_file_order = []
- failures_by_file = {}
- for failure in self._junit_failures:
- failed_file = failure[0]
- if failed_file not in failed_file_order:
- failed_file_order.append(failed_file)
- failures_by_file[failed_file] = []
- failures_by_file[failed_file].append(failure)
- # Create a testcase for each file
- for failed_file in failed_file_order:
- failures = failures_by_file[failed_file]
- testcase = xml.etree.ElementTree.SubElement(testsuite, 'testcase')
- testcase.attrib['name'] = failed_file
- failure = xml.etree.ElementTree.SubElement(testcase, 'failure')
- template = '{0}: {1} [{2}] [{3}]'
- texts = [template.format(f[1], f[2], f[3], f[4]) for f in failures]
- failure.text = '\n'.join(texts)
-
- xml_decl = '<?xml version="1.0" encoding="UTF-8" ?>\n'
- return xml_decl + xml.etree.ElementTree.tostring(testsuite, 'utf-8').decode('utf-8')
-
-
-_cpplint_state = _CppLintState()
-
-
-def _OutputFormat():
- """Gets the module's output format."""
- return _cpplint_state.output_format
-
-
-def _SetOutputFormat(output_format):
- """Sets the module's output format."""
- _cpplint_state.SetOutputFormat(output_format)
-
-def _Quiet():
- """Return's the module's quiet setting."""
- return _cpplint_state.quiet
-
-def _SetQuiet(quiet):
- """Set the module's quiet status, and return previous setting."""
- return _cpplint_state.SetQuiet(quiet)
-
-
-def _VerboseLevel():
- """Returns the module's verbosity setting."""
- return _cpplint_state.verbose_level
-
-
-def _SetVerboseLevel(level):
- """Sets the module's verbosity, and returns the previous setting."""
- return _cpplint_state.SetVerboseLevel(level)
-
-
-def _SetCountingStyle(level):
- """Sets the module's counting options."""
- _cpplint_state.SetCountingStyle(level)
-
-
-def _Filters():
- """Returns the module's list of output filters, as a list."""
- return _cpplint_state.filters
-
-
-def _SetFilters(filters):
- """Sets the module's error-message filters.
-
- These filters are applied when deciding whether to emit a given
- error message.
-
- Args:
- filters: A string of comma-separated filters (e.g. "+whitespace/indent").
- Each filter should start with + or -; else we die.
- """
- _cpplint_state.SetFilters(filters)
-
-def _AddFilters(filters):
- """Adds more filter overrides.
-
- Unlike _SetFilters, this function does not reset the current list of filters
- available.
-
- Args:
- filters: A string of comma-separated filters (e.g. "+whitespace/indent").
- Each filter should start with + or -; else we die.
- """
- _cpplint_state.AddFilters(filters)
-
-def _BackupFilters():
- """ Saves the current filter list to backup storage."""
- _cpplint_state.BackupFilters()
-
-def _RestoreFilters():
- """ Restores filters previously backed up."""
- _cpplint_state.RestoreFilters()
-
-class _FunctionState(object):
- """Tracks current function name and the number of lines in its body."""
-
- _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
- _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
-
- def __init__(self):
- self.in_a_function = False
- self.lines_in_function = 0
- self.current_function = ''
-
- def Begin(self, function_name):
- """Start analyzing function body.
-
- Args:
- function_name: The name of the function being tracked.
- """
- self.in_a_function = True
- self.lines_in_function = 0
- self.current_function = function_name
-
- def Count(self):
- """Count line in current function body."""
- if self.in_a_function:
- self.lines_in_function += 1
-
- def Check(self, error, filename, linenum):
- """Report if too many lines in function body.
-
- Args:
- error: The function to call with any errors found.
- filename: The name of the current file.
- linenum: The number of the line to check.
- """
- if not self.in_a_function:
- return
-
- if Match(r'T(EST|est)', self.current_function):
- base_trigger = self._TEST_TRIGGER
- else:
- base_trigger = self._NORMAL_TRIGGER
- trigger = base_trigger * 2**_VerboseLevel()
-
- if self.lines_in_function > trigger:
- error_level = int(math.log(self.lines_in_function / base_trigger, 2))
- # e.g. with base_trigger 250: 250 => 0, 500 => 1, 1000 => 2, 2000 => 3, ...
- if error_level > 5:
- error_level = 5
- error(filename, linenum, 'readability/fn_size', error_level,
- 'Small and focused functions are preferred:'
- ' %s has %d non-comment lines'
- ' (error triggered by exceeding %d lines).' % (
- self.current_function, self.lines_in_function, trigger))
-
- def End(self):
- """Stop analyzing function body."""
- self.in_a_function = False
-
-
-class _IncludeError(Exception):
- """Indicates a problem with the include order in a file."""
- pass
-
-
-class FileInfo(object):
- """Provides utility functions for filenames.
-
- FileInfo provides easy access to the components of a file's path
- relative to the project root.
- """
-
- def __init__(self, filename):
- self._filename = filename
-
- def FullName(self):
- """Make Windows paths like Unix."""
- return os.path.abspath(self._filename).replace('\\', '/')
-
- def RepositoryName(self):
- r"""FullName after removing the local path to the repository.
-
- If we have a real absolute path name here we can try to do something smart:
- detecting the root of the checkout and truncating /path/to/checkout from
- the name so that we get header guards that don't include things like
- "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
- people on different computers who have checked the source out to different
- locations won't see bogus errors.
- """
- fullname = self.FullName()
-
- if os.path.exists(fullname):
- project_dir = os.path.dirname(fullname)
-
- # If the user specified a repository path, it exists, and the file is
- # contained in it, use the specified repository path
- if _repository:
- repo = FileInfo(_repository).FullName()
- root_dir = project_dir
- while os.path.exists(root_dir):
- # allow case insensitive compare on Windows
- if os.path.normcase(root_dir) == os.path.normcase(repo):
- return os.path.relpath(fullname, root_dir).replace('\\', '/')
- one_up_dir = os.path.dirname(root_dir)
- if one_up_dir == root_dir:
- break
- root_dir = one_up_dir
-
- if os.path.exists(os.path.join(project_dir, ".svn")):
- # If there's a .svn file in the current directory, we recursively look
- # up the directory tree for the top of the SVN checkout
- root_dir = project_dir
- one_up_dir = os.path.dirname(root_dir)
- while os.path.exists(os.path.join(one_up_dir, ".svn")):
- root_dir = os.path.dirname(root_dir)
- one_up_dir = os.path.dirname(one_up_dir)
-
- prefix = os.path.commonprefix([root_dir, project_dir])
- return fullname[len(prefix) + 1:]
-
- # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
- # searching up from the current path.
- root_dir = current_dir = os.path.dirname(fullname)
- while current_dir != os.path.dirname(current_dir):
- if (os.path.exists(os.path.join(current_dir, ".git")) or
- os.path.exists(os.path.join(current_dir, ".hg")) or
- os.path.exists(os.path.join(current_dir, ".svn"))):
- root_dir = current_dir
- current_dir = os.path.dirname(current_dir)
-
- if (os.path.exists(os.path.join(root_dir, ".git")) or
- os.path.exists(os.path.join(root_dir, ".hg")) or
- os.path.exists(os.path.join(root_dir, ".svn"))):
- prefix = os.path.commonprefix([root_dir, project_dir])
- return fullname[len(prefix) + 1:]
-
- # Don't know what to do; header guard warnings may be wrong...
- return fullname
-
- def Split(self):
- """Splits the file into the directory, basename, and extension.
-
- For 'chrome/browser/browser.cc', Split() would
- return ('chrome/browser', 'browser', '.cc')
-
- Returns:
- A tuple of (directory, basename, extension).
- """
-
- googlename = self.RepositoryName()
- project, rest = os.path.split(googlename)
- return (project,) + os.path.splitext(rest)
-
- def BaseName(self):
- """File base name - text after the final slash, before the final period."""
- return self.Split()[1]
-
- def Extension(self):
- """File extension - text following the final period, includes that period."""
- return self.Split()[2]
-
- def NoExtension(self):
- """File has no source file extension."""
- return '/'.join(self.Split()[0:2])
-
- def IsSource(self):
- """File has a source file extension."""
- return _IsSourceExtension(self.Extension()[1:])
-
-
-def _ShouldPrintError(category, confidence, linenum):
- """If confidence >= verbose, category passes filter and is not suppressed."""
-
- # There are three ways we might decide not to print an error message:
- # a "NOLINT(category)" comment appears in the source,
- # the verbosity level isn't high enough, or the filters filter it out.
- if IsErrorSuppressedByNolint(category, linenum):
- return False
-
- if confidence < _cpplint_state.verbose_level:
- return False
-
- is_filtered = False
- for one_filter in _Filters():
- if one_filter.startswith('-'):
- if category.startswith(one_filter[1:]):
- is_filtered = True
- elif one_filter.startswith('+'):
- if category.startswith(one_filter[1:]):
- is_filtered = False
- else:
- assert False # should have been checked for in SetFilter.
- if is_filtered:
- return False
-
- return True
-
-
-def Error(filename, linenum, category, confidence, message):
- """Logs the fact we've found a lint error.
-
- We log where the error was found, and also our confidence in the error,
- that is, how certain we are this is a legitimate style regression, and
- not a misidentification or a use that's sometimes justified.
-
- False positives can be suppressed by the use of
- "cpplint(category)" comments on the offending line. These are
- parsed into _error_suppressions.
-
- Args:
- filename: The name of the file containing the error.
- linenum: The number of the line containing the error.
- category: A string used to describe the "category" this bug
- falls under: "whitespace", say, or "runtime". Categories
- may have a hierarchy separated by slashes: "whitespace/indent".
- confidence: A number from 1-5 representing a confidence score for
- the error, with 5 meaning that we are certain of the problem,
- and 1 meaning that it could be a legitimate construct.
- message: The error message.
- """
- if _ShouldPrintError(category, confidence, linenum):
- _cpplint_state.IncrementErrorCount(category)
- if _cpplint_state.output_format == 'vs7':
- _cpplint_state.PrintError('%s(%s): error cpplint: [%s] %s [%d]\n' % (
- filename, linenum, category, message, confidence))
- elif _cpplint_state.output_format == 'eclipse':
- sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
- filename, linenum, message, category, confidence))
- elif _cpplint_state.output_format == 'junit':
- _cpplint_state.AddJUnitFailure(filename, linenum, message, category,
- confidence)
- elif _cpplint_state.output_format in ['sed', 'gsed']:
- if message in _SED_FIXUPS:
- sys.stdout.write(_cpplint_state.output_format + " -i '%s%s' %s # %s [%s] [%d]\n" % (
- linenum, _SED_FIXUPS[message], filename, message, category, confidence))
- else:
- sys.stderr.write('# %s:%s: "%s" [%s] [%d]\n' % (
- filename, linenum, message, category, confidence))
- else:
- final_message = '%s:%s: %s [%s] [%d]\n' % (
- filename, linenum, message, category, confidence)
- sys.stderr.write(final_message)
-
-# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
-_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
- r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
-# Match a single C style comment on the same line.
-_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
-# Matches multi-line C style comments.
-# This RE is a little bit more complicated than one might expect, because we
-# also have to strip the right amount of surrounding whitespace so we can
-# handle comments inside statements better.
-# The current rule is: we only clear spaces from both sides when we're at the
-# end of the line. Otherwise, we try to remove spaces from the right side;
-# if this doesn't work, we try the left side, but only if there's a non-word
-# character on the right.
-_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
- r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
- _RE_PATTERN_C_COMMENTS + r'\s+|' +
- r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
- _RE_PATTERN_C_COMMENTS + r')')
-
-
-def IsCppString(line):
- """Does line terminate so, that the next symbol is in string constant.
-
- This function does not consider single-line nor multi-line comments.
-
- Args:
- line: is a partial line of code starting from the 0..n.
-
- Returns:
- True, if next character appended to 'line' is inside a
- string constant.
- """
-
- line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
- return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
-
-
-def CleanseRawStrings(raw_lines):
- """Removes C++11 raw strings from lines.
-
- Before:
- static const char kData[] = R"(
- multi-line string
- )";
-
- After:
- static const char kData[] = ""
- (replaced by blank line)
- "";
-
- Args:
- raw_lines: list of raw lines.
-
- Returns:
- list of lines with C++11 raw strings replaced by empty strings.
- """
-
- delimiter = None
- lines_without_raw_strings = []
- for line in raw_lines:
- if delimiter:
- # Inside a raw string, look for the end
- end = line.find(delimiter)
- if end >= 0:
- # Found the end of the string, match leading space for this
- # line and resume copying the original lines, and also insert
- # a "" on the last line.
- leading_space = Match(r'^(\s*)\S', line)
- line = leading_space.group(1) + '""' + line[end + len(delimiter):]
- delimiter = None
- else:
- # Haven't found the end yet, append a blank line.
- line = '""'
-
- # Look for beginning of a raw string, and replace them with
- # empty strings. This is done in a loop to handle multiple raw
- # strings on the same line.
- while delimiter is None:
- # Look for beginning of a raw string.
- # See 2.14.15 [lex.string] for syntax.
- #
- # Once we have matched a raw string, we check the prefix of the
- # line to make sure that the line is not part of a single line
- # comment. It's done this way because we remove raw strings
- # before removing comments as opposed to removing comments
- # before removing raw strings. This is because there are some
- # cpplint checks that requires the comments to be preserved, but
- # we don't want to check comments that are inside raw strings.
- matched = Match(r'^(.*?)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
- if (matched and
- not Match(r'^([^\'"]|\'(\\.|[^\'])*\'|"(\\.|[^"])*")*//',
- matched.group(1))):
- delimiter = ')' + matched.group(2) + '"'
-
- end = matched.group(3).find(delimiter)
- if end >= 0:
- # Raw string ended on same line
- line = (matched.group(1) + '""' +
- matched.group(3)[end + len(delimiter):])
- delimiter = None
- else:
- # Start of a multi-line raw string
- line = matched.group(1) + '""'
- else:
- break
-
- lines_without_raw_strings.append(line)
-
- # TODO(unknown): if delimiter is not None here, we might want to
- # emit a warning for unterminated string.
- return lines_without_raw_strings
-
-
-def FindNextMultiLineCommentStart(lines, lineix):
- """Find the beginning marker for a multiline comment."""
- while lineix < len(lines):
- if lines[lineix].strip().startswith('/*'):
- # Only return this marker if the comment goes beyond this line
- if lines[lineix].strip().find('*/', 2) < 0:
- return lineix
- lineix += 1
- return len(lines)
-
-
-def FindNextMultiLineCommentEnd(lines, lineix):
- """We are inside a comment, find the end marker."""
- while lineix < len(lines):
- if lines[lineix].strip().endswith('*/'):
- return lineix
- lineix += 1
- return len(lines)
-
-
-def RemoveMultiLineCommentsFromRange(lines, begin, end):
- """Clears a range of lines for multi-line comments."""
- # Having /**/ dummy comments makes the lines non-empty, so we will not get
- # unnecessary blank line warnings later in the code.
- for i in range(begin, end):
- lines[i] = '/**/'
-
-
-def RemoveMultiLineComments(filename, lines, error):
- """Removes multiline (c-style) comments from lines."""
- lineix = 0
- while lineix < len(lines):
- lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
- if lineix_begin >= len(lines):
- return
- lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
- if lineix_end >= len(lines):
- error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
- 'Could not find end of multi-line comment')
- return
- RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
- lineix = lineix_end + 1
-
-
-def CleanseComments(line):
- """Removes //-comments and single-line C-style /* */ comments.
-
- Args:
- line: A line of C++ source.
-
- Returns:
- The line with single-line comments removed.
- """
- commentpos = line.find('//')
- if commentpos != -1 and not IsCppString(line[:commentpos]):
- line = line[:commentpos].rstrip()
- # get rid of /* ... */
- return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
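-# For example, CleanseComments('int x = 1; // init') returns 'int x = 1;' and
-# CleanseComments('a /* b */ c') returns 'a c'.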
-
-
-class CleansedLines(object):
- """Holds 4 copies of all lines with different preprocessing applied to them.
-
- 1) elided member contains lines without strings and comments.
- 2) lines member contains lines without comments.
- 3) raw_lines member contains all the lines without processing.
- 4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
- strings removed.
- All these members are of <type 'list'>, and of the same length.
- """
-
- def __init__(self, lines):
- self.elided = []
- self.lines = []
- self.raw_lines = lines
- self.num_lines = len(lines)
- self.lines_without_raw_strings = CleanseRawStrings(lines)
- for linenum in range(len(self.lines_without_raw_strings)):
- self.lines.append(CleanseComments(
- self.lines_without_raw_strings[linenum]))
- elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
- self.elided.append(CleanseComments(elided))
-
- def NumLines(self):
- """Returns the number of lines represented."""
- return self.num_lines
-
- @staticmethod
- def _CollapseStrings(elided):
- """Collapses strings and chars on a line to simple "" or '' blocks.
-
- We nix strings first so we're not fooled by text like '"http://"'
-
- Args:
- elided: The line being processed.
-
- Returns:
- The line with collapsed strings.
- """
- if _RE_PATTERN_INCLUDE.match(elided):
- return elided
-
- # Remove escaped characters first to make quote/single quote collapsing
- # basic. Things that look like escaped characters shouldn't occur
- # outside of strings and chars.
- elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
-
- # Replace quoted strings and digit separators. Both single quotes
- # and double quotes are processed in the same loop, otherwise
- # nested quotes wouldn't work.
- collapsed = ''
- while True:
- # Find the first quote character
- match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
- if not match:
- collapsed += elided
- break
- head, quote, tail = match.groups()
-
- if quote == '"':
- # Collapse double quoted strings
- second_quote = tail.find('"')
- if second_quote >= 0:
- collapsed += head + '""'
- elided = tail[second_quote + 1:]
- else:
- # Unmatched double quote, don't bother processing the rest
- # of the line since this is probably a multiline string.
- collapsed += elided
- break
- else:
- # Found single quote, check nearby text to eliminate digit separators.
- #
- # There is no special handling for floating point here, because
- # the integer/fractional/exponent parts would all be parsed
- # correctly as long as there are digits on both sides of the
- # separator. So we are fine as long as we don't see something
- # like "0.'3" (gcc 4.9.0 will not allow this literal).
- if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
- match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
- collapsed += head + match_literal.group(1).replace("'", '')
- elided = match_literal.group(2)
- else:
- second_quote = tail.find('\'')
- if second_quote >= 0:
- collapsed += head + "''"
- elided = tail[second_quote + 1:]
- else:
- # Unmatched single quote
- collapsed += elided
- break
-
- return collapsed
-
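-# A minimal sketch of _CollapseStrings on hypothetical lines, assuming the
-# cleansing regexes defined earlier in this file:
-#
-# CleansedLines._CollapseStrings('printf("%d", \'x\');') -> printf("", '');
-# (string and char literals collapse to empty "" and '' blocks)
-# CleansedLines._CollapseStrings("x = 1'000'000;") -> x = 1000000;
-# (C++14 digit separators are stripped so they don't look like chars)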
-
-def FindEndOfExpressionInLine(line, startpos, stack):
- """Find the position just after the end of current parenthesized expression.
-
- Args:
- line: a CleansedLines line.
- startpos: start searching at this position.
- stack: nesting stack at startpos.
-
- Returns:
- On finding matching end: (index just after matching end, None)
- On finding an unclosed expression: (-1, None)
- Otherwise: (-1, new stack at end of this line)
- """
- for i in xrange(startpos, len(line)):
- char = line[i]
- if char in '([{':
- # Found start of parenthesized expression, push to expression stack
- stack.append(char)
- elif char == '<':
- # Found potential start of template argument list
- if i > 0 and line[i - 1] == '<':
- # Left shift operator
- if stack and stack[-1] == '<':
- stack.pop()
- if not stack:
- return (-1, None)
- elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
- # operator<, don't add to stack
- continue
- else:
- # Tentative start of template argument list
- stack.append('<')
- elif char in ')]}':
- # Found end of parenthesized expression.
- #
- # If we are currently expecting a matching '>', the pending '<'
- # must have been an operator. Remove them from expression stack.
- while stack and stack[-1] == '<':
- stack.pop()
- if not stack:
- return (-1, None)
- if ((stack[-1] == '(' and char == ')') or
- (stack[-1] == '[' and char == ']') or
- (stack[-1] == '{' and char == '}')):
- stack.pop()
- if not stack:
- return (i + 1, None)
- else:
- # Mismatched parentheses
- return (-1, None)
- elif char == '>':
- # Found potential end of template argument list.
-
- # Ignore "->" and operator functions
- if (i > 0 and
- (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
- continue
-
- # Pop the stack if there is a matching '<'. Otherwise, ignore
- # this '>' since it must be an operator.
- if stack:
- if stack[-1] == '<':
- stack.pop()
- if not stack:
- return (i + 1, None)
- elif char == ';':
- # Found something that looks like the end of a statement. If we are currently
- # expecting a '>', the matching '<' must have been an operator, since
- # template argument list should not contain statements.
- while stack and stack[-1] == '<':
- stack.pop()
- if not stack:
- return (-1, None)
-
- # Did not find end of expression or unbalanced parentheses on this line
- return (-1, stack)
-
-
-def CloseExpression(clean_lines, linenum, pos):
- """If input points to ( or { or [ or <, finds the position that closes it.
-
- If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
- linenum/pos that correspond to the closing of the expression.
-
- TODO(unknown): cpplint spends a fair bit of time matching parentheses.
- Ideally we would want to index all opening and closing parentheses once
- and have CloseExpression be just a simple lookup, but due to preprocessor
- tricks, this is not so easy.
-
- Args:
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- pos: A position on the line.
-
- Returns:
- A tuple (line, linenum, pos) pointer *past* the closing brace, or
- (line, len(lines), -1) if we never find a close. Note we ignore
- strings and comments when matching; and the line we return is the
- 'cleansed' line at linenum.
- """
-
- line = clean_lines.elided[linenum]
- if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
- return (line, clean_lines.NumLines(), -1)
-
- # Check first line
- (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
- if end_pos > -1:
- return (line, linenum, end_pos)
-
- # Continue scanning forward
- while stack and linenum < clean_lines.NumLines() - 1:
- linenum += 1
- line = clean_lines.elided[linenum]
- (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
- if end_pos > -1:
- return (line, linenum, end_pos)
-
- # Did not find end of expression before end of file, give up
- return (line, clean_lines.NumLines(), -1)
-
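-# A minimal sketch of CloseExpression on a hypothetical one-line file,
-# assuming this module's CleansedLines helpers:
-#
-# lines = CleansedLines(['f(a, b)'])
-# CloseExpression(lines, 0, 1) -> ('f(a, b)', 0, 7), just past the ')'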
-
-def FindStartOfExpressionInLine(line, endpos, stack):
- """Find position at the matching start of current expression.
-
- This is almost the reverse of FindEndOfExpressionInLine, but note
- that the input position and returned position differ by 1.
-
- Args:
- line: a CleansedLines line.
- endpos: start searching at this position.
- stack: nesting stack at endpos.
-
- Returns:
- On finding matching start: (index at matching start, None)
- On finding an unclosed expression: (-1, None)
- Otherwise: (-1, new stack at beginning of this line)
- """
- i = endpos
- while i >= 0:
- char = line[i]
- if char in ')]}':
- # Found end of expression, push to expression stack
- stack.append(char)
- elif char == '>':
- # Found potential end of template argument list.
- #
- # Ignore it if it's a "->" or ">=" or "operator>"
- if (i > 0 and
- (line[i - 1] == '-' or
- Match(r'\s>=\s', line[i - 1:]) or
- Search(r'\boperator\s*$', line[0:i]))):
- i -= 1
- else:
- stack.append('>')
- elif char == '<':
- # Found potential start of template argument list
- if i > 0 and line[i - 1] == '<':
- # Left shift operator
- i -= 1
- else:
- # If there is a matching '>', we can pop the expression stack.
- # Otherwise, ignore this '<' since it must be an operator.
- if stack and stack[-1] == '>':
- stack.pop()
- if not stack:
- return (i, None)
- elif char in '([{':
- # Found start of expression.
- #
- # If there are any unmatched '>' on the stack, they must be
- # operators. Remove those.
- while stack and stack[-1] == '>':
- stack.pop()
- if not stack:
- return (-1, None)
- if ((char == '(' and stack[-1] == ')') or
- (char == '[' and stack[-1] == ']') or
- (char == '{' and stack[-1] == '}')):
- stack.pop()
- if not stack:
- return (i, None)
- else:
- # Mismatched parentheses
- return (-1, None)
- elif char == ';':
- # Found something that looks like the end of a statement. If we are currently
- # expecting a '<', the matching '>' must have been an operator, since
- # template argument list should not contain statements.
- while stack and stack[-1] == '>':
- stack.pop()
- if not stack:
- return (-1, None)
-
- i -= 1
-
- return (-1, stack)
-
-
-def ReverseCloseExpression(clean_lines, linenum, pos):
- """If input points to ) or } or ] or >, finds the position that opens it.
-
- If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
- linenum/pos that correspond to the opening of the expression.
-
- Args:
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- pos: A position on the line.
-
- Returns:
- A tuple (line, linenum, pos) pointer *at* the opening brace, or
- (line, 0, -1) if we never find the matching opening brace. Note
- we ignore strings and comments when matching; and the line we
- return is the 'cleansed' line at linenum.
- """
- line = clean_lines.elided[linenum]
- if line[pos] not in ')}]>':
- return (line, 0, -1)
-
- # Check last line
- (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
- if start_pos > -1:
- return (line, linenum, start_pos)
-
- # Continue scanning backward
- while stack and linenum > 0:
- linenum -= 1
- line = clean_lines.elided[linenum]
- (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
- if start_pos > -1:
- return (line, linenum, start_pos)
-
- # Did not find start of expression before beginning of file, give up
- return (line, 0, -1)
-
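-# The reverse direction on the same hypothetical input:
-#
-# lines = CleansedLines(['f(a, b)'])
-# ReverseCloseExpression(lines, 0, 6) -> ('f(a, b)', 0, 1), at the '('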
-
-def CheckForCopyright(filename, lines, error):
- """Logs an error if no Copyright message appears at the top of the file."""
-
- # We'll say it should occur by line 10. Don't forget there's a
- # dummy line at the front.
- for line in xrange(1, min(len(lines), 11)):
- if re.search(r'Copyright', lines[line], re.I): break
- else: # means no copyright line was found
- error(filename, 0, 'legal/copyright', 5,
- 'No copyright message found. '
- 'You should have a line: "Copyright [year] <Copyright Owner>"')
-
-
-def GetIndentLevel(line):
- """Return the number of leading spaces in line.
-
- Args:
- line: A string to check.
-
- Returns:
- An integer count of leading spaces, possibly zero.
- """
- indent = Match(r'^( *)\S', line)
- if indent:
- return len(indent.group(1))
- else:
- return 0
-
-def PathSplitToList(path):
- """Returns the path split into a list by the separator.
-
- Args:
- path: An absolute or relative path (e.g. '/a/b/c/' or '../a')
-
- Returns:
- A list of path components (e.g. ['a', 'b', 'c']).
- """
- lst = []
- while True:
- (head, tail) = os.path.split(path)
- if head == path: # absolute paths end
- lst.append(head)
- break
- if tail == path: # relative paths end
- lst.append(tail)
- break
-
- path = head
- lst.append(tail)
-
- lst.reverse()
- return lst
-
-def GetHeaderGuardCPPVariable(filename):
- """Returns the CPP variable that should be used as a header guard.
-
- Args:
- filename: The name of a C++ header file.
-
- Returns:
- The CPP variable that should be used as a header guard in the
- named file.
-
- """
-
- # Restores the original filename in case cpplint is invoked from Emacs's
- # flymake.
- filename = re.sub(r'_flymake\.h$', '.h', filename)
- filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
- # Replace 'c++' with 'cpp'.
- filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
-
- fileinfo = FileInfo(filename)
- file_path_from_root = fileinfo.RepositoryName()
-
- def FixupPathFromRoot():
- if _root_debug:
- sys.stderr.write("\n_root fixup, _root = '%s', repository name = '%s'\n"
- % (_root, fileinfo.RepositoryName()))
-
- # Process the file path with the --root flag if it was set.
- if not _root:
- if _root_debug:
- sys.stderr.write("_root unspecified\n")
- return file_path_from_root
-
- def StripListPrefix(lst, prefix):
- # f(['x', 'y'], ['w', 'z']) -> None (not a valid prefix)
- if lst[:len(prefix)] != prefix:
- return None
- # f(['a', 'b', 'c', 'd'], ['a', 'b']) -> ['c', 'd']
- return lst[(len(prefix)):]
-
- # root behavior:
- # --root=subdir , lstrips subdir from the header guard
- maybe_path = StripListPrefix(PathSplitToList(file_path_from_root),
- PathSplitToList(_root))
-
- if _root_debug:
- sys.stderr.write(("_root lstrip (maybe_path=%s, file_path_from_root=%s," +
- " _root=%s)\n") % (maybe_path, file_path_from_root, _root))
-
- if maybe_path:
- return os.path.join(*maybe_path)
-
- # --root=.. , will prepend the outer directory to the header guard
- full_path = fileinfo.FullName()
- root_abspath = os.path.abspath(_root)
-
- maybe_path = StripListPrefix(PathSplitToList(full_path),
- PathSplitToList(root_abspath))
-
- if _root_debug:
- sys.stderr.write(("_root prepend (maybe_path=%s, full_path=%s, " +
- "root_abspath=%s)\n") % (maybe_path, full_path, root_abspath))
-
- if maybe_path:
- return os.path.join(*maybe_path)
-
- if _root_debug:
- sys.stderr.write("_root ignore, returning %s\n" % (file_path_from_root))
-
- # --root=FAKE_DIR is ignored
- return file_path_from_root
-
- file_path_from_root = FixupPathFromRoot()
- return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
-
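-# A minimal sketch of the final transform above for a hypothetical
-# repository-relative path 'foo/bar-baz.h':
-#
-# re.sub(r'[^a-zA-Z0-9]', '_', 'foo/bar-baz.h').upper() + '_'
-# -> 'FOO_BAR_BAZ_H_'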
-
-def CheckForHeaderGuard(filename, clean_lines, error):
- """Checks that the file contains a header guard.
-
- Logs an error if no #ifndef header guard is present. For other
- headers, checks that the full pathname is used.
-
- Args:
- filename: The name of the C++ header file.
- clean_lines: A CleansedLines instance containing the file.
- error: The function to call with any errors found.
- """
-
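- # The accepted shape, sketched (cppvar is derived from the path below):
- #
- # #ifndef PATH_TO_FILE_H_
- # #define PATH_TO_FILE_H_
- # ...
- # #endif // PATH_TO_FILE_H_
-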
- # Don't check for header guards if there are error suppression
- # comments somewhere in this file.
- #
- # Because this is silencing a warning for a nonexistent line, we
- # only support the very specific NOLINT(build/header_guard) syntax,
- # and not the general NOLINT or NOLINT(*) syntax.
- raw_lines = clean_lines.lines_without_raw_strings
- for i in raw_lines:
- if Search(r'//\s*NOLINT\(build/header_guard\)', i):
- return
-
- # Allow pragma once instead of header guards
- for i in raw_lines:
- if Search(r'^\s*#pragma\s+once', i):
- return
-
- cppvar = GetHeaderGuardCPPVariable(filename)
-
- ifndef = ''
- ifndef_linenum = 0
- define = ''
- endif = ''
- endif_linenum = 0
- for linenum, line in enumerate(raw_lines):
- linesplit = line.split()
- if len(linesplit) >= 2:
- # find the first occurrence of #ifndef and #define, save arg
- if not ifndef and linesplit[0] == '#ifndef':
- # set ifndef to the header guard presented on the #ifndef line.
- ifndef = linesplit[1]
- ifndef_linenum = linenum
- if not define and linesplit[0] == '#define':
- define = linesplit[1]
- # find the last occurrence of #endif, save entire line
- if line.startswith('#endif'):
- endif = line
- endif_linenum = linenum
-
- if not ifndef or not define or ifndef != define:
- error(filename, 0, 'build/header_guard', 5,
- 'No #ifndef header guard found, suggested CPP variable is: %s' %
- cppvar)
- return
-
- # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
- # for backward compatibility.
- if ifndef != cppvar:
- error_level = 0
- if ifndef != cppvar + '_':
- error_level = 5
-
- ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
- error)
- error(filename, ifndef_linenum, 'build/header_guard', error_level,
- '#ifndef header guard has wrong style, please use: %s' % cppvar)
-
- # Check for "//" comments on endif line.
- ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
- error)
- match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
- if match:
- if match.group(1) == '_':
- # Issue low severity warning for deprecated double trailing underscore
- error(filename, endif_linenum, 'build/header_guard', 0,
- '#endif line should be "#endif // %s"' % cppvar)
- return
-
- # Didn't find the corresponding "//" comment. If this file does not
- # contain any "//" comments at all, it could be that the compiler
- # only wants "/**/" comments; look for those instead.
- no_single_line_comments = True
- for i in xrange(1, len(raw_lines) - 1):
- line = raw_lines[i]
- if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
- no_single_line_comments = False
- break
-
- if no_single_line_comments:
- match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
- if match:
- if match.group(1) == '_':
- # Low severity warning for double trailing underscore
- error(filename, endif_linenum, 'build/header_guard', 0,
- '#endif line should be "#endif /* %s */"' % cppvar)
- return
-
- # Didn't find anything
- error(filename, endif_linenum, 'build/header_guard', 5,
- '#endif line should be "#endif // %s"' % cppvar)
-
-
-def CheckHeaderFileIncluded(filename, include_state, error):
- """Logs an error if a source file does not include its header."""
-
- # Do not check test files
- fileinfo = FileInfo(filename)
- if Search(_TEST_FILE_SUFFIX, fileinfo.BaseName()):
- return
-
- for ext in GetHeaderExtensions():
- basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
- headerfile = basefilename + '.' + ext
- if not os.path.exists(headerfile):
- continue
- headername = FileInfo(headerfile).RepositoryName()
- first_include = None
- include_uses_unix_dir_aliases = False
- for section_list in include_state.include_list:
- for f in section_list:
- include_text = f[0]
- if "./" in include_text:
- include_uses_unix_dir_aliases = True
- if headername in include_text or include_text in headername:
- return
- if not first_include:
- first_include = f[1]
-
- message = '%s should include its header file %s' % (fileinfo.RepositoryName(), headername)
- if include_uses_unix_dir_aliases:
- message += ". Relative paths like . and .. are not allowed."
-
- error(filename, first_include, 'build/include', 5, message)
-
-
-def CheckForBadCharacters(filename, lines, error):
- """Logs an error for each line containing bad characters.
-
- Two kinds of bad characters:
-
- 1. Unicode replacement characters: These indicate that either the file
- contained invalid UTF-8 (likely) or Unicode replacement characters (which
- it shouldn't). Note that it's possible for this to throw off line
- numbering if the invalid UTF-8 occurred adjacent to a newline.
-
- 2. NUL bytes. These are problematic for some tools.
-
- Args:
- filename: The name of the current file.
- lines: An array of strings, each representing a line of the file.
- error: The function to call with any errors found.
- """
- for linenum, line in enumerate(lines):
- if unicode_escape_decode('\ufffd') in line:
- error(filename, linenum, 'readability/utf8', 5,
- 'Line contains invalid UTF-8 (or Unicode replacement character).')
- if '\0' in line:
- error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
-
-
-def CheckForNewlineAtEOF(filename, lines, error):
- """Logs an error if there is no newline char at the end of the file.
-
- Args:
- filename: The name of the current file.
- lines: An array of strings, each representing a line of the file.
- error: The function to call with any errors found.
- """
-
- # The array lines() was created by adding two newlines to the
- # original file (go figure), then splitting on \n.
- # To verify that the file ends in \n, we just have to make sure the
- # last-but-two element of lines() exists and is empty.
- if len(lines) < 3 or lines[-2]:
- error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
- 'Could not find a newline character at the end of the file.')
-
-
-def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
- """Logs an error if we see /* ... */ or "..." that extend past one line.
-
- /* ... */ comments are legit inside macros, for one line.
- Otherwise, we prefer // comments, so it's ok to warn about the
- other. Likewise, it's ok for strings to extend across multiple
- lines, as long as a line continuation character (backslash)
- terminates each line. Although not currently prohibited by the C++
- style guide, it's ugly and unnecessary. We don't do well with either
- in this lint program, so we warn about both.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
-
- # Remove all \\ (escaped backslashes) from the line. They are OK, and the
- # second (escaped) slash may trigger later \" detection erroneously.
- line = line.replace('\\\\', '')
-
- if line.count('/*') > line.count('*/'):
- error(filename, linenum, 'readability/multiline_comment', 5,
- 'Complex multi-line /*...*/-style comment found. '
- 'Lint may give bogus warnings. '
- 'Consider replacing these with //-style comments, '
- 'with #if 0...#endif, '
- 'or with more clearly structured multi-line comments.')
-
- if (line.count('"') - line.count('\\"')) % 2:
- error(filename, linenum, 'readability/multiline_string', 5,
- 'Multi-line string ("...") found. This lint script doesn\'t '
- 'do well with such strings, and may give bogus warnings. '
- 'Use C++11 raw strings or concatenation instead.')
-
-
-# (non-threadsafe name, thread-safe alternative, validation pattern)
-#
-# The validation pattern is used to eliminate false positives such as:
-# _rand(); // false positive due to substring match.
-# ->rand(); // some member function rand().
-# ACMRandom rand(seed); // some variable named rand.
-# ISAACRandom rand(); // another variable named rand.
-#
-# Basically we require the return value of these functions to be used
-# in some expression context on the same line by matching on some
-# operator before the function name. This eliminates constructors and
-# member function calls.
-_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
-_THREADING_LIST = (
- ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
- ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
- ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
- ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
- ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
- ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
- ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
- ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
- ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
- ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
- ('strtok(', 'strtok_r(', _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
- ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
- )
-
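-# A minimal sketch of why the operator prefix matters, on hypothetical lines:
-#
-# Search(_UNSAFE_FUNC_PREFIX + r'rand\(\)', 'x = rand();') -> match, flagged
-# Search(_UNSAFE_FUNC_PREFIX + r'rand\(\)', 'ACMRandom rand(seed);') -> None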
-
-def CheckPosixThreading(filename, clean_lines, linenum, error):
- """Checks for calls to thread-unsafe functions.
-
- Much code was originally written without consideration of
- multi-threading. Also, engineers often rely on old experience;
- they learned POSIX before the threading extensions were added. These
- tests guide engineers to use thread-safe functions (when using
- POSIX directly).
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
- for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
- # Additional pattern matching check to confirm that this is the
- # function we are looking for
- if Search(pattern, line):
- error(filename, linenum, 'runtime/threadsafe_fn', 2,
- 'Consider using ' + multithread_safe_func +
- '...) instead of ' + single_thread_func +
- '...) for improved thread safety.')
-
-
-def CheckVlogArguments(filename, clean_lines, linenum, error):
- """Checks that VLOG() is only used for defining a logging level.
-
- For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
- VLOG(FATAL) are not.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
- if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
- error(filename, linenum, 'runtime/vlog', 5,
- 'VLOG() should be used with numeric verbosity level. '
- 'Use LOG() if you want symbolic severity levels.')
-
-# Matches an invalid increment: *count++, which moves the pointer instead of
-# incrementing the value it points to.
-_RE_PATTERN_INVALID_INCREMENT = re.compile(
- r'^\s*\*\w+(\+\+|--);')
-
-
-def CheckInvalidIncrement(filename, clean_lines, linenum, error):
- """Checks for invalid increment *count++.
-
- For example, the following function:
- void increment_counter(int* count) {
- *count++;
- }
- is invalid because it effectively does count++, moving the pointer, and should
- be replaced with ++*count, (*count)++ or *count += 1.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
- if _RE_PATTERN_INVALID_INCREMENT.match(line):
- error(filename, linenum, 'runtime/invalid_increment', 5,
- 'Changing pointer instead of value (or unused value of operator*).')
-
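-# A minimal sketch of what the pattern above accepts and rejects:
-#
-# _RE_PATTERN_INVALID_INCREMENT.match('*count++;') -> match, flagged
-# _RE_PATTERN_INVALID_INCREMENT.match('(*count)++;') -> None, fine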
-
-def IsMacroDefinition(clean_lines, linenum):
- if Search(r'^#define', clean_lines[linenum]):
- return True
-
- if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
- return True
-
- return False
-
-
-def IsForwardClassDeclaration(clean_lines, linenum):
- return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
-
-
-class _BlockInfo(object):
- """Stores information about a generic block of code."""
-
- def __init__(self, linenum, seen_open_brace):
- self.starting_linenum = linenum
- self.seen_open_brace = seen_open_brace
- self.open_parentheses = 0
- self.inline_asm = _NO_ASM
- self.check_namespace_indentation = False
-
- def CheckBegin(self, filename, clean_lines, linenum, error):
- """Run checks that applies to text up to the opening brace.
-
- This is mostly for checking the text after the class identifier
- and the "{", usually where the base class is specified. For other
- blocks, there isn't much to check, so we always pass.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- pass
-
- def CheckEnd(self, filename, clean_lines, linenum, error):
- """Run checks that applies to text after the closing brace.
-
- This is mostly used for checking end of namespace comments.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- pass
-
- def IsBlockInfo(self):
- """Returns true if this block is a _BlockInfo.
-
- This is convenient for verifying that an object is an instance of
- a _BlockInfo, but not an instance of any of the derived classes.
-
- Returns:
- True for this class, False for derived classes.
- """
- return self.__class__ == _BlockInfo
-
-
-class _ExternCInfo(_BlockInfo):
- """Stores information about an 'extern "C"' block."""
-
- def __init__(self, linenum):
- _BlockInfo.__init__(self, linenum, True)
-
-
-class _ClassInfo(_BlockInfo):
- """Stores information about a class."""
-
- def __init__(self, name, class_or_struct, clean_lines, linenum):
- _BlockInfo.__init__(self, linenum, False)
- self.name = name
- self.is_derived = False
- self.check_namespace_indentation = True
- if class_or_struct == 'struct':
- self.access = 'public'
- self.is_struct = True
- else:
- self.access = 'private'
- self.is_struct = False
-
- # Remember initial indentation level for this class. Using raw_lines here
- # instead of elided to account for leading comments.
- self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
-
- # Try to find the end of the class. This will be confused by things like:
- # class A {
- # } *x = { ...
- #
- # But it's still good enough for CheckSectionSpacing.
- self.last_line = 0
- depth = 0
- for i in range(linenum, clean_lines.NumLines()):
- line = clean_lines.elided[i]
- depth += line.count('{') - line.count('}')
- if not depth:
- self.last_line = i
- break
-
- def CheckBegin(self, filename, clean_lines, linenum, error):
- # Look for a bare ':'
- if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
- self.is_derived = True
-
- def CheckEnd(self, filename, clean_lines, linenum, error):
- # If there is a DISALLOW macro, it should appear near the end of
- # the class.
- seen_last_thing_in_class = False
- for i in xrange(linenum - 1, self.starting_linenum, -1):
- match = Search(
- r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
- self.name + r'\)',
- clean_lines.elided[i])
- if match:
- if seen_last_thing_in_class:
- error(filename, i, 'readability/constructors', 3,
- match.group(1) + ' should be the last thing in the class')
- break
-
- if not Match(r'^\s*$', clean_lines.elided[i]):
- seen_last_thing_in_class = True
-
- # Check that closing brace is aligned with beginning of the class.
- # Only do this if the closing brace is indented by only whitespaces.
- # This means we will not check single-line class definitions.
- indent = Match(r'^( *)\}', clean_lines.elided[linenum])
- if indent and len(indent.group(1)) != self.class_indent:
- if self.is_struct:
- parent = 'struct ' + self.name
- else:
- parent = 'class ' + self.name
- error(filename, linenum, 'whitespace/indent', 3,
- 'Closing brace should be aligned with beginning of %s' % parent)
-
-
-class _NamespaceInfo(_BlockInfo):
- """Stores information about a namespace."""
-
- def __init__(self, name, linenum):
- _BlockInfo.__init__(self, linenum, False)
- self.name = name or ''
- self.check_namespace_indentation = True
-
- def CheckEnd(self, filename, clean_lines, linenum, error):
- """Check end of namespace comments."""
- line = clean_lines.raw_lines[linenum]
-
- # Check how many lines are enclosed in this namespace. Don't issue
- # warning for missing namespace comments if there aren't enough
- # lines. However, do apply checks if there is already an end of
- # namespace comment and it's incorrect.
- #
- # TODO(unknown): We always want to check end of namespace comments
- # if a namespace is large, but sometimes we also want to apply the
- # check if a short namespace contained nontrivial things (something
- # other than forward declarations). There is currently no logic on
- # deciding what these nontrivial things are, so this check is
- # triggered by namespace size only, which works most of the time.
- if (linenum - self.starting_linenum < 10
- and not Match(r'^\s*};*\s*(//|/\*).*\bnamespace\b', line)):
- return
-
- # Look for matching comment at end of namespace.
- #
- # Note that we accept C style "/* */" comments for terminating
- # namespaces, so that code that terminates namespaces inside
- # preprocessor macros can be cpplint clean.
- #
- # We also accept stuff like "// end of namespace <name>." with the
- # period at the end.
- #
- # Besides these, we don't accept anything else, otherwise we might
- # get false negatives when existing comment is a substring of the
- # expected namespace.
- if self.name:
- # Named namespace
- if not Match((r'^\s*};*\s*(//|/\*).*\bnamespace\s+' +
- re.escape(self.name) + r'[\*/\.\\\s]*$'),
- line):
- error(filename, linenum, 'readability/namespace', 5,
- 'Namespace should be terminated with "// namespace %s"' %
- self.name)
- else:
- # Anonymous namespace
- if not Match(r'^\s*};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
- # If "// namespace anonymous" or "// anonymous namespace (more text)",
- # mention "// anonymous namespace" as an acceptable form
- if Match(r'^\s*}.*\b(namespace anonymous|anonymous namespace)\b', line):
- error(filename, linenum, 'readability/namespace', 5,
- 'Anonymous namespace should be terminated with "// namespace"'
- ' or "// anonymous namespace"')
- else:
- error(filename, linenum, 'readability/namespace', 5,
- 'Anonymous namespace should be terminated with "// namespace"')
-
-
-class _PreprocessorInfo(object):
- """Stores checkpoints of nesting stacks when #if/#else is seen."""
-
- def __init__(self, stack_before_if):
- # The entire nesting stack before #if
- self.stack_before_if = stack_before_if
-
- # The entire nesting stack up to #else
- self.stack_before_else = []
-
- # Whether we have already seen #else or #elif
- self.seen_else = False
-
-
-class NestingState(object):
- """Holds states related to parsing braces."""
-
- def __init__(self):
- # Stack for tracking all braces. An object is pushed whenever we
- # see a "{", and popped when we see a "}". Only 3 types of
- # objects are possible:
- # - _ClassInfo: a class or struct.
- # - _NamespaceInfo: a namespace.
- # - _BlockInfo: some other type of block.
- self.stack = []
-
- # Top of the previous stack before each Update().
- #
- # Because the nesting_stack is updated at the end of each line, we
- # had to do some convoluted checks to find out what is the current
- # scope at the beginning of the line. This check is simplified by
- # saving the previous top of nesting stack.
- #
- # We could save the full stack, but we only need the top. Copying
- # the full nesting stack would slow down cpplint by ~10%.
- self.previous_stack_top = []
-
- # Stack of _PreprocessorInfo objects.
- self.pp_stack = []
-
- def SeenOpenBrace(self):
- """Check if we have seen the opening brace for the innermost block.
-
- Returns:
- True if we have seen the opening brace, False if the innermost
- block is still expecting an opening brace.
- """
- return (not self.stack) or self.stack[-1].seen_open_brace
-
- def InNamespaceBody(self):
- """Check if we are currently one level inside a namespace body.
-
- Returns:
- True if top of the stack is a namespace block, False otherwise.
- """
- return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
-
- def InExternC(self):
- """Check if we are currently one level inside an 'extern "C"' block.
-
- Returns:
- True if top of the stack is an extern block, False otherwise.
- """
- return self.stack and isinstance(self.stack[-1], _ExternCInfo)
-
- def InClassDeclaration(self):
- """Check if we are currently one level inside a class or struct declaration.
-
- Returns:
- True if top of the stack is a class/struct, False otherwise.
- """
- return self.stack and isinstance(self.stack[-1], _ClassInfo)
-
- def InAsmBlock(self):
- """Check if we are currently one level inside an inline ASM block.
-
- Returns:
- True if the top of the stack is a block containing inline ASM.
- """
- return self.stack and self.stack[-1].inline_asm != _NO_ASM
-
- def InTemplateArgumentList(self, clean_lines, linenum, pos):
- """Check if current position is inside template argument list.
-
- Args:
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- pos: position just after the suspected template argument.
- Returns:
- True if (linenum, pos) is inside template arguments.
- """
- while linenum < clean_lines.NumLines():
- # Find the earliest character that might indicate a template argument
- line = clean_lines.elided[linenum]
- match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
- if not match:
- linenum += 1
- pos = 0
- continue
- token = match.group(1)
- pos += len(match.group(0))
-
- # These things do not look like template argument list:
- # class Suspect {
- # class Suspect x; }
- if token in ('{', '}', ';'): return False
-
- # These things look like template argument list:
- # template <class Suspect>
- # template <class Suspect = default_value>
- # template <class Suspect[]>
- # template <class Suspect...>
- if token in ('>', '=', '[', ']', '.'): return True
-
- # Check if token is an unmatched '<'.
- # If not, move on to the next character.
- if token != '<':
- pos += 1
- if pos >= len(line):
- linenum += 1
- pos = 0
- continue
-
- # We can't be sure if we just find a single '<', and need to
- # find the matching '>'.
- (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
- if end_pos < 0:
- # Not sure if template argument list or syntax error in file
- return False
- linenum = end_line
- pos = end_pos
- return False
-
- def UpdatePreprocessor(self, line):
- """Update preprocessor stack.
-
- We need to handle preprocessors due to classes like this:
- #ifdef SWIG
- struct ResultDetailsPageElementExtensionPoint {
- #else
- struct ResultDetailsPageElementExtensionPoint : public Extension {
- #endif
-
- We make the following assumptions (good enough for most files):
- - Preprocessor condition evaluates to true from #if up to first
- #else/#elif/#endif.
-
- - Preprocessor condition evaluates to false from #else/#elif up
- to #endif. We still perform lint checks on these lines, but
- these do not affect nesting stack.
-
- Args:
- line: current line to check.
- """
- if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
- # Beginning of #if block, save the nesting stack here. The saved
- # stack will allow us to restore the parsing state in the #else case.
- self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
- elif Match(r'^\s*#\s*(else|elif)\b', line):
- # Beginning of #else block
- if self.pp_stack:
- if not self.pp_stack[-1].seen_else:
- # This is the first #else or #elif block. Remember the
- # whole nesting stack up to this point. This is what we
- # keep after the #endif.
- self.pp_stack[-1].seen_else = True
- self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
-
- # Restore the stack to how it was before the #if
- self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
- else:
- # TODO(unknown): unexpected #else, issue warning?
- pass
- elif Match(r'^\s*#\s*endif\b', line):
- # End of #if or #else blocks.
- if self.pp_stack:
- # If we saw an #else, we will need to restore the nesting
- # stack to its former state before the #else, otherwise we
- # will just continue from where we left off.
- if self.pp_stack[-1].seen_else:
- # Here we can just use a shallow copy since we are the last
- # reference to it.
- self.stack = self.pp_stack[-1].stack_before_else
- # Drop the corresponding #if
- self.pp_stack.pop()
- else:
- # TODO(unknown): unexpected #endif, issue warning?
- pass
-
- # TODO(unknown): Update() is too long, but we will refactor later.
- def Update(self, filename, clean_lines, linenum, error):
- """Update nesting state with current line.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
-
- # Remember top of the previous nesting stack.
- #
- # The stack is always pushed/popped and not modified in place, so
- # we can just do a shallow copy instead of copy.deepcopy. Using
- # deepcopy would slow down cpplint by ~28%.
- if self.stack:
- self.previous_stack_top = self.stack[-1]
- else:
- self.previous_stack_top = None
-
- # Update pp_stack
- self.UpdatePreprocessor(line)
-
- # Count parentheses. This is to avoid adding struct arguments to
- # the nesting stack.
- if self.stack:
- inner_block = self.stack[-1]
- depth_change = line.count('(') - line.count(')')
- inner_block.open_parentheses += depth_change
-
- # Also check if we are starting or ending an inline assembly block.
- if inner_block.inline_asm in (_NO_ASM, _END_ASM):
- if (depth_change != 0 and
- inner_block.open_parentheses == 1 and
- _MATCH_ASM.match(line)):
- # Enter assembly block
- inner_block.inline_asm = _INSIDE_ASM
- else:
- # Not entering assembly block. If previous line was _END_ASM,
- # we will now shift to _NO_ASM state.
- inner_block.inline_asm = _NO_ASM
- elif (inner_block.inline_asm == _INSIDE_ASM and
- inner_block.open_parentheses == 0):
- # Exit assembly block
- inner_block.inline_asm = _END_ASM
-
- # Consume namespace declaration at the beginning of the line. Do
- # this in a loop so that we catch same line declarations like this:
- # namespace proto2 { namespace bridge { class MessageSet; } }
- while True:
- # Match start of namespace. The "\b\s*" below catches namespace
- # declarations even if they aren't followed by whitespace, so that
- # we don't confuse our namespace checker. The missing spaces will
- # be flagged by CheckSpacing.
- namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
- if not namespace_decl_match:
- break
-
- new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
- self.stack.append(new_namespace)
-
- line = namespace_decl_match.group(2)
- if line.find('{') != -1:
- new_namespace.seen_open_brace = True
- line = line[line.find('{') + 1:]
-
- # Look for a class declaration in whatever is left of the line
- # after parsing namespaces. The regexp accounts for decorated classes
- # such as in:
- # class LOCKABLE API Object {
- # };
- class_decl_match = Match(
- r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
- r'(class|struct)\s+(?:[a-zA-Z0-9_]+\s+)*(\w+(?:::\w+)*))'
- r'(.*)$', line)
- if (class_decl_match and
- (not self.stack or self.stack[-1].open_parentheses == 0)):
- # We do not want to accept classes that are actually template arguments:
- # template <class Ignore1,
- # class Ignore2 = default_value,
- # template <typename> class Ignore3>
- # void Function() {};
- #
- # To avoid template argument cases, we scan forward and look for
- # an unmatched '>'. If we see one, assume we are inside a
- # template argument list.
- end_declaration = len(class_decl_match.group(1))
- if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
- self.stack.append(_ClassInfo(
- class_decl_match.group(3), class_decl_match.group(2),
- clean_lines, linenum))
- line = class_decl_match.group(4)
-
- # If we have not yet seen the opening brace for the innermost block,
- # run checks here.
- if not self.SeenOpenBrace():
- self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
-
- # Update access control if we are inside a class/struct
- if self.stack and isinstance(self.stack[-1], _ClassInfo):
- classinfo = self.stack[-1]
- access_match = Match(
- r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
- r':(?:[^:]|$)',
- line)
- if access_match:
- classinfo.access = access_match.group(2)
-
- # Check that access keywords are indented +1 space. Skip this
- # check if the keywords are not preceded by whitespaces.
- indent = access_match.group(1)
- if (len(indent) != classinfo.class_indent + 1 and
- Match(r'^\s*$', indent)):
- if classinfo.is_struct:
- parent = 'struct ' + classinfo.name
- else:
- parent = 'class ' + classinfo.name
- slots = ''
- if access_match.group(3):
- slots = access_match.group(3)
- error(filename, linenum, 'whitespace/indent', 3,
- '%s%s: should be indented +1 space inside %s' % (
- access_match.group(2), slots, parent))
-
- # Consume braces or semicolons from what's left of the line
- while True:
- # Match first brace, semicolon, or closed parenthesis.
- matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
- if not matched:
- break
-
- token = matched.group(1)
- if token == '{':
- # If namespace or class hasn't seen an opening brace yet, mark
- # namespace/class head as complete. Push a new block onto the
- # stack otherwise.
- if not self.SeenOpenBrace():
- self.stack[-1].seen_open_brace = True
- elif Match(r'^extern\s*"[^"]*"\s*\{', line):
- self.stack.append(_ExternCInfo(linenum))
- else:
- self.stack.append(_BlockInfo(linenum, True))
- if _MATCH_ASM.match(line):
- self.stack[-1].inline_asm = _BLOCK_ASM
-
- elif token == ';' or token == ')':
- # If we haven't seen an opening brace yet, but we already saw
- # a semicolon, this is probably a forward declaration. Pop
- # the stack for these.
- #
- # Similarly, if we haven't seen an opening brace yet, but we
- # already saw a closing parenthesis, then these are probably
- # function arguments with extra "class" or "struct" keywords.
- # Also pop the stack for these.
- if not self.SeenOpenBrace():
- self.stack.pop()
- else: # token == '}'
- # Perform end of block checks and pop the stack.
- if self.stack:
- self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
- self.stack.pop()
- line = matched.group(2)
-
- def InnermostClass(self):
- """Get class info on the top of the stack.
-
- Returns:
- A _ClassInfo object if we are inside a class, or None otherwise.
- """
- for i in range(len(self.stack), 0, -1):
- classinfo = self.stack[i - 1]
- if isinstance(classinfo, _ClassInfo):
- return classinfo
- return None
-
- def CheckCompletedBlocks(self, filename, error):
- """Checks that all classes and namespaces have been completely parsed.
-
- Call this when all lines in a file have been processed.
- Args:
- filename: The name of the current file.
- error: The function to call with any errors found.
- """
- # Note: This test can result in false positives if #ifdef constructs
- # get in the way of brace matching. See the testBuildClass test in
- # cpplint_unittest.py for an example of this.
- for obj in self.stack:
- if isinstance(obj, _ClassInfo):
- error(filename, obj.starting_linenum, 'build/class', 5,
- 'Failed to find complete declaration of class %s' %
- obj.name)
- elif isinstance(obj, _NamespaceInfo):
- error(filename, obj.starting_linenum, 'build/namespaces', 5,
- 'Failed to find complete declaration of namespace %s' %
- obj.name)
-
-
-def CheckForNonStandardConstructs(filename, clean_lines, linenum,
- nesting_state, error):
- r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
-
- Complain about several constructs which gcc-2 accepts, but which are
- not standard C++. Warning about these in lint is one way to ease the
- transition to new compilers.
- - put storage class first (e.g. "static const" instead of "const static").
- - "%lld" instead of %qd" in printf-type functions.
- - "%1$d" is non-standard in printf-type functions.
- - "\%" is an undefined character escape sequence.
- - text after #endif is not allowed.
- - invalid inner-style forward declaration.
- - ">?" and "<?" operators, and their ">?=" and "<?=" cousins.
-
- Additionally, check for constructor/destructor style violations and reference
- members, as it is very convenient to do so while checking for
- gcc-2 compliance.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- nesting_state: A NestingState instance which maintains information about
- the current stack of nested blocks being parsed.
- error: A callable to which errors are reported, which takes 4 arguments:
- filename, line number, error level, and message
- """
-
- # Remove comments from the line, but leave in strings for now.
- line = clean_lines.lines[linenum]
-
- if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
- error(filename, linenum, 'runtime/printf_format', 3,
- '%q in format strings is deprecated. Use %ll instead.')
-
- if Search(r'printf\s*\(.*".*%\d+\$', line):
- error(filename, linenum, 'runtime/printf_format', 2,
- '%N$ formats are unconventional. Try rewriting to avoid them.')
-
- # Remove escaped backslashes before looking for undefined escapes.
- line = line.replace('\\\\', '')
-
- if Search(r'("|\').*\\(%|\[|\(|{)', line):
- error(filename, linenum, 'build/printf_format', 3,
- '%, [, (, and { are undefined character escapes. Unescape them.')
-
- # For the rest, work with both comments and strings removed.
- line = clean_lines.elided[linenum]
-
- if Search(r'\b(const|volatile|void|char|short|int|long'
- r'|float|double|signed|unsigned'
- r'|schar|u?int8|u?int16|u?int32|u?int64)'
- r'\s+(register|static|extern|typedef)\b',
- line):
- error(filename, linenum, 'build/storage_class', 5,
- 'Storage-class specifier (static, extern, typedef, etc) should be '
- 'at the beginning of the declaration.')
-
- if Match(r'\s*#\s*endif\s*[^/\s]+', line):
- error(filename, linenum, 'build/endif_comment', 5,
- 'Uncommented text after #endif is non-standard. Use a comment.')
-
- if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
- error(filename, linenum, 'build/forward_decl', 5,
- 'Inner-style forward declarations are invalid. Remove this line.')
-
- if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
- line):
- error(filename, linenum, 'build/deprecated', 3,
- '>? and <? (max and min) operators are non-standard and deprecated.')
-
- if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
- # TODO(unknown): Could it be expanded safely to arbitrary references,
- # without triggering too many false positives? The first
- # attempt triggered 5 warnings for mostly benign code in the regtest, hence
- # the restriction.
- # Here's the original regexp, for the reference:
- # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
- # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
- error(filename, linenum, 'runtime/member_string_references', 2,
- 'const string& members are dangerous. It is much better to use '
- 'alternatives, such as pointers or simple constants.')
-
- # Everything else in this function operates on class declarations.
- # Return early if the top of the nesting stack is not a class, or if
- # the class head is not completed yet.
- classinfo = nesting_state.InnermostClass()
- if not classinfo or not classinfo.seen_open_brace:
- return
-
- # The class may have been declared with namespace or classname qualifiers.
- # The constructor and destructor will not have those qualifiers.
- base_classname = classinfo.name.split('::')[-1]
-
- # Look for single-argument constructors that aren't marked explicit.
- # Technically a valid construct, but against style.
- explicit_constructor_match = Match(
- r'\s+(?:(?:inline|constexpr)\s+)*(explicit\s+)?'
- r'(?:(?:inline|constexpr)\s+)*%s\s*'
- r'\(((?:[^()]|\([^()]*\))*)\)'
- % re.escape(base_classname),
- line)
-
- if explicit_constructor_match:
- is_marked_explicit = explicit_constructor_match.group(1)
-
- if not explicit_constructor_match.group(2):
- constructor_args = []
- else:
- constructor_args = explicit_constructor_match.group(2).split(',')
-
- # Collapse arguments so that commas in template parameter lists and
- # function argument lists don't split a single argument in two; e.g.
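- # ['std::map<int', ' int> m', ' int x = 0'] is re-joined below into
- # ['std::map<int, int> m', ' int x = 0'] (a hypothetical argument list).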
- i = 0
- while i < len(constructor_args):
- constructor_arg = constructor_args[i]
- while (constructor_arg.count('<') > constructor_arg.count('>') or
- constructor_arg.count('(') > constructor_arg.count(')')):
- constructor_arg += ',' + constructor_args[i + 1]
- del constructor_args[i + 1]
- constructor_args[i] = constructor_arg
- i += 1
-
- variadic_args = [arg for arg in constructor_args if '&&...' in arg]
- defaulted_args = [arg for arg in constructor_args if '=' in arg]
- noarg_constructor = (not constructor_args or # empty arg list
- # 'void' arg specifier
- (len(constructor_args) == 1 and
- constructor_args[0].strip() == 'void'))
- onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
- not noarg_constructor) or
- # all but at most one arg defaulted
- (len(constructor_args) >= 1 and
- not noarg_constructor and
- len(defaulted_args) >= len(constructor_args) - 1) or
- # variadic arguments with zero or one argument
- (len(constructor_args) <= 2 and
- len(variadic_args) >= 1))
- initializer_list_constructor = bool(
- onearg_constructor and
- Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
- copy_constructor = bool(
- onearg_constructor and
- Match(r'((const\s+(volatile\s+)?)?|(volatile\s+(const\s+)?))?'
- r'%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
- % re.escape(base_classname), constructor_args[0].strip()))
-
- if (not is_marked_explicit and
- onearg_constructor and
- not initializer_list_constructor and
- not copy_constructor):
- if defaulted_args or variadic_args:
- error(filename, linenum, 'runtime/explicit', 5,
- 'Constructors callable with one argument '
- 'should be marked explicit.')
- else:
- error(filename, linenum, 'runtime/explicit', 5,
- 'Single-parameter constructors should be marked explicit.')
- elif is_marked_explicit and not onearg_constructor:
- if noarg_constructor:
- error(filename, linenum, 'runtime/explicit', 5,
- 'Zero-parameter constructors should not be marked explicit.')
-
-
-def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
- """Checks for the correctness of various spacing around function calls.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
-
- # Since function calls often occur inside if/for/while/switch
- # expressions - which have their own, more liberal conventions - we
- # first see if we should be looking inside such an expression for a
- # function call, to which we can apply more strict standards.
- fncall = line # if there's no control flow construct, look at whole line
- for pattern in (r'\bif\s*\((.*)\)\s*{',
- r'\bfor\s*\((.*)\)\s*{',
- r'\bwhile\s*\((.*)\)\s*[{;]',
- r'\bswitch\s*\((.*)\)\s*{'):
- match = Search(pattern, line)
- if match:
- fncall = match.group(1) # look inside the parens for function calls
- break
-
- # Except in if/for/while/switch, there should never be space
- # immediately inside parens (eg "f( 3, 4 )"). We make an exception
- # for nested parens ( (a+b) + c ). Likewise, there should never be
- # a space before a ( when it's a function argument. I assume it's a
- # function argument when the char before the whitespace is legal in
- # a function name (alnum + _) and we're not starting a macro. Also ignore
- pointers and references to arrays and functions because they're too tricky:
- # we use a very simple way to recognize these:
- # " (something)(maybe-something)" or
- # " (something)(maybe-something," or
- # " (something)[something]"
- # Note that we assume the contents of [] to be short enough that
- # they'll never need to wrap.
- if ( # Ignore control structures.
- not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
- fncall) and
- # Ignore pointers/references to functions.
- not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
- # Ignore pointers/references to arrays.
- not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
- if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
- error(filename, linenum, 'whitespace/parens', 4,
- 'Extra space after ( in function call')
- elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
- error(filename, linenum, 'whitespace/parens', 2,
- 'Extra space after (')
- if (Search(r'\w\s+\(', fncall) and
- not Search(r'_{0,2}asm_{0,2}\s+_{0,2}volatile_{0,2}\s+\(', fncall) and
- not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
- not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
- not Search(r'\bcase\s+\(', fncall)):
- # TODO(unknown): Space after an operator function seems to be a common
- # error; silence those for now by restricting them to the highest verbosity.
- if Search(r'\boperator_*\b', line):
- error(filename, linenum, 'whitespace/parens', 0,
- 'Extra space before ( in function call')
- else:
- error(filename, linenum, 'whitespace/parens', 4,
- 'Extra space before ( in function call')
- # If the ) is followed only by a newline or a { + newline, assume it's
- # part of a control statement (if/while/etc), and don't complain
- if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
- # If the closing parenthesis is preceded by only whitespaces,
- # try to give a more descriptive error message.
- if Search(r'^\s+\)', fncall):
- error(filename, linenum, 'whitespace/parens', 2,
- 'Closing ) should be moved to the previous line')
- else:
- error(filename, linenum, 'whitespace/parens', 2,
- 'Extra space before )')
-
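-# A minimal sketch of lines this check flags, on hypothetical input:
-#
-# 'f( 3, 4 );' -> "Extra space after ( in function call" and
-# "Extra space before )"
-# 'f(3, 4);' -> clean, no warning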
-
-def IsBlankLine(line):
- """Returns true if the given line is blank.
-
- We consider a line to be blank if the line is empty or consists of
- only white spaces.
-
- Args:
- line: A line of a string.
-
- Returns:
- True, if the given line is blank.
- """
- return not line or line.isspace()
-
-
-def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
- error):
- is_namespace_indent_item = (
- len(nesting_state.stack) > 1 and
- nesting_state.stack[-1].check_namespace_indentation and
- isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
- nesting_state.previous_stack_top == nesting_state.stack[-2])
-
- if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
- clean_lines.elided, line):
- CheckItemIndentationInNamespace(filename, clean_lines.elided,
- line, error)
-
-
-def CheckForFunctionLengths(filename, clean_lines, linenum,
- function_state, error):
- """Reports for long function bodies.
-
- For an overview why this is done, see:
- https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
-
- Uses a simplistic algorithm assuming other style guidelines
- (especially spacing) are followed.
- Only checks unindented functions, so class members are unchecked.
- Trivial bodies are unchecked, so constructors with huge initializer lists
- may be missed.
- Blank/comment lines are not counted so as to avoid encouraging the removal
- of vertical space and comments just to get through a lint check.
- NOLINT *on the last line of a function* disables this check.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- function_state: Current function name and lines in body so far.
- error: The function to call with any errors found.
- """
- lines = clean_lines.lines
- line = lines[linenum]
- joined_line = ''
-
- starting_func = False
- regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
- match_result = Match(regexp, line)
- if match_result:
- # If the name is all caps and underscores, figure it's a macro and
- # ignore it, unless it's TEST or TEST_F.
- function_name = match_result.group(1).split()[-1]
- if function_name == 'TEST' or function_name == 'TEST_F' or (
- not Match(r'[A-Z_]+$', function_name)):
- starting_func = True
-
- if starting_func:
- body_found = False
- for start_linenum in xrange(linenum, clean_lines.NumLines()):
- start_line = lines[start_linenum]
- joined_line += ' ' + start_line.lstrip()
- if Search(r'(;|})', start_line): # Declarations and trivial functions
- body_found = True
- break # ... ignore
- if Search(r'{', start_line):
- body_found = True
- function = Search(r'((\w|:)*)\(', line).group(1)
- if Match(r'TEST', function): # Handle TEST... macros
- parameter_regexp = Search(r'(\(.*\))', joined_line)
- if parameter_regexp: # Ignore bad syntax
- function += parameter_regexp.group(1)
- else:
- function += '()'
- function_state.Begin(function)
- break
- if not body_found:
- # No body for the function (or evidence of a non-function) was found.
- error(filename, linenum, 'readability/fn_size', 5,
- 'Lint failed to find start of function body.')
- elif Match(r'^\}\s*$', line): # function end
- function_state.Check(error, filename, linenum)
- function_state.End()
- elif not Match(r'^\s*$', line):
- function_state.Count() # Count non-blank/non-comment lines.
-
-
-_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
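-# e.g. (illustrative): matches '// TODO(user): fix', '//TODO: fix', and
-# '// TODO fix'; CheckComment below reports the spacing and username
-# problems for the malformed variants.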
-
-
-def CheckComment(line, filename, linenum, next_line_start, error):
- """Checks for common mistakes in comments.
-
- Args:
- line: The line in question.
- filename: The name of the current file.
- linenum: The number of the line to check.
- next_line_start: The first non-whitespace column of the next line.
- error: The function to call with any errors found.
- """
- commentpos = line.find('//')
- if commentpos != -1:
- # Check if the // may be in quotes. If so, ignore it
- if re.sub(r'\\.', '', line[0:commentpos]).count('"') % 2 == 0:
- # Allow one space for new scopes, two spaces otherwise:
- if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
- ((commentpos >= 1 and
- line[commentpos-1] not in string.whitespace) or
- (commentpos >= 2 and
- line[commentpos-2] not in string.whitespace))):
- error(filename, linenum, 'whitespace/comments', 2,
- 'At least two spaces is best between code and comments')
-
- # Checks for common mistakes in TODO comments.
- comment = line[commentpos:]
- match = _RE_PATTERN_TODO.match(comment)
- if match:
- # One whitespace is correct; zero whitespace is handled elsewhere.
- leading_whitespace = match.group(1)
- if len(leading_whitespace) > 1:
- error(filename, linenum, 'whitespace/todo', 2,
- 'Too many spaces before TODO')
-
- username = match.group(2)
- if not username:
- error(filename, linenum, 'readability/todo', 2,
- 'Missing username in TODO; it should look like '
- '"// TODO(my_username): Stuff."')
-
- middle_whitespace = match.group(3)
- # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
- if middle_whitespace != ' ' and middle_whitespace != '':
- error(filename, linenum, 'whitespace/todo', 2,
- 'TODO(my_username) should be followed by a space')
-
- # If the comment contains an alphanumeric character, there
- # should be a space somewhere between it and the // unless
- # it's a /// or //! Doxygen comment.
- if (Match(r'//[^ ]*\w', comment) and
- not Match(r'(///|//\!)(\s+|$)', comment)):
- error(filename, linenum, 'whitespace/comments', 4,
- 'Should have a space between // and comment')
-
-
-def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
- """Checks for the correctness of various spacing issues in the code.
-
- Things we check for: spaces around operators, spaces after
- if/for/while/switch, no spaces around parens in function calls, two
- spaces between code and comment, don't start a block with a blank
- line, don't end a function with a blank line, don't add a blank line
- after public/protected/private, don't have too many blank lines in a row.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- nesting_state: A NestingState instance which maintains information about
- the current stack of nested blocks being parsed.
- error: The function to call with any errors found.
- """
-
- # Don't use "elided" lines here, otherwise we can't check commented lines.
- # Don't want to use "raw" either, because we don't want to check inside C++11
-  # raw strings.
- raw = clean_lines.lines_without_raw_strings
- line = raw[linenum]
-
- # Before nixing comments, check if the line is blank for no good
- # reason. This includes the first line after a block is opened, and
-  # blank lines at the end of a function (i.e., right before a line like '}').
- #
- # Skip all the blank line checks if we are immediately inside a
- # namespace body. In other words, don't issue blank line warnings
- # for this block:
- # namespace {
- #
- # }
- #
- # A warning about missing end of namespace comments will be issued instead.
- #
- # Also skip blank line checks for 'extern "C"' blocks, which are formatted
- # like namespaces.
- if (IsBlankLine(line) and
- not nesting_state.InNamespaceBody() and
- not nesting_state.InExternC()):
- elided = clean_lines.elided
- prev_line = elided[linenum - 1]
- prevbrace = prev_line.rfind('{')
- # TODO(unknown): Don't complain if line before blank line, and line after,
- # both start with alnums and are indented the same amount.
- # This ignores whitespace at the start of a namespace block
- # because those are not usually indented.
- if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
- # OK, we have a blank line at the start of a code block. Before we
- # complain, we check if it is an exception to the rule: The previous
- # non-empty line has the parameters of a function header that are indented
-      # 4 spaces (because they did not fit in an 80 column line when placed on
- # the same line as the function name). We also check for the case where
- # the previous line is indented 6 spaces, which may happen when the
-      # initializers of a constructor do not fit into an 80 column line.
- exception = False
- if Match(r' {6}\w', prev_line): # Initializer list?
- # We are looking for the opening column of initializer list, which
- # should be indented 4 spaces to cause 6 space indentation afterwards.
- search_position = linenum-2
- while (search_position >= 0
- and Match(r' {6}\w', elided[search_position])):
- search_position -= 1
- exception = (search_position >= 0
- and elided[search_position][:5] == ' :')
- else:
- # Search for the function arguments or an initializer list. We use a
- # simple heuristic here: If the line is indented 4 spaces; and we have a
- # closing paren, without the opening paren, followed by an opening brace
- # or colon (for initializer lists) we assume that it is the last line of
- # a function header. If we have a colon indented 4 spaces, it is an
- # initializer list.
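-      # e.g. (illustrative): a blank line directly after
-      #   void DoSomething(
-      #       int wrapped_parameter) {
-      # is excused by this exception.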
- exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
- prev_line)
- or Match(r' {4}:', prev_line))
-
- if not exception:
- error(filename, linenum, 'whitespace/blank_line', 2,
- 'Redundant blank line at the start of a code block '
- 'should be deleted.')
- # Ignore blank lines at the end of a block in a long if-else
- # chain, like this:
- # if (condition1) {
- # // Something followed by a blank line
- #
- # } else if (condition2) {
- # // Something else
- # }
- if linenum + 1 < clean_lines.NumLines():
- next_line = raw[linenum + 1]
- if (next_line
- and Match(r'\s*}', next_line)
- and next_line.find('} else ') == -1):
- error(filename, linenum, 'whitespace/blank_line', 3,
- 'Redundant blank line at the end of a code block '
- 'should be deleted.')
-
- matched = Match(r'\s*(public|protected|private):', prev_line)
- if matched:
- error(filename, linenum, 'whitespace/blank_line', 3,
- 'Do not leave a blank line after "%s:"' % matched.group(1))
-
- # Next, check comments
- next_line_start = 0
- if linenum + 1 < clean_lines.NumLines():
- next_line = raw[linenum + 1]
- next_line_start = len(next_line) - len(next_line.lstrip())
- CheckComment(line, filename, linenum, next_line_start, error)
-
- # get rid of comments and strings
- line = clean_lines.elided[linenum]
-
- # You shouldn't have spaces before your brackets, except maybe after
- # 'delete []', 'return []() {};', or 'auto [abc, ...] = ...;'.
- if Search(r'\w\s+\[', line) and not Search(r'(?:auto&?|delete|return)\s+\[', line):
- error(filename, linenum, 'whitespace/braces', 5,
- 'Extra space before [')
-
-  # In range-based for, we want spaces before and after the colon, but
- # not around "::" tokens that might appear.
- if (Search(r'for *\(.*[^:]:[^: ]', line) or
- Search(r'for *\(.*[^: ]:[^:]', line)):
- error(filename, linenum, 'whitespace/forcolon', 2,
- 'Missing space around colon in range-based for loop')
-
-
-def CheckOperatorSpacing(filename, clean_lines, linenum, error):
- """Checks for horizontal spacing around operators.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
-
- # Don't try to do spacing checks for operator methods. Do this by
- # replacing the troublesome characters with something else,
- # preserving column position for all other characters.
- #
- # The replacement is done repeatedly to avoid false positives from
- # operators that call operators.
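-  # e.g. (illustrative) 'bool operator<(const Foo& a)' becomes
-  # 'bool operator_(const Foo& a)', so the '<' checks below skip it.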
- while True:
- match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
- if match:
- line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
- else:
- break
-
- # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
- # Otherwise not. Note we only check for non-spaces on *both* sides;
- # sometimes people put non-spaces on one side when aligning ='s among
- # many lines (not that this is behavior that I approve of...)
- if ((Search(r'[\w.]=', line) or
- Search(r'=[\w.]', line))
- and not Search(r'\b(if|while|for) ', line)
- # Operators taken from [lex.operators] in C++11 standard.
- and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
- and not Search(r'operator=', line)):
- error(filename, linenum, 'whitespace/operators', 4,
- 'Missing spaces around =')
-
- # It's ok not to have spaces around binary operators like + - * /, but if
- # there's too little whitespace, we get concerned. It's hard to tell,
- # though, so we punt on this one for now. TODO.
-
- # You should always have whitespace around binary operators.
- #
- # Check <= and >= first to avoid false positives with < and >, then
- # check non-include lines for spacing around < and >.
- #
-  # If the operator is followed by a comma, assume it's being used in a
- # macro context and don't do any checks. This avoids false
- # positives.
- #
- # Note that && is not included here. This is because there are too
- # many false positives due to RValue references.
- match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
- if match:
- error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around %s' % match.group(1))
- elif not Match(r'#.*include', line):
- # Look for < that is not surrounded by spaces. This is only
- # triggered if both sides are missing spaces, even though
-    # technically we should flag if at least one side is missing a
- # space. This is done to avoid some false positives with shifts.
- match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
- if match:
- (_, _, end_pos) = CloseExpression(
- clean_lines, linenum, len(match.group(1)))
- if end_pos <= -1:
- error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around <')
-
- # Look for > that is not surrounded by spaces. Similar to the
- # above, we only trigger if both sides are missing spaces to avoid
- # false positives with shifts.
- match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
- if match:
- (_, _, start_pos) = ReverseCloseExpression(
- clean_lines, linenum, len(match.group(1)))
- if start_pos <= -1:
- error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around >')
-
- # We allow no-spaces around << when used like this: 10<<20, but
- # not otherwise (particularly, not when used as streams)
- #
- # We also allow operators following an opening parenthesis, since
- # those tend to be macros that deal with operators.
- match = Search(r'(operator|[^\s(<])(?:L|UL|LL|ULL|l|ul|ll|ull)?<<([^\s,=<])', line)
- if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
- not (match.group(1) == 'operator' and match.group(2) == ';')):
- error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around <<')
-
- # We allow no-spaces around >> for almost anything. This is because
- # C++11 allows ">>" to close nested templates, which accounts for
- # most cases when ">>" is not followed by a space.
- #
- # We still warn on ">>" followed by alpha character, because that is
- # likely due to ">>" being used for right shifts, e.g.:
- # value >> alpha
- #
- # When ">>" is used to close templates, the alphanumeric letter that
- # follows would be part of an identifier, and there should still be
- # a space separating the template type and the identifier.
- # type> alpha
- match = Search(r'>>[a-zA-Z_]', line)
- if match:
- error(filename, linenum, 'whitespace/operators', 3,
- 'Missing spaces around >>')
-
- # There shouldn't be space around unary operators
- match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
- if match:
- error(filename, linenum, 'whitespace/operators', 4,
- 'Extra space for operator %s' % match.group(1))
-
-
-def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
- """Checks for horizontal spacing around parentheses.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
-
-  # Require a space between if/for/while/switch and the opening paren
- match = Search(r' (if\(|for\(|while\(|switch\()', line)
- if match:
- error(filename, linenum, 'whitespace/parens', 5,
- 'Missing space before ( in %s' % match.group(1))
-
- # For if/for/while/switch, the left and right parens should be
- # consistent about how many spaces are inside the parens, and
- # there should either be zero or one spaces inside the parens.
- # We don't want: "if ( foo)" or "if ( foo )".
- # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
- match = Search(r'\b(if|for|while|switch)\s*'
- r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
- line)
- if match:
- if len(match.group(2)) != len(match.group(4)):
- if not (match.group(3) == ';' and
- len(match.group(2)) == 1 + len(match.group(4)) or
- not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
- error(filename, linenum, 'whitespace/parens', 5,
- 'Mismatching spaces inside () in %s' % match.group(1))
- if len(match.group(2)) not in [0, 1]:
- error(filename, linenum, 'whitespace/parens', 5,
- 'Should have zero or one spaces inside ( and ) in %s' %
- match.group(1))
-
-
-def CheckCommaSpacing(filename, clean_lines, linenum, error):
- """Checks for horizontal spacing near commas and semicolons.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- raw = clean_lines.lines_without_raw_strings
- line = clean_lines.elided[linenum]
-
- # You should always have a space after a comma (either as fn arg or operator)
- #
- # This does not apply when the non-space character following the
- # comma is another comma, since the only time when that happens is
- # for empty macro arguments.
- #
- # We run this check in two passes: first pass on elided lines to
- # verify that lines contain missing whitespaces, second pass on raw
- # lines to confirm that those missing whitespaces are not due to
- # elided comments.
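-  # e.g. (illustrative) 'f(a,b)' is flagged, 'f(a, b)' is not, and a comma
-  # directly followed by another comma (an empty macro argument) is ignored.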
- if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
- Search(r',[^,\s]', raw[linenum])):
- error(filename, linenum, 'whitespace/comma', 3,
- 'Missing space after ,')
-
- # You should always have a space after a semicolon
-  # except for a few corner cases
-  # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
- # space after ;
- if Search(r';[^\s};\\)/]', line):
- error(filename, linenum, 'whitespace/semicolon', 3,
- 'Missing space after ;')
-
-
-def _IsType(clean_lines, nesting_state, expr):
- """Check if expression looks like a type name, returns true if so.
-
- Args:
- clean_lines: A CleansedLines instance containing the file.
- nesting_state: A NestingState instance which maintains information about
- the current stack of nested blocks being parsed.
- expr: The expression to check.
- Returns:
- True, if token looks like a type.
- """
- # Keep only the last token in the expression
- last_word = Match(r'^.*(\b\S+)$', expr)
- if last_word:
- token = last_word.group(1)
- else:
- token = expr
-
- # Match native types and stdint types
- if _TYPES.match(token):
- return True
-
- # Try a bit harder to match templated types. Walk up the nesting
- # stack until we find something that resembles a typename
- # declaration for what we are looking for.
- typename_pattern = (r'\b(?:typename|class|struct)\s+' + re.escape(token) +
- r'\b')
- block_index = len(nesting_state.stack) - 1
- while block_index >= 0:
- if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
- return False
-
- # Found where the opening brace is. We want to scan from this
- # line up to the beginning of the function, minus a few lines.
-    #   template <typename T>
-    #   class C
-    #     : public ... { // start scanning here
- last_line = nesting_state.stack[block_index].starting_linenum
-
- next_block_start = 0
- if block_index > 0:
- next_block_start = nesting_state.stack[block_index - 1].starting_linenum
- first_line = last_line
- while first_line >= next_block_start:
- if clean_lines.elided[first_line].find('template') >= 0:
- break
- first_line -= 1
- if first_line < next_block_start:
-      # Didn't find any "template" keyword before reaching the next block;
-      # there are probably no template things to check for this block.
- block_index -= 1
- continue
-
- # Look for typename in the specified range
- for i in xrange(first_line, last_line + 1, 1):
- if Search(typename_pattern, clean_lines.elided[i]):
- return True
- block_index -= 1
-
- return False
-
-
-def CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error):
- """Checks for horizontal spacing near commas.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- nesting_state: A NestingState instance which maintains information about
- the current stack of nested blocks being parsed.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
-
- # Except after an opening paren, or after another opening brace (in case of
- # an initializer list, for instance), you should have spaces before your
- # braces when they are delimiting blocks, classes, namespaces etc.
- # And since you should never have braces at the beginning of a line,
- # this is an easy test. Except that braces used for initialization don't
- # follow the same rule; we often don't want spaces before those.
- match = Match(r'^(.*[^ ({>]){', line)
-
- if match:
-    # Try a bit harder to check for brace initialization. This
-    # happens in one of the following forms:
-    #   Constructor() : initializer_list_{} { ... }
-    #   Constructor{}.MemberFunction()
-    #   Type variable{};
-    #   FunctionCall(type{}, ...);
-    #   LastArgument(..., type{});
-    #   LOG(INFO) << type{} << " ...";
-    #   map_of_type[{...}] = ...;
-    #   ternary = expr ? new type{} : nullptr;
-    #   OuterTemplate<InnerTemplateConstructor<Type>>{}
-    #
-    # We check for the character following the closing brace, and
-    # silence the warning if it's one of those listed above, i.e.
-    # "{.;,)<>]:".
-    #
-    # To account for nested initializer lists, we allow any number of
-    # closing braces up to "{;,)<". We can't simply silence the
-    # warning on first sight of closing brace, because that would
-    # cause false negatives for things that are not initializer lists.
-    #   Silence this:         But not this:
-    #     Outer{                if (...) {
-    #       Inner{...}            if (...){  // Missing space before {
-    #     };                    }
-    #
-    # There is a false negative with this approach if people inserted
-    # spurious semicolons, e.g. "if (cond){};", but we will catch the
-    # spurious semicolon with a separate check.
- leading_text = match.group(1)
- (endline, endlinenum, endpos) = CloseExpression(
- clean_lines, linenum, len(match.group(1)))
- trailing_text = ''
- if endpos > -1:
- trailing_text = endline[endpos:]
- for offset in xrange(endlinenum + 1,
- min(endlinenum + 3, clean_lines.NumLines() - 1)):
- trailing_text += clean_lines.elided[offset]
- # We also suppress warnings for `uint64_t{expression}` etc., as the style
- # guide recommends brace initialization for integral types to avoid
- # overflow/truncation.
- if (not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text)
- and not _IsType(clean_lines, nesting_state, leading_text)):
- error(filename, linenum, 'whitespace/braces', 5,
- 'Missing space before {')
-
- # Make sure '} else {' has spaces.
- if Search(r'}else', line):
- error(filename, linenum, 'whitespace/braces', 5,
- 'Missing space before else')
-
- # You shouldn't have a space before a semicolon at the end of the line.
- # There's a special case for "for" since the style guide allows space before
- # the semicolon there.
- if Search(r':\s*;\s*$', line):
- error(filename, linenum, 'whitespace/semicolon', 5,
- 'Semicolon defining empty statement. Use {} instead.')
- elif Search(r'^\s*;\s*$', line):
- error(filename, linenum, 'whitespace/semicolon', 5,
- 'Line contains only semicolon. If this should be an empty statement, '
- 'use {} instead.')
- elif (Search(r'\s+;\s*$', line) and
- not Search(r'\bfor\b', line)):
- error(filename, linenum, 'whitespace/semicolon', 5,
- 'Extra space before last semicolon. If this should be an empty '
- 'statement, use {} instead.')
-
-
-def IsDecltype(clean_lines, linenum, column):
- """Check if the token ending on (linenum, column) is decltype().
-
- Args:
- clean_lines: A CleansedLines instance containing the file.
- linenum: the number of the line to check.
- column: end column of the token to check.
- Returns:
- True if this token is decltype() expression, False otherwise.
- """
- (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
- if start_col < 0:
- return False
- if Search(r'\bdecltype\s*$', text[0:start_col]):
- return True
- return False
-
-
-def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
- """Checks for additional blank line issues related to sections.
-
- Currently the only thing checked here is blank line before protected/private.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- class_info: A _ClassInfo objects.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- # Skip checks if the class is small, where small means 25 lines or less.
- # 25 lines seems like a good cutoff since that's the usual height of
- # terminals, and any class that can't fit in one screen can't really
- # be considered "small".
- #
- # Also skip checks if we are on the first line. This accounts for
- # classes that look like
- # class Foo { public: ... };
- #
-  # If we didn't find the end of the class, last_line will be zero,
- # and the check will be skipped by the first condition.
- if (class_info.last_line - class_info.starting_linenum <= 24 or
- linenum <= class_info.starting_linenum):
- return
-
- matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
- if matched:
- # Issue warning if the line before public/protected/private was
- # not a blank line, but don't do this if the previous line contains
- # "class" or "struct". This can happen two ways:
- # - We are at the beginning of the class.
- # - We are forward-declaring an inner class that is semantically
- # private, but needed to be public for implementation reasons.
- # Also ignores cases where the previous line ends with a backslash as can be
- # common when defining classes in C macros.
- prev_line = clean_lines.lines[linenum - 1]
- if (not IsBlankLine(prev_line) and
- not Search(r'\b(class|struct)\b', prev_line) and
- not Search(r'\\$', prev_line)):
- # Try a bit harder to find the beginning of the class. This is to
- # account for multi-line base-specifier lists, e.g.:
- # class Derived
- # : public Base {
- end_class_head = class_info.starting_linenum
- for i in range(class_info.starting_linenum, linenum):
- if Search(r'\{\s*$', clean_lines.lines[i]):
- end_class_head = i
- break
- if end_class_head < linenum - 1:
- error(filename, linenum, 'whitespace/blank_line', 3,
- '"%s:" should be preceded by a blank line' % matched.group(1))
-
-
-def GetPreviousNonBlankLine(clean_lines, linenum):
- """Return the most recent non-blank line and its line number.
-
- Args:
- clean_lines: A CleansedLines instance containing the file contents.
- linenum: The number of the line to check.
-
- Returns:
- A tuple with two elements. The first element is the contents of the last
- non-blank line before the current line, or the empty string if this is the
- first non-blank line. The second is the line number of that line, or -1
- if this is the first non-blank line.
- """
-
- prevlinenum = linenum - 1
- while prevlinenum >= 0:
- prevline = clean_lines.elided[prevlinenum]
- if not IsBlankLine(prevline): # if not a blank line...
- return (prevline, prevlinenum)
- prevlinenum -= 1
- return ('', -1)
-
-
-def CheckBraces(filename, clean_lines, linenum, error):
- """Looks for misplaced braces (e.g. at the end of line).
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
-
- line = clean_lines.elided[linenum] # get rid of comments and strings
-
- if Match(r'\s*{\s*$', line):
- # We allow an open brace to start a line in the case where someone is using
- # braces in a block to explicitly create a new scope, which is commonly used
- # to control the lifetime of stack-allocated variables. Braces are also
- # used for brace initializers inside function calls. We don't detect this
- # perfectly: we just don't complain if the last non-whitespace character on
- # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
- # previous line starts a preprocessor block. We also allow a brace on the
- # following line if it is part of an array initialization and would not fit
- # within the 80 character limit of the preceding line.
- prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
- if (not Search(r'[,;:}{(]\s*$', prevline) and
- not Match(r'\s*#', prevline) and
- not (GetLineWidth(prevline) > _line_length - 2 and '[]' in prevline)):
- error(filename, linenum, 'whitespace/braces', 4,
- '{ should almost always be at the end of the previous line')
-
- # An else clause should be on the same line as the preceding closing brace.
- if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
- prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
- if Match(r'\s*}\s*$', prevline):
- error(filename, linenum, 'whitespace/newline', 4,
- 'An else should appear on the same line as the preceding }')
-
- # If braces come on one side of an else, they should be on both.
- # However, we have to worry about "else if" that spans multiple lines!
- if Search(r'else if\s*\(', line): # could be multi-line if
- brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
- # find the ( after the if
- pos = line.find('else if')
- pos = line.find('(', pos)
- if pos > 0:
- (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
- brace_on_right = endline[endpos:].find('{') != -1
- if brace_on_left != brace_on_right: # must be brace after if
- error(filename, linenum, 'readability/braces', 5,
- 'If an else has a brace on one side, it should have it on both')
- elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
- error(filename, linenum, 'readability/braces', 5,
- 'If an else has a brace on one side, it should have it on both')
-
-  # Likewise, an else should never have its body on the same line
- if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
- error(filename, linenum, 'whitespace/newline', 4,
- 'Else clause should never be on same line as else (use 2 lines)')
-
- # In the same way, a do/while should never be on one line
- if Match(r'\s*do [^\s{]', line):
- error(filename, linenum, 'whitespace/newline', 4,
- 'do/while clauses should not be on a single line')
-
- # Check single-line if/else bodies. The style guide says 'curly braces are not
- # required for single-line statements'. We additionally allow multi-line,
- # single statements, but we reject anything with more than one semicolon in
- # it. This means that the first semicolon after the if should be at the end of
- # its line, and the line after that should have an indent level equal to or
- # lower than the if. We also check for ambiguous if/else nesting without
- # braces.
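-  # e.g. (illustrative) 'if (x) DoThis();' is accepted, while
-  # 'if (x) DoThis(); DoThat();' is flagged as needing braces.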
- if_else_match = Search(r'\b(if\s*(|constexpr)\s*\(|else\b)', line)
- if if_else_match and not Match(r'\s*#', line):
- if_indent = GetIndentLevel(line)
- endline, endlinenum, endpos = line, linenum, if_else_match.end()
- if_match = Search(r'\bif\s*(|constexpr)\s*\(', line)
- if if_match:
- # This could be a multiline if condition, so find the end first.
- pos = if_match.end() - 1
- (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
- # Check for an opening brace, either directly after the if or on the next
- # line. If found, this isn't a single-statement conditional.
- if (not Match(r'\s*{', endline[endpos:])
- and not (Match(r'\s*$', endline[endpos:])
- and endlinenum < (len(clean_lines.elided) - 1)
- and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
- while (endlinenum < len(clean_lines.elided)
- and ';' not in clean_lines.elided[endlinenum][endpos:]):
- endlinenum += 1
- endpos = 0
- if endlinenum < len(clean_lines.elided):
- endline = clean_lines.elided[endlinenum]
- # We allow a mix of whitespace and closing braces (e.g. for one-liner
- # methods) and a single \ after the semicolon (for macros)
- endpos = endline.find(';')
- if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
-          # Semicolon isn't the last character; there's something trailing.
- # Output a warning if the semicolon is not contained inside
- # a lambda expression.
- if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
- endline):
- error(filename, linenum, 'readability/braces', 4,
- 'If/else bodies with multiple statements require braces')
- elif endlinenum < len(clean_lines.elided) - 1:
- # Make sure the next line is dedented
- next_line = clean_lines.elided[endlinenum + 1]
- next_indent = GetIndentLevel(next_line)
- # With ambiguous nested if statements, this will error out on the
- # if that *doesn't* match the else, regardless of whether it's the
- # inner one or outer one.
- if (if_match and Match(r'\s*else\b', next_line)
- and next_indent != if_indent):
- error(filename, linenum, 'readability/braces', 4,
- 'Else clause should be indented at the same level as if. '
- 'Ambiguous nested if/else chains require braces.')
- elif next_indent > if_indent:
- error(filename, linenum, 'readability/braces', 4,
- 'If/else bodies with multiple statements require braces')
-
-
-def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
- """Looks for redundant trailing semicolon.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
-
- line = clean_lines.elided[linenum]
-
- # Block bodies should not be followed by a semicolon. Due to C++11
- # brace initialization, there are more places where semicolons are
- # required than not, so we explicitly list the allowed rules rather
- # than listing the disallowed ones. These are the places where "};"
- # should be replaced by just "}":
- # 1. Some flavor of block following closing parenthesis:
- # for (;;) {};
- # while (...) {};
- # switch (...) {};
- # Function(...) {};
- # if (...) {};
- # if (...) else if (...) {};
- #
- # 2. else block:
- # if (...) else {};
- #
- # 3. const member function:
- # Function(...) const {};
- #
- # 4. Block following some statement:
- # x = 42;
- # {};
- #
- # 5. Block at the beginning of a function:
- # Function(...) {
- # {};
- # }
- #
- # Note that naively checking for the preceding "{" will also match
- # braces inside multi-dimensional arrays, but this is fine since
- # that expression will not contain semicolons.
- #
- # 6. Block following another block:
- # while (true) {}
- # {};
- #
- # 7. End of namespaces:
- # namespace {};
- #
-  # These semicolons seem far more common than other kinds of
- # redundant semicolons, possibly due to people converting classes
- # to namespaces. For now we do not warn for this case.
- #
- # Try matching case 1 first.
- match = Match(r'^(.*\)\s*)\{', line)
- if match:
- # Matched closing parenthesis (case 1). Check the token before the
- # matching opening parenthesis, and don't warn if it looks like a
- # macro. This avoids these false positives:
- # - macro that defines a base class
- # - multi-line macro that defines a base class
- # - macro that defines the whole class-head
- #
- # But we still issue warnings for macros that we know are safe to
- # warn, specifically:
- # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
- # - TYPED_TEST
- # - INTERFACE_DEF
- # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
- #
- # We implement a list of safe macros instead of a list of
- # unsafe macros, even though the latter appears less frequently in
-    # google code and would have been easier to implement. This is because
-    # getting the allowed list wrong merely leaves some extra semicolons,
-    # while getting the disallowed list wrong would result in compile errors.
- #
- # In addition to macros, we also don't want to warn on
- # - Compound literals
- # - Lambdas
- # - alignas specifier with anonymous structs
- # - decltype
- closing_brace_pos = match.group(1).rfind(')')
- opening_parenthesis = ReverseCloseExpression(
- clean_lines, linenum, closing_brace_pos)
- if opening_parenthesis[2] > -1:
- line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
- macro = Search(r'\b([A-Z_][A-Z0-9_]*)\s*$', line_prefix)
- func = Match(r'^(.*\])\s*$', line_prefix)
- if ((macro and
- macro.group(1) not in (
- 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
- 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
- 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
- (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
- Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
- Search(r'\bdecltype$', line_prefix) or
- Search(r'\s+=\s*$', line_prefix)):
- match = None
- if (match and
- opening_parenthesis[1] > 1 and
- Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
- # Multi-line lambda-expression
- match = None
-
- else:
- # Try matching cases 2-3.
- match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
- if not match:
- # Try matching cases 4-6. These are always matched on separate lines.
- #
- # Note that we can't simply concatenate the previous line to the
- # current line and do a single match, otherwise we may output
- # duplicate warnings for the blank line case:
- # if (cond) {
- # // blank line
- # }
- prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
- if prevline and Search(r'[;{}]\s*$', prevline):
- match = Match(r'^(\s*)\{', line)
-
- # Check matching closing brace
- if match:
- (endline, endlinenum, endpos) = CloseExpression(
- clean_lines, linenum, len(match.group(1)))
- if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
- # Current {} pair is eligible for semicolon check, and we have found
- # the redundant semicolon, output warning here.
- #
- # Note: because we are scanning forward for opening braces, and
- # outputting warnings for the matching closing brace, if there are
- # nested blocks with trailing semicolons, we will get the error
- # messages in reversed order.
-
- # We need to check the line forward for NOLINT
- raw_lines = clean_lines.raw_lines
- ParseNolintSuppressions(filename, raw_lines[endlinenum-1], endlinenum-1,
- error)
- ParseNolintSuppressions(filename, raw_lines[endlinenum], endlinenum,
- error)
-
- error(filename, endlinenum, 'readability/braces', 4,
- "You don't need a ; after a }")
-
-
-def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
- """Look for empty loop/conditional body with only a single semicolon.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
-
- # Search for loop keywords at the beginning of the line. Because only
- # whitespaces are allowed before the keywords, this will also ignore most
-  # do-while-loops, since those lines should start with a closing brace.
- #
- # We also check "if" blocks here, since an empty conditional block
- # is likely an error.
- line = clean_lines.elided[linenum]
- matched = Match(r'\s*(for|while|if)\s*\(', line)
- if matched:
- # Find the end of the conditional expression.
- (end_line, end_linenum, end_pos) = CloseExpression(
- clean_lines, linenum, line.find('('))
-
- # Output warning if what follows the condition expression is a semicolon.
- # No warning for all other cases, including whitespace or newline, since we
- # have a separate check for semicolons preceded by whitespace.
- if end_pos >= 0 and Match(r';', end_line[end_pos:]):
- if matched.group(1) == 'if':
- error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
- 'Empty conditional bodies should use {}')
- else:
- error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
- 'Empty loop bodies should use {} or continue')
-
- # Check for if statements that have completely empty bodies (no comments)
- # and no else clauses.
- if end_pos >= 0 and matched.group(1) == 'if':
- # Find the position of the opening { for the if statement.
-    # Return without logging an error if it has no braces.
- opening_linenum = end_linenum
- opening_line_fragment = end_line[end_pos:]
- # Loop until EOF or find anything that's not whitespace or opening {.
- while not Search(r'^\s*\{', opening_line_fragment):
- if Search(r'^(?!\s*$)', opening_line_fragment):
-        # Conditional has no braces.
- return
- opening_linenum += 1
- if opening_linenum == len(clean_lines.elided):
- # Couldn't find conditional's opening { or any code before EOF.
- return
- opening_line_fragment = clean_lines.elided[opening_linenum]
- # Set opening_line (opening_line_fragment may not be entire opening line).
- opening_line = clean_lines.elided[opening_linenum]
-
- # Find the position of the closing }.
- opening_pos = opening_line_fragment.find('{')
- if opening_linenum == end_linenum:
- # We need to make opening_pos relative to the start of the entire line.
- opening_pos += end_pos
- (closing_line, closing_linenum, closing_pos) = CloseExpression(
- clean_lines, opening_linenum, opening_pos)
- if closing_pos < 0:
- return
-
- # Now construct the body of the conditional. This consists of the portion
- # of the opening line after the {, all lines until the closing line,
- # and the portion of the closing line before the }.
- if (clean_lines.raw_lines[opening_linenum] !=
- CleanseComments(clean_lines.raw_lines[opening_linenum])):
- # Opening line ends with a comment, so conditional isn't empty.
- return
- if closing_linenum > opening_linenum:
- # Opening line after the {. Ignore comments here since we checked above.
- bodylist = list(opening_line[opening_pos+1:])
- # All lines until closing line, excluding closing line, with comments.
- bodylist.extend(clean_lines.raw_lines[opening_linenum+1:closing_linenum])
- # Closing line before the }. Won't (and can't) have comments.
- bodylist.append(clean_lines.elided[closing_linenum][:closing_pos-1])
- body = '\n'.join(bodylist)
- else:
- # If statement has brackets and fits on a single line.
- body = opening_line[opening_pos+1:closing_pos-1]
-
- # Check if the body is empty
- if not _EMPTY_CONDITIONAL_BODY_PATTERN.search(body):
- return
- # The body is empty. Now make sure there's not an else clause.
- current_linenum = closing_linenum
- current_line_fragment = closing_line[closing_pos:]
- # Loop until EOF or find anything that's not whitespace or else clause.
- while Search(r'^\s*$|^(?=\s*else)', current_line_fragment):
- if Search(r'^(?=\s*else)', current_line_fragment):
- # Found an else clause, so don't log an error.
- return
- current_linenum += 1
- if current_linenum == len(clean_lines.elided):
- break
- current_line_fragment = clean_lines.elided[current_linenum]
-
- # The body is empty and there's no else clause until EOF or other code.
- error(filename, end_linenum, 'whitespace/empty_if_body', 4,
- ('If statement had no body and no else clause'))
-
-
-def FindCheckMacro(line):
- """Find a replaceable CHECK-like macro.
-
- Args:
- line: line to search on.
- Returns:
- (macro name, start position), or (None, -1) if no replaceable
- macro is found.
- """
- for macro in _CHECK_MACROS:
- i = line.find(macro)
- if i >= 0:
- # Find opening parenthesis. Do a regular expression match here
- # to make sure that we are matching the expected CHECK macro, as
- # opposed to some other macro that happens to contain the CHECK
- # substring.
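-      # e.g. (illustrative) 'CHECK(x)' matches, but 'MY_CHECK(x)' is
-      # skipped: '_' and 'C' are both word characters, so \b cannot match
-      # between them.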
- matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
- if not matched:
- continue
- return (macro, len(matched.group(1)))
- return (None, -1)
-
-
-def CheckCheck(filename, clean_lines, linenum, error):
- """Checks the use of CHECK and EXPECT macros.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
-
- # Decide the set of replacement macros that should be suggested
- lines = clean_lines.elided
- (check_macro, start_pos) = FindCheckMacro(lines[linenum])
- if not check_macro:
- return
-
- # Find end of the boolean expression by matching parentheses
- (last_line, end_line, end_pos) = CloseExpression(
- clean_lines, linenum, start_pos)
- if end_pos < 0:
- return
-
- # If the check macro is followed by something other than a
- # semicolon, assume users will log their own custom error messages
- # and don't suggest any replacements.
- if not Match(r'\s*;', last_line[end_pos:]):
- return
-
- if linenum == end_line:
- expression = lines[linenum][start_pos + 1:end_pos - 1]
- else:
- expression = lines[linenum][start_pos + 1:]
- for i in xrange(linenum + 1, end_line):
- expression += lines[i]
- expression += last_line[0:end_pos - 1]
-
- # Parse expression so that we can take parentheses into account.
- # This avoids false positives for inputs like "CHECK((a < 4) == b)",
- # which is not replaceable by CHECK_LE.
- lhs = ''
- rhs = ''
- operator = None
- while expression:
- matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
- r'==|!=|>=|>|<=|<|\()(.*)$', expression)
- if matched:
- token = matched.group(1)
- if token == '(':
- # Parenthesized operand
- expression = matched.group(2)
- (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
- if end < 0:
- return # Unmatched parenthesis
- lhs += '(' + expression[0:end]
- expression = expression[end:]
- elif token in ('&&', '||'):
- # Logical and/or operators. This means the expression
- # contains more than one term, for example:
- # CHECK(42 < a && a < b);
- #
- # These are not replaceable with CHECK_LE, so bail out early.
- return
- elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
- # Non-relational operator
- lhs += token
- expression = matched.group(2)
- else:
- # Relational operator
- operator = token
- rhs = matched.group(2)
- break
- else:
- # Unparenthesized operand. Instead of appending to lhs one character
- # at a time, we do another regular expression match to consume several
- # characters at once if possible. Trivial benchmark shows that this
- # is more efficient when the operands are longer than a single
- # character, which is generally the case.
- matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
- if not matched:
- matched = Match(r'^(\s*\S)(.*)$', expression)
- if not matched:
- break
- lhs += matched.group(1)
- expression = matched.group(2)
-
- # Only apply checks if we got all parts of the boolean expression
- if not (lhs and operator and rhs):
- return
-
-  # Check that rhs does not contain logical operators. We already know
- # that lhs is fine since the loop above parses out && and ||.
- if rhs.find('&&') > -1 or rhs.find('||') > -1:
- return
-
- # At least one of the operands must be a constant literal. This is
- # to avoid suggesting replacements for unprintable things like
- # CHECK(variable != iterator)
- #
- # The following pattern matches decimal, hex integers, strings, and
- # characters (in that order).
- lhs = lhs.strip()
- rhs = rhs.strip()
- match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
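-  # e.g. (illustrative) '42', '-1', '0x1F', and '"foo"' all match, so
-  # 'CHECK(x == 42)' draws a 'Consider using CHECK_EQ' suggestion.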
- if Match(match_constant, lhs) or Match(match_constant, rhs):
- # Note: since we know both lhs and rhs, we can provide a more
- # descriptive error message like:
- # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
- # Instead of:
- # Consider using CHECK_EQ instead of CHECK(a == b)
- #
- # We are still keeping the less descriptive message because if lhs
- # or rhs gets long, the error message might become unreadable.
- error(filename, linenum, 'readability/check', 2,
- 'Consider using %s instead of %s(a %s b)' % (
- _CHECK_REPLACEMENT[check_macro][operator],
- check_macro, operator))
-
-
-def CheckAltTokens(filename, clean_lines, linenum, error):
- """Check alternative keywords being used in boolean expressions.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
-
- # Avoid preprocessor lines
- if Match(r'^\s*#', line):
- return
-
-  # Last-ditch effort to avoid multi-line comments. This will not help
-  # if the comment started before the current line or ended after the
-  # current line, but it catches most of the false positives. At least,
-  # it provides a way to work around this warning for people who use
-  # multi-line comments in preprocessor macros.
- #
- # TODO(unknown): remove this once cpplint has better support for
- # multi-line comments.
- if line.find('/*') >= 0 or line.find('*/') >= 0:
- return
-
- for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
- error(filename, linenum, 'readability/alt_tokens', 2,
- 'Use operator %s instead of %s' % (
- _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
-
-
-def GetLineWidth(line):
- """Determines the width of the line in column positions.
-
- Args:
- line: A string, which may be a Unicode string.
-
- Returns:
- The width of the line in column positions, accounting for Unicode
- combining characters and wide characters.
- """
- if isinstance(line, unicode):
- width = 0
- for uc in unicodedata.normalize('NFC', line):
- if unicodedata.east_asian_width(uc) in ('W', 'F'):
- width += 2
- elif not unicodedata.combining(uc):
- # Issue 337
- # https://mail.python.org/pipermail/python-list/2012-August/628809.html
- if (sys.version_info.major, sys.version_info.minor) <= (3, 2):
- # https://github.com/python/cpython/blob/2.7/Include/unicodeobject.h#L81
- is_wide_build = sysconfig.get_config_var("Py_UNICODE_SIZE") >= 4
- # https://github.com/python/cpython/blob/2.7/Objects/unicodeobject.c#L564
- is_low_surrogate = 0xDC00 <= ord(uc) <= 0xDFFF
- if not is_wide_build and is_low_surrogate:
- width -= 1
-
- width += 1
- return width
- else:
- return len(line)
-
-
-def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
- error):
- """Checks rules from the 'C++ style rules' section of cppguide.html.
-
- Most of these rules are hard to test (naming, comment style), but we
- do what we can. In particular we check for 2-space indents, line lengths,
- tab usage, spaces inside code, etc.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- file_extension: The extension (without the dot) of the filename.
- nesting_state: A NestingState instance which maintains information about
- the current stack of nested blocks being parsed.
- error: The function to call with any errors found.
- """
-
- # Don't use "elided" lines here, otherwise we can't check commented lines.
- # Don't want to use "raw" either, because we don't want to check inside C++11
-  # raw strings.
- raw_lines = clean_lines.lines_without_raw_strings
- line = raw_lines[linenum]
- prev = raw_lines[linenum - 1] if linenum > 0 else ''
-
- if line.find('\t') != -1:
- error(filename, linenum, 'whitespace/tab', 1,
- 'Tab found; better to use spaces')
-
- # One or three blank spaces at the beginning of the line is weird; it's
- # hard to reconcile that with 2-space indents.
-  # NOTE: here are the conditions Rob Pike used for his tests. Mine aren't
- # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
- # if(RLENGTH > 20) complain = 0;
- # if(match($0, " +(error|private|public|protected):")) complain = 0;
- # if(match(prev, "&& *$")) complain = 0;
- # if(match(prev, "\\|\\| *$")) complain = 0;
- # if(match(prev, "[\",=><] *$")) complain = 0;
- # if(match($0, " <<")) complain = 0;
- # if(match(prev, " +for \\(")) complain = 0;
- # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
- scope_or_label_pattern = r'\s*(?:public|private|protected|signals)(?:\s+(?:slots\s*)?)?:\s*\\?$'
- classinfo = nesting_state.InnermostClass()
- initial_spaces = 0
- cleansed_line = clean_lines.elided[linenum]
- while initial_spaces < len(line) and line[initial_spaces] == ' ':
- initial_spaces += 1
-  # There are certain situations in which we allow one space, notably for
- # section labels, and also lines containing multi-line raw strings.
- # We also don't check for lines that look like continuation lines
- # (of lines ending in double quotes, commas, equals, or angle brackets)
- # because the rules for how to indent those are non-trivial.
- if (not Search(r'[",=><] *$', prev) and
- (initial_spaces == 1 or initial_spaces == 3) and
- not Match(scope_or_label_pattern, cleansed_line) and
- not (clean_lines.raw_lines[linenum] != line and
- Match(r'^\s*""', line))):
- error(filename, linenum, 'whitespace/indent', 3,
- 'Weird number of spaces at line-start. '
- 'Are you using a 2-space indent?')
-
- if line and line[-1].isspace():
- error(filename, linenum, 'whitespace/end_of_line', 4,
- 'Line ends in whitespace. Consider deleting these extra spaces.')
-
- # Check if the line is a header guard.
- is_header_guard = False
- if IsHeaderExtension(file_extension):
- cppvar = GetHeaderGuardCPPVariable(filename)
- if (line.startswith('#ifndef %s' % cppvar) or
- line.startswith('#define %s' % cppvar) or
- line.startswith('#endif // %s' % cppvar)):
- is_header_guard = True
- # #include lines and header guards can be long, since there's no clean way to
- # split them.
- #
- # URLs can be long too. It's possible to split these, but it makes them
- # harder to cut&paste.
- #
- # The "$Id:...$" comment may also get very long without it being the
-  # developer's fault.
- #
- # Doxygen documentation copying can get pretty long when using an overloaded
- # function declaration
- if (not line.startswith('#include') and not is_header_guard and
- not Match(r'^\s*//.*http(s?)://\S*$', line) and
- not Match(r'^\s*//\s*[^\s]*$', line) and
- not Match(r'^// \$Id:.*#[0-9]+ \$$', line) and
- not Match(r'^\s*/// [@\\](copydoc|copydetails|copybrief) .*$', line)):
- line_width = GetLineWidth(line)
- if line_width > _line_length:
- error(filename, linenum, 'whitespace/line_length', 2,
- 'Lines should be <= %i characters long' % _line_length)
-
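-  # e.g. (illustrative) 'a = 1; b = 2;' on one line is flagged, while
-  # 'for (i = 0; i < 3; i++) {' and 'case 1: x = 2; break;' are allowed.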
- if (cleansed_line.count(';') > 1 and
- # allow simple single line lambdas
- not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}\n\r]*\}',
- line) and
- # for loops are allowed two ;'s (and may run over two lines).
- cleansed_line.find('for') == -1 and
- (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
- GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
- # It's ok to have many commands in a switch case that fits in 1 line
- not ((cleansed_line.find('case ') != -1 or
- cleansed_line.find('default:') != -1) and
- cleansed_line.find('break;') != -1)):
- error(filename, linenum, 'whitespace/newline', 0,
- 'More than one command on the same line')
-
- # Some more style checks
- CheckBraces(filename, clean_lines, linenum, error)
- CheckTrailingSemicolon(filename, clean_lines, linenum, error)
- CheckEmptyBlockBody(filename, clean_lines, linenum, error)
- CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
- CheckOperatorSpacing(filename, clean_lines, linenum, error)
- CheckParenthesisSpacing(filename, clean_lines, linenum, error)
- CheckCommaSpacing(filename, clean_lines, linenum, error)
- CheckBracesSpacing(filename, clean_lines, linenum, nesting_state, error)
- CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
- CheckCheck(filename, clean_lines, linenum, error)
- CheckAltTokens(filename, clean_lines, linenum, error)
- classinfo = nesting_state.InnermostClass()
- if classinfo:
- CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
-
-
-_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*(?:include|import)\s*([<"])([^>"]*)[>"].*$')
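-# e.g. (illustrative):
-#   _RE_PATTERN_INCLUDE.match('#include <map>').groups() == ('<', 'map')
-#   _RE_PATTERN_INCLUDE.match('#import "Foo.h"').groups() == ('"', 'Foo.h')
-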
-# Matches the first component of a filename delimited by -s and _s. That is:
-# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
-# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
-# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
-# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
-_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
-
-
-def _DropCommonSuffixes(filename):
- """Drops common suffixes like _test.cc or -inl.h from filename.
-
- For example:
- >>> _DropCommonSuffixes('foo/foo-inl.h')
- 'foo/foo'
- >>> _DropCommonSuffixes('foo/bar/foo.cc')
- 'foo/bar/foo'
- >>> _DropCommonSuffixes('foo/foo_internal.h')
- 'foo/foo'
- >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
- 'foo/foo_unusualinternal'
-
- Args:
- filename: The input filename.
-
- Returns:
- The filename with the common suffix removed.
- """
- for suffix in itertools.chain(
- ('%s.%s' % (test_suffix.lstrip('_'), ext)
- for test_suffix, ext in itertools.product(_test_suffixes, GetNonHeaderExtensions())),
- ('%s.%s' % (suffix, ext)
- for suffix, ext in itertools.product(['inl', 'imp', 'internal'], GetHeaderExtensions()))):
- if (filename.endswith(suffix) and len(filename) > len(suffix) and
- filename[-len(suffix) - 1] in ('-', '_')):
- return filename[:-len(suffix) - 1]
-
- for suffix in ['Tests.h', 'Test.m', 'Test.mm', 'Tests.m', 'Tests.mm']:
- if (filename.endswith(suffix) and len(filename) > len(suffix)):
- return filename[:-len(suffix)]
-
- return os.path.splitext(filename)[0]
-
-
-def _ClassifyInclude(fileinfo, include, used_angle_brackets, include_order="default"):
- """Figures out what kind of header 'include' is.
-
- Args:
- fileinfo: The current file cpplint is running over. A FileInfo instance.
- include: The path to a #included file.
- used_angle_brackets: True if the #include used <> rather than "".
- include_order: "default" or other value allowed in program arguments
-
- Returns:
- One of the _XXX_HEADER constants.
-
- For example:
- >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
- _C_SYS_HEADER
- >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
- _CPP_SYS_HEADER
- >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', True, "standardcfirst")
- _OTHER_SYS_HEADER
- >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
- _LIKELY_MY_HEADER
- >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
- ... 'bar/foo_other_ext.h', False)
- _POSSIBLE_MY_HEADER
- >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
- _OTHER_HEADER
- """
- # This is a list of all standard c++ header files, except
- # those already checked for above.
- is_cpp_header = include in _CPP_HEADERS
-
- # Mark include as C header if in list or in a known folder for standard-ish C headers.
- is_std_c_header = (include_order == "default") or (include in _C_HEADERS
- # additional linux glibc header folders
- or Search(r'(?:%s)\/.*\.h' % "|".join(C_STANDARD_HEADER_FOLDERS), include))
-
- # Headers with C++ extensions shouldn't be considered C system headers
- is_system = used_angle_brackets and not os.path.splitext(include)[1] in ['.hpp', '.hxx', '.h++']
-
- if is_system:
- if is_cpp_header:
- return _CPP_SYS_HEADER
- if is_std_c_header:
- return _C_SYS_HEADER
- else:
- return _OTHER_SYS_HEADER
-
- # If the target file and the include we're checking share a
- # basename when we drop common suffixes, and the include lives in
- # the same directory (or in its ../public sibling), then the
- # include is likely owned by the target file.
- target_dir, target_base = (
- os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
- include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
- target_dir_pub = os.path.normpath(target_dir + '/../public')
- target_dir_pub = target_dir_pub.replace('\\', '/')
- if target_base == include_base and (
- include_dir == target_dir or
- include_dir == target_dir_pub):
- return _LIKELY_MY_HEADER
-
- # If the target and include share some initial basename
- # component, it's possible the target is implementing the
- # include, so it's allowed to be first, but we'll never
- # complain if it's not there.
- target_first_component = _RE_FIRST_COMPONENT.match(target_base)
- include_first_component = _RE_FIRST_COMPONENT.match(include_base)
- if (target_first_component and include_first_component and
- target_first_component.group(0) ==
- include_first_component.group(0)):
- return _POSSIBLE_MY_HEADER
-
- return _OTHER_HEADER
-
-
-def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
- """Check rules that are applicable to #include lines.
-
- Strings on #include lines are NOT removed from the elided line, to make
- certain tasks easier. However, to prevent false positives, checks
- applicable to #include lines in CheckLanguage must be put here.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- include_state: An _IncludeState instance in which the headers are inserted.
- error: The function to call with any errors found.
- """
- fileinfo = FileInfo(filename)
- line = clean_lines.lines[linenum]
-
- # system-style includes should not be used for project includes
- match = Match(r'#include\s*<(([^/>]+)/[^>]+)', line)
- if match:
- if match.group(2) not in _C_SYSTEM_DIRECTORIES:
- error(filename, linenum, 'build/include', 4,
- '<%s> should be #include "%s" or #import <%s>' %
- (match.group(1), match.group(1), match.group(1)))
-
- # framework-style imports should not be used for project imports
- match = Match(r'#import\s*<(Firestore/Source/[^>]+)', line)
- if match:
- error(filename, linenum, 'build/include', 4,
- 'Prefer #import "%s" for project import rather than #import <>' %
- match.group(1))
-
- # C++ system files should not be #imported
- match = Match(r'#import\s*<([^/>.]+)>', line)
- if match:
- error(filename, linenum, 'build/include', 4,
- 'C++ header <%s> was #imported. Should be #include <%s>' %
- (match.group(1), match.group(1)))
-
- # Prefer C++ wrappers for C headers
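- # e.g. (illustrative): #include <stdio.h> is flagged; prefer <cstdio>.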
- match = Match(r'#include\s*<(([^>]+).h)>', line)
- if match:
- wrapper = 'c' + match.group(2)
- if wrapper in _CPP_HEADERS:
- error(filename, linenum, 'build/include', 4,
- 'Prefer C++ header <%s> for C system header %s' %
- (wrapper, match.group(1)))
-
- # "include" should use the new style "foo/bar.h" instead of just "bar.h"
- # Only do this check if the included header follows google naming
- # conventions. If not, assume that it's a 3rd party API that
- # requires special include conventions.
- #
- # We also make an exception for Lua headers, which follow google
- # naming convention but not the include convention.
- match = Match(r'#include\s*"([^/]+\.h)"', line)
- if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
- error(filename, linenum, 'build/include_subdir', 4,
- 'Include the directory when naming .h files')
-
- # We shouldn't include a file more than once. Actually, there are a
- # handful of instances where doing so is okay, but in general it's
- # not.
- match = _RE_PATTERN_INCLUDE.search(line)
- if match:
- include = match.group(2)
- used_angle_brackets = (match.group(1) == '<')
- duplicate_line = include_state.FindHeader(include)
- if duplicate_line >= 0:
- error(filename, linenum, 'build/include', 4,
- '"%s" already included at %s:%s' %
- (include, filename, duplicate_line))
- return
-
- for extension in GetNonHeaderExtensions():
- if (include.endswith('.' + extension) and
- os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
- error(filename, linenum, 'build/include', 4,
- 'Do not include .' + extension + ' files from other packages')
- return
-
- # We DO want to include a 3rd-party-looking header if it matches the
- # filename. Otherwise we get an erroneous "...should include its
- # header" error later.
- third_src_header = False
- for ext in GetHeaderExtensions():
- basefilename = filename[0:len(filename) - len(fileinfo.Extension())]
- headerfile = basefilename + '.' + ext
- headername = FileInfo(headerfile).RepositoryName()
- if headername in include or include in headername:
- third_src_header = True
- break
-
- if third_src_header or not _THIRD_PARTY_HEADERS_PATTERN.match(include):
- include_state.include_list[-1].append((include, linenum))
-
- # We want to ensure that headers appear in the right order:
- # 1) for foo.cc, foo.h (preferred location)
- # 2) c system files
- # 3) cpp system files
- # 4) for foo.cc, foo.h (deprecated location)
- # 5) other google headers
- #
- # We classify each include statement as one of those 5 types
- # using a number of techniques. The include_state object keeps
- # track of the highest type seen, and complains if we see a
- # lower type after that.
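- # For example, a well-ordered foo.cc might start with (illustrative):
- #   #include "foo/foo.h"  // 1) preferred location
- #   #include <stdio.h>    // 2) C system header
- #   #include <string>     // 3) C++ system header
- #   #include "bar/bar.h"  // 5) other project header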
- error_message = include_state.CheckNextIncludeOrder(
- _ClassifyInclude(fileinfo, include, used_angle_brackets, _include_order))
- if error_message:
- error(filename, linenum, 'build/include_order', 4,
- '%s. Should be: %s.h, c system, c++ system, other.' %
- (error_message, fileinfo.BaseName()))
- canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
- if not include_state.IsInAlphabeticalOrder(
- clean_lines, linenum, canonical_include):
- error(filename, linenum, 'build/include_alpha', 4,
- 'Include "%s" not in alphabetical order' % include)
- include_state.SetLastHeader(canonical_include)
-
-
-def _GetTextInside(text, start_pattern):
- r"""Retrieves all the text between matching open and close parentheses.
-
- Given a string of lines and a regular expression string, retrieve all the text
- following the expression and between opening punctuation symbols like
- (, [, or {, and the matching close-punctuation symbol. This properly handles
- nested occurrences of the punctuation, so for text like
- printf(a(), b(c()));
- a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
- start_pattern must match a string that ends with an opening punctuation symbol.
-
- Args:
- text: The text to extract from. Its comments and strings must be elided.
- It may be a single line or span multiple lines.
- start_pattern: The regexp string indicating where to start extracting
- the text.
- Returns:
- The extracted text.
- None if either the opening string or ending punctuation could not be found.
- """
- # TODO(unknown): Audit cpplint.py to see what places could be profitably
- # rewritten to use _GetTextInside (i.e., those that use inferior regexp
- # matching today).
-
- # Map each opening punctuation symbol to its matching closing symbol.
- matching_punctuation = {'(': ')', '{': '}', '[': ']'}
- closing_punctuation = set(itervalues(matching_punctuation))
-
- # Find the position to start extracting text.
- match = re.search(start_pattern, text, re.M)
- if not match: # start_pattern not found in text.
- return None
- start_position = match.end(0)
-
- assert start_position > 0, (
- 'start_pattern must end with an opening punctuation.')
- assert text[start_position - 1] in matching_punctuation, (
- 'start_pattern must end with an opening punctuation.')
- # Stack of closing punctuations we expect to have in text after position.
- punctuation_stack = [matching_punctuation[text[start_position - 1]]]
- position = start_position
- while punctuation_stack and position < len(text):
- if text[position] == punctuation_stack[-1]:
- punctuation_stack.pop()
- elif text[position] in closing_punctuation:
- # A closing punctuation without matching opening punctuations.
- return None
- elif text[position] in matching_punctuation:
- punctuation_stack.append(matching_punctuation[text[position]])
- position += 1
- if punctuation_stack:
- # Opening punctuations left without matching close-punctuations.
- return None
- # punctuations match.
- return text[start_position:position - 1]
-
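- # Example usage of _GetTextInside (illustrative): extraction works even
- # when the call spans multiple lines:
- #   _GetTextInside('printf(a(),\n b());', r'printf\(') == 'a(),\n b()'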
-
-# Patterns for matching call-by-reference parameters.
-#
-# Supports nested templates up to 2 levels deep using this messy pattern:
-# < (?: < (?: < [^<>]*
-# >
-# | [^<>] )*
-# >
-# | [^<>] )*
-# >
-_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
-_RE_PATTERN_TYPE = (
- r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
- r'(?:\w|'
- r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
- r'::)+')
-# A call-by-reference parameter ends with '& identifier'.
-_RE_PATTERN_REF_PARAM = re.compile(
- r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
- r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
-# A call-by-const-reference parameter either ends with 'const& identifier'
-# or looks like 'const type& identifier' when 'type' is atomic.
-_RE_PATTERN_CONST_REF_PARAM = (
- r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
- r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
-# Stream types.
-_RE_PATTERN_REF_STREAM_PARAM = (
- r'(?:.*stream\s*&\s*' + _RE_PATTERN_IDENT + r')')
-
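- # For example (illustrative), given the parameter text:
- #   'string& name,'       matches _RE_PATTERN_REF_PARAM
- #   'const string& name,' matches _RE_PATTERN_CONST_REF_PARAM
- #   'std::ostream& out)'  matches _RE_PATTERN_REF_STREAM_PARAM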
-
-def CheckLanguage(filename, clean_lines, linenum, file_extension,
- include_state, nesting_state, error):
- """Checks rules from the 'C++ language rules' section of cppguide.html.
-
- Some of these rules are hard to test (function overloading, using
- uint32 inappropriately), but we do the best we can.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- file_extension: The extension (without the dot) of the filename.
- include_state: An _IncludeState instance in which the headers are inserted.
- nesting_state: A NestingState instance which maintains information about
- the current stack of nested blocks being parsed.
- error: The function to call with any errors found.
- """
- # If the line is empty or consists entirely of a comment, no need to
- # check it.
- line = clean_lines.elided[linenum]
- if not line:
- return
-
- match = _RE_PATTERN_INCLUDE.search(line)
- if match:
- CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
- return
-
- # Reset include state across preprocessor directives. This is meant
- # to silence warnings for conditional includes.
- match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
- if match:
- include_state.ResetSection(match.group(1))
-
- # Perform other checks now that we are sure that this is not an include line
- CheckCasts(filename, clean_lines, linenum, error)
- CheckGlobalStatic(filename, clean_lines, linenum, error)
- CheckPrintf(filename, clean_lines, linenum, error)
-
- if IsHeaderExtension(file_extension):
- # TODO(unknown): check that 1-arg constructors are explicit.
- # How to tell it's a constructor?
- # (handled in CheckForNonStandardConstructs for now)
- # TODO(unknown): check that classes declare or disable copy/assign
- # (level 1 error)
- pass
-
- # Check if people are using the verboten C basic types. The only exception
- # we regularly allow is "unsigned short port" for port.
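- # For example (illustrative):
- #   short port;       // flagged: use "unsigned short" for ports
- #   long long count;  // flagged: use int64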
- if Search(r'\bshort port\b', line):
- if not Search(r'\bunsigned short port\b', line):
- error(filename, linenum, 'runtime/int', 4,
- 'Use "unsigned short" for ports, not "short"')
- else:
- match = Search(r'\b(short|long(?! +double)|long long)\b', line)
- if match:
- error(filename, linenum, 'runtime/int', 4,
- 'Use int16/int64/etc, rather than the C type %s' % match.group(1))
-
- # Check if some verboten operator overloading is going on
- # TODO(unknown): catch out-of-line unary operator&:
- # class X {};
- # int operator&(const X& x) { return 42; } // unary operator&
- # The trick is it's hard to tell apart from binary operator&:
- # class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
- if Search(r'\boperator\s*&\s*\(\s*\)', line):
- error(filename, linenum, 'runtime/operator', 4,
- 'Unary operator& is dangerous. Do not use it.')
-
- # Check for suspicious usage of "if" like
- # } if (a == b) {
- if Search(r'\}\s*if\s*\(', line):
- error(filename, linenum, 'readability/braces', 4,
- 'Did you mean "else if"? If not, start a new line for "if".')
-
- # Check for potential format string bugs like printf(foo).
- # We constrain the pattern not to pick things like DocidForPrintf(foo).
- # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
- # TODO(unknown): Catch the following case. Need to change the calling
- # convention of the whole function to process multiple line to handle it.
- # printf(
- # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
- printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
- if printf_args:
- match = Match(r'([\w.\->()]+)$', printf_args)
- if match and match.group(1) != '__VA_ARGS__':
- function_name = re.search(r'\b((?:string)?printf)\s*\(',
- line, re.I).group(1)
- error(filename, linenum, 'runtime/printf', 4,
- 'Potential format string bug. Do %s("%%s", %s) instead.'
- % (function_name, match.group(1)))
-
- # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
- match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
- if match and not Match(r"^(''|-?[0-9]+|0x[0-9A-Fa-f]+)$", match.group(2)):
- error(filename, linenum, 'runtime/memset', 4,
- 'Did you mean "memset(%s, 0, %s)"?'
- % (match.group(1), match.group(2)))
-
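- # Flag namespace using-directives; for example (illustrative):
- #   using namespace std;  // flagged
- #   using std::string;    // OK: using-declaration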
- if Search(r'\busing namespace\b', line):
- if Search(r'\bliterals\b', line):
- error(filename, linenum, 'build/namespaces_literals', 5,
- 'Do not use namespace using-directives. '
- 'Use using-declarations instead.')
- else:
- error(filename, linenum, 'build/namespaces', 5,
- 'Do not use namespace using-directives. '
- 'Use using-declarations instead.')
-
- # Detect variable-length arrays.
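- # For example (illustrative):
- #   int buf[length];      // flagged: 'length' is not a compile-time constant
- #   int buf[kMaxLength];  // OK: 'k' + CamelCase constant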
- match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
- if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
- match.group(3).find(']') == -1):
- # Split the size using space and arithmetic operators as delimiters.
- # If any of the resulting tokens are not compile time constants then
- # report the error.
- tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
- is_const = True
- skip_next = False
- for tok in tokens:
- if skip_next:
- skip_next = False
- continue
-
- if Search(r'sizeof\(.+\)', tok): continue
- if Search(r'arraysize\(\w+\)', tok): continue
-
- tok = tok.lstrip('(')
- tok = tok.rstrip(')')
- if not tok: continue
- if Match(r'\d+', tok): continue
- if Match(r'0[xX][0-9a-fA-F]+', tok): continue
- if Match(r'k[A-Z0-9]\w*', tok): continue
- if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
- if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
- # A catch all for tricky sizeof cases, including 'sizeof expression',
- # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
- # requires skipping the next token because we split on ' ' and '*'.
- if tok.startswith('sizeof'):
- skip_next = True
- continue
- is_const = False
- break
- if not is_const:
- error(filename, linenum, 'runtime/arrays', 1,
- 'Do not use variable-length arrays. Use an appropriately named '
- "('k' followed by CamelCase) compile-time constant for the size.")
-
- # Check for use of unnamed namespaces in header files. Registration
- # macros are typically OK, so we allow use of "namespace {" on lines
- # that end with backslashes.
- if (IsHeaderExtension(file_extension)
- and Search(r'\bnamespace\s*{', line)
- and line[-1] != '\\'):
- error(filename, linenum, 'build/namespaces_headers', 4,
- 'Do not use unnamed namespaces in header files. See '
- 'https://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
- ' for more information.')
-
-
-def CheckGlobalStatic(filename, clean_lines, linenum, error):
- """Check for unsafe global or static objects.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
-
- # Match two lines at a time to support multiline declarations
- if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
- line += clean_lines.elided[linenum + 1].strip()
-
- # Check for people declaring static/global STL strings at the top level.
- # This is dangerous because the C++ language does not guarantee that
- # globals with constructors are initialized before the first access, and
- # also because globals can be destroyed when some threads are still running.
- # TODO(unknown): Generalize this to also find static unique_ptr instances.
- # TODO(unknown): File bugs for clang-tidy to find these.
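- # For example (illustrative):
- #   static const std::string kName = "foo";  // flagged; prefer:
- #   static const char kName[] = "foo";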
- match = Match(
- r'((?:|static +)(?:|const +))(?::*std::)?string( +const)? +'
- r'([a-zA-Z0-9_:]+)\b(.*)',
- line)
-
- # Remove false positives:
- # - String pointers (as opposed to values).
- # string *pointer
- # const string *pointer
- # string const *pointer
- # string *const pointer
- #
- # - Functions and template specializations.
- # string Function(...
- # string Class::Method(...
- #
- # - Operators. These are matched separately because operator names
- # cross non-word boundaries, and trying to match both operators
- # and functions at the same time would decrease accuracy of
- # matching identifiers.
- # string Class::operator*()
- if (match and
- not Search(r'\bstring\b(\s+const)?\s*[\*\&]\s*(const\s+)?\w', line) and
- not Search(r'\boperator\W', line) and
- not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(4))):
- if Search(r'\bconst\b', line):
- error(filename, linenum, 'runtime/string', 4,
- 'For a static/global string constant, use a C style string '
- 'instead: "%schar%s %s[]".' %
- (match.group(1), match.group(2) or '', match.group(3)))
- else:
- error(filename, linenum, 'runtime/string', 4,
- 'Static/global string variables are not permitted.')
-
- if (Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line) or
- Search(r'\b([A-Za-z0-9_]*_)\(CHECK_NOTNULL\(\1\)\)', line)):
- error(filename, linenum, 'runtime/init', 4,
- 'You seem to be initializing a member variable with itself.')
-
-
-def CheckPrintf(filename, clean_lines, linenum, error):
- """Check for printf related issues.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
-
- # When snprintf is used, the second argument shouldn't be a literal.
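- # For example (illustrative):
- #   snprintf(buf, 10, "%d", x);           // flagged: literal size
- #   snprintf(buf, sizeof(buf), "%d", x);  // preferred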
- match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
- if match and match.group(2) != '0':
- # If 2nd arg is zero, snprintf is used to calculate size.
- error(filename, linenum, 'runtime/printf', 3,
- 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
- 'to snprintf.' % (match.group(1), match.group(2)))
-
- # Check if some verboten C functions are being used.
- if Search(r'\bsprintf\s*\(', line):
- error(filename, linenum, 'runtime/printf', 5,
- 'Never use sprintf. Use snprintf instead.')
- match = Search(r'\b(strcpy|strcat)\s*\(', line)
- if match:
- error(filename, linenum, 'runtime/printf', 4,
- 'Almost always, snprintf is better than %s' % match.group(1))
-
-
-def IsDerivedFunction(clean_lines, linenum):
- """Check if current line contains an inherited function.
-
- Args:
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- Returns:
- True if current line contains a function with "override"
- virt-specifier.
- """
- # Scan back a few lines for start of current function
- for i in xrange(linenum, max(-1, linenum - 10), -1):
- match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
- if match:
- # Look for "override" after the matching closing parenthesis
- line, _, closing_paren = CloseExpression(
- clean_lines, i, len(match.group(1)))
- return (closing_paren >= 0 and
- Search(r'\boverride\b', line[closing_paren:]))
- return False
-
-
-def IsOutOfLineMethodDefinition(clean_lines, linenum):
- """Check if current line contains an out-of-line method definition.
-
- Args:
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- Returns:
- True if current line contains an out-of-line method definition.
- """
- # Scan back a few lines for start of current function
- for i in xrange(linenum, max(-1, linenum - 10), -1):
- if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
- return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
- return False
-
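- # For example (illustrative): 'void Foo::Bar() {' starts an out-of-line
- # method definition; 'void Bar() {' inside a class body does not.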
-
-def IsInitializerList(clean_lines, linenum):
- """Check if current line is inside constructor initializer list.
-
- Args:
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- Returns:
- True if current line appears to be inside constructor initializer
- list, False otherwise.
- """
- for i in xrange(linenum, 1, -1):
- line = clean_lines.elided[i]
- if i == linenum:
- remove_function_body = Match(r'^(.*)\{\s*$', line)
- if remove_function_body:
- line = remove_function_body.group(1)
-
- if Search(r'\s:\s*\w+[({]', line):
- # A lone colon tends to indicate the start of a constructor
- # initializer list. It could also be a ternary operator, which
- # also tends to appear in constructor initializer lists rather
- # than in parameter lists.
- return True
- if Search(r'\}\s*,\s*$', line):
- # A closing brace followed by a comma is probably the end of a
- # brace-initialized member in constructor initializer list.
- return True
- if Search(r'[{};]\s*$', line):
- # Found one of the following:
- # - A closing brace or semicolon, probably the end of the previous
- # function.
- # - An opening brace, probably the start of current class or namespace.
- #
- # Current line is probably not inside an initializer list since
- # we saw one of those things without seeing the starting colon.
- return False
-
- # Got to the beginning of the file without seeing the start of
- # constructor initializer list.
- return False
-
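- # For example (illustrative): in
- #   Foo::Foo()
- #       : bar_(1),
- #         baz_(2) {
- # IsInitializerList returns True for the 'baz_(2) {' line.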
-
- def CheckForStringViewReferences(filename, clean_lines, linenum, error):
- """Flags const references to absl::string_view; pass it by value instead."""
- line = clean_lines.elided[linenum]
- match = Search(r'const absl::string_view(?:\s*&)', line)
- if match:
- error(filename, linenum, 'runtime/references', 5,
- 'Avoid const references to absl::string_view; just pass by value.')
-
-
-def CheckCasts(filename, clean_lines, linenum, error):
- """Various cast related checks.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- error: The function to call with any errors found.
- """
- line = clean_lines.elided[linenum]
-
- # Check to see if they're using a conversion function cast.
- # I just try to capture the most common basic types, though there are more.
- # Parameterless conversion functions, such as bool(), are allowed as they are
- # probably a member operator declaration or default constructor.
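- # For example (illustrative):
- #   int(x)  // flagged: use static_cast<int>(x)
- #   bool()  // allowed: no argument, likely a declaration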
- match = Search(
- r'(\bnew\s+(?:const\s+)?|\S<\s*(?:const\s+)?)?\b'
- r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
- r'(\([^)].*)', line)
- expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
- if match and not expecting_function:
- matched_type = match.group(2)
-
- # matched_new_or_template is used to silence two false positives:
- # - New operators
- # - Template arguments with function types
- #
- # For template arguments, we match on types immediately following
- # an opening bracket without any spaces. This is a fast way to
- # silence the common case where the function type is the first
- # template argument. False negative with less-than comparison is
- # avoided because those operators are usually followed by a space.
- #
- # function<double(double)> // bracket + no space = false positive
- # value < double(42) // bracket + space = true positive
- matched_new_or_template = match.group(1)
-
- # Avoid arrays by looking for brackets that come after the closing
- # parenthesis.
- if Match(r'\([^()]+\)\s*\[', match.group(3)):
- return
-
- # Other things to ignore:
- # - Function pointers
- # - Casts to pointer types
- # - Placement new
- # - Alias declarations
- matched_funcptr = match.group(3)
- if (matched_new_or_template is None and
- not (matched_funcptr and
- (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
- matched_funcptr) or
- matched_funcptr.startswith('(*)'))) and
- not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
- not Search(r'new\(\S+\)\s*' + matched_type, line)):
- error(filename, linenum, 'readability/casting', 4,
- 'Using deprecated casting style. '
- 'Use static_cast<%s>(...) instead' %
- matched_type)
-
- if not expecting_function:
- CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
- r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
-
- # This doesn't catch all cases. Consider (const char * const)"hello".
- #
- # (char *) "foo" should always be a const_cast (reinterpret_cast won't
- # compile).
- if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
- r'\((char\s?\*+\s?)\)\s*"', error):
- pass
- else:
- # Check pointer casts for other than string constants
- CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
- r'\((\w+\s?\*+\s?)\)', error)
-
- # In addition, we look for people taking the address of a cast. This
- # is dangerous -- casts can assign to temporaries, so the pointer doesn't
- # point where you think.
- #
- # Some non-identifier character is required before the '&' for the
- # expression to be recognized as a cast. These are casts:
- # expression = &static_cast<int&>(temporary());
- # function(&(int*)(temporary()));
- #
- # This is not a cast:
- # reference_type&(int* function_param);
- match = Search(
- r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
- r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
- if match:
- # Try a better error message when the & is bound to something
- # dereferenced by the casted pointer, as opposed to the casted
- # pointer itself.
- parenthesis_error = False
- match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
- if match:
- _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
- if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
- _, y2, x2 = CloseExpression(clean_lines, y1, x1)
- if x2 >= 0:
- extended_line = clean_lines.elided[y2][x2:]
- if y2 < clean_lines.NumLines() - 1:
- extended_line += clean_lines.elided[y2 + 1]
- if Match(r'\s*(?:->|\[)', extended_line):
- parenthesis_error = True
-
- if parenthesis_error:
- error(filename, linenum, 'readability/casting', 4,
- ('Are you taking an address of something dereferenced '
- 'from a cast? Wrapping the dereferenced expression in '
- 'parentheses will make the binding more obvious'))
- else:
- error(filename, linenum, 'runtime/casting', 4,
- ('Are you taking an address of a cast? '
- 'This is dangerous: could be a temp var. '
- 'Take the address before doing the cast, rather than after'))
-
-
-def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
- """Checks for a C-style cast by looking for the pattern.
-
- Args:
- filename: The name of the current file.
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
- cast_type: The string for the C++ cast to recommend. This is either
- reinterpret_cast, static_cast, or const_cast, depending.
- pattern: The regular expression used to find C-style casts.
- error: The function to call with any errors found.
-
- Returns:
- True if an error was emitted.
- False otherwise.
- """
- line = clean_lines.elided[linenum]
- match = Search(pattern, line)
- if not match:
- return False
-
- # Exclude lines with keywords that tend to look like casts
- context = line[0:match.start(1) - 1]
- if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
- return False
-
- # Try expanding the current context to see if we are one level of
- # parentheses inside a macro.
- if linenum > 0:
- for i in xrange(linenum - 1, max(0, linenum - 5), -1):
- context = clean_lines.elided[i] + context
- if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
- return False
-
- # operator++(int) and operator--(int)
- if context.endswith(' operator++') or context.endswith(' operator--'):
- return False
-
- # A single unnamed argument for a function tends to look like an old-style
- # cast. If we see one, don't issue warnings for deprecated casts.
- remainder = line[match.end(0):]
- if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
- remainder):
- return False
-
- # At this point, all that should be left is actual casts.
- error(filename, linenum, 'readability/casting', 4,
- 'Using C-style cast. Use %s<%s>(...) instead' %
- (cast_type, match.group(1)))
-
- return True
-
-
-def ExpectingFunctionArgs(clean_lines, linenum):
- """Checks whether where function type arguments are expected.
-
- Args:
- clean_lines: A CleansedLines instance containing the file.
- linenum: The number of the line to check.
-
- Returns:
- True if the line at 'linenum' is inside something that expects arguments
- of function types.
- """
- line = clean_lines.elided[linenum]
- return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
- (linenum >= 2 and
- (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
- clean_lines.elided[linenum - 1]) or
- Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
- clean_lines.elided[linenum - 2]) or
- Search(r'\bstd::m?function\s*\<\s*$',
- clean_lines.elided[linenum - 1]))))
-
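- # For example (illustrative): in 'MOCK_METHOD1(Foo, bool(int));' the
- # 'bool(int)' is a function type, not a C-style cast, so cast warnings
- # are suppressed on that line.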
-
-_HEADERS_CONTAINING_TEMPLATES = (
- ('<deque>', ('deque',)),
- ('<functional>', ('unary_function', 'binary_function',
- 'plus', 'minus', 'multiplies', 'divides', 'modulus',
- 'negate',
- 'equal_to', 'not_equal_to', 'greater', 'less',
- 'greater_equal', 'less_equal',
- 'logical_and', 'logical_or', 'logical_not',
- 'unary_negate', 'not1', 'binary_negate', 'not2',
- 'bind1st', 'bind2nd',
- 'pointer_to_unary_function',
- 'pointer_to_binary_function',
- 'ptr_fun',
- 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
- 'mem_fun_ref_t',
- 'const_mem_fun_t', 'const_mem_fun1_t',
- 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
- 'mem_fun_ref',
- )),
- ('<limits>', ('numeric_limits',)),
- ('<list>', ('list',)),
- ('